/*-------------------------------------------------------------------------
 *
 * planner.c
 *    The query optimizer external interface.
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/optimizer/plan/planner.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <limits.h>
#include <math.h>

#include "access/genam.h"
#include "access/htup_details.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#ifdef OPTIMIZER_DEBUG
#include "nodes/print.h"
#endif
#include "nodes/supportnodes.h"
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/inherit.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parse_agg.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "storage/dsm_impl.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/syscache.h"

/* GUC parameters */
double      cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
int         debug_parallel_query = DEBUG_PARALLEL_OFF;
bool        parallel_leader_participation = true;

/* Hook for plugins to get control in planner() */
planner_hook_type planner_hook = NULL;

/* Hook for plugins to get control when grouping_planner() plans upper rels */
create_upper_paths_hook_type create_upper_paths_hook = NULL;


/* Expression kind codes for preprocess_expression */
#define EXPRKIND_QUAL               0
#define EXPRKIND_TARGET             1
#define EXPRKIND_RTFUNC             2
#define EXPRKIND_RTFUNC_LATERAL     3
#define EXPRKIND_VALUES             4
#define EXPRKIND_VALUES_LATERAL     5
#define EXPRKIND_LIMIT              6
#define EXPRKIND_APPINFO            7
#define EXPRKIND_PHV                8
#define EXPRKIND_TABLESAMPLE        9
#define EXPRKIND_ARBITER_ELEM       10
#define EXPRKIND_TABLEFUNC          11
#define EXPRKIND_TABLEFUNC_LATERAL  12

/*
 * Data specific to grouping sets
 */
typedef struct
{
    List       *rollups;
    List       *hash_sets_idx;
    double      dNumHashGroups;
    bool        any_hashable;
    Bitmapset  *unsortable_refs;
    Bitmapset  *unhashable_refs;
    List       *unsortable_sets;
    int        *tleref_to_colnum_map;
} grouping_sets_data;

/*
 * Temporary structure for use during WindowClause reordering in order to be
 * able to sort WindowClauses on partitioning/ordering prefix.
 */
typedef struct
{
    WindowClause *wc;
    List       *uniqueOrder;    /* A List of unique ordering/partitioning
                                 * clauses per Window */
} WindowClauseSortData;

/* Passthrough data for standard_qp_callback */
typedef struct
{
    List       *activeWindows;  /* active windows, if any */
    grouping_sets_data *gset_data;  /* grouping sets data, if any */
} standard_qp_extra;

/* Local functions */
static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
static void grouping_planner(PlannerInfo *root, double tuple_fraction);
static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
                                      int *tleref_to_colnum_map);
static void preprocess_rowmarks(PlannerInfo *root);
static double preprocess_limit(PlannerInfo *root,
                               double tuple_fraction,
                               int64 *offset_est, int64 *count_est);
static void remove_useless_groupby_columns(PlannerInfo *root);
static List *preprocess_groupclause(PlannerInfo *root, List *force);
static List *extract_rollup_sets(List *groupingSets);
static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
static void standard_qp_callback(PlannerInfo *root, void *extra);
static double get_number_of_groups(PlannerInfo *root,
                                   double path_rows,
                                   grouping_sets_data *gd,
                                   List *target_list);
static RelOptInfo *create_grouping_paths(PlannerInfo *root,
                                         RelOptInfo *input_rel,
                                         PathTarget *target,
                                         bool target_parallel_safe,
                                         grouping_sets_data *gd);
static bool is_degenerate_grouping(PlannerInfo *root);
static void create_degenerate_grouping_paths(PlannerInfo *root,
                                             RelOptInfo *input_rel,
                                             RelOptInfo *grouped_rel);
static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                     PathTarget *target, bool target_parallel_safe,
                                     Node *havingQual);
static void create_ordinary_grouping_paths(PlannerInfo *root,
                                           RelOptInfo *input_rel,
                                           RelOptInfo *grouped_rel,
                                           const AggClauseCosts *agg_costs,
                                           grouping_sets_data *gd,
                                           GroupPathExtraData *extra,
                                           RelOptInfo **partially_grouped_rel_p);
static void consider_groupingsets_paths(PlannerInfo *root,
                                        RelOptInfo *grouped_rel,
                                        Path *path,
                                        bool is_sorted,
                                        bool can_hash,
                                        grouping_sets_data *gd,
                                        const AggClauseCosts *agg_costs,
                                        double dNumGroups);
static RelOptInfo *create_window_paths(PlannerInfo *root,
                                       RelOptInfo *input_rel,
                                       PathTarget *input_target,
                                       PathTarget *output_target,
                                       bool output_target_parallel_safe,
                                       WindowFuncLists *wflists,
                                       List *activeWindows);
static void create_one_window_path(PlannerInfo *root,
                                   RelOptInfo *window_rel,
                                   Path *path,
                                   PathTarget *input_target,
                                   PathTarget *output_target,
                                   WindowFuncLists *wflists,
                                   List *activeWindows);
static RelOptInfo *create_distinct_paths(PlannerInfo *root,
                                         RelOptInfo *input_rel);
static void create_partial_distinct_paths(PlannerInfo *root,
                                          RelOptInfo *input_rel,
                                          RelOptInfo *final_distinct_rel);
static RelOptInfo *create_final_distinct_paths(PlannerInfo *root,
                                               RelOptInfo *input_rel,
                                               RelOptInfo *distinct_rel);
static RelOptInfo *create_ordered_paths(PlannerInfo *root,
                                        RelOptInfo *input_rel,
                                        PathTarget *target,
                                        bool target_parallel_safe,
                                        double limit_tuples);
static PathTarget *make_group_input_target(PlannerInfo *root,
                                           PathTarget *final_target);
static PathTarget *make_partial_grouping_target(PlannerInfo *root,
                                                PathTarget *grouping_target,
                                                Node *havingQual);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
static void optimize_window_clauses(PlannerInfo *root,
                                    WindowFuncLists *wflists);
static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
static PathTarget *make_window_input_target(PlannerInfo *root,
                                            PathTarget *final_target,
                                            List *activeWindows);
static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
                                      List *tlist);
static PathTarget *make_sort_input_target(PlannerInfo *root,
                                          PathTarget *final_target,
                                          bool *have_postponed_srfs);
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
                                  List *targets, List *targets_contain_srfs);
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                      RelOptInfo *grouped_rel,
                                      RelOptInfo *partially_grouped_rel,
                                      const AggClauseCosts *agg_costs,
                                      grouping_sets_data *gd,
                                      double dNumGroups,
                                      GroupPathExtraData *extra);
static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
                                                 RelOptInfo *grouped_rel,
                                                 RelOptInfo *input_rel,
                                                 grouping_sets_data *gd,
                                                 GroupPathExtraData *extra,
                                                 bool force_rel_creation);
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
static bool can_partial_agg(PlannerInfo *root);
static void apply_scanjoin_target_to_paths(PlannerInfo *root,
                                           RelOptInfo *rel,
                                           List *scanjoin_targets,
                                           List *scanjoin_targets_contain_srfs,
                                           bool scanjoin_target_parallel_safe,
                                           bool tlist_same_exprs);
static void create_partitionwise_grouping_paths(PlannerInfo *root,
                                                RelOptInfo *input_rel,
                                                RelOptInfo *grouped_rel,
                                                RelOptInfo *partially_grouped_rel,
                                                const AggClauseCosts *agg_costs,
                                                grouping_sets_data *gd,
                                                PartitionwiseAggregateType patype,
                                                GroupPathExtraData *extra);
static bool group_by_has_partkey(RelOptInfo *input_rel,
                                 List *targetList,
                                 List *groupClause);
static int  common_prefix_cmp(const void *a, const void *b);


/*****************************************************************************
 *
 *     Query optimizer entry point
 *
 * To support loadable plugins that monitor or modify planner behavior,
 * we provide a hook variable that lets a plugin get control before and
 * after the standard planning process.  The plugin would normally call
 * standard_planner().
 *
 * Note to plugin authors: standard_planner() scribbles on its Query input,
 * so you'd better copy that data structure if you want to plan more than once.
 *
 *****************************************************************************/
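/*
 * Illustrative sketch (hypothetical extension code, not part of this file):
 * a plugin normally saves the hook value it found in _PG_init() and chains
 * to it, falling back to standard_planner() when no earlier hook exists.
 * The names prev_planner and my_planner are placeholders.
 *
 *    static planner_hook_type prev_planner = NULL;
 *
 *    static PlannedStmt *
 *    my_planner(Query *parse, const char *query_string,
 *               int cursorOptions, ParamListInfo boundParams)
 *    {
 *        if (prev_planner)
 *            return prev_planner(parse, query_string,
 *                                cursorOptions, boundParams);
 *        return standard_planner(parse, query_string,
 *                                cursorOptions, boundParams);
 *    }
 *
 *    void
 *    _PG_init(void)
 *    {
 *        prev_planner = planner_hook;
 *        planner_hook = my_planner;
 *    }
 */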
PlannedStmt *
planner(Query *parse, const char *query_string, int cursorOptions,
        ParamListInfo boundParams)
{
    PlannedStmt *result;

    if (planner_hook)
        result = (*planner_hook) (parse, query_string, cursorOptions, boundParams);
    else
        result = standard_planner(parse, query_string, cursorOptions, boundParams);
    return result;
}

PlannedStmt *
standard_planner(Query *parse, const char *query_string, int cursorOptions,
                 ParamListInfo boundParams)
{
    PlannedStmt *result;
    PlannerGlobal *glob;
    double      tuple_fraction;
    PlannerInfo *root;
    RelOptInfo *final_rel;
    Path       *best_path;
    Plan       *top_plan;
    ListCell   *lp,
               *lr;

    /*
     * Set up global state for this planner invocation.  This data is needed
     * across all levels of sub-Query that might exist in the given command,
     * so we keep it in a separate struct that's linked to by each per-Query
     * PlannerInfo.
     */
    glob = makeNode(PlannerGlobal);

    glob->boundParams = boundParams;
    glob->subplans = NIL;
    glob->subroots = NIL;
    glob->rewindPlanIDs = NULL;
    glob->finalrtable = NIL;
    glob->finalrteperminfos = NIL;
    glob->finalrowmarks = NIL;
    glob->resultRelations = NIL;
    glob->appendRelations = NIL;
    glob->relationOids = NIL;
    glob->invalItems = NIL;
    glob->paramExecTypes = NIL;
    glob->lastPHId = 0;
    glob->lastRowMarkId = 0;
    glob->lastPlanNodeId = 0;
    glob->transientPlan = false;
    glob->dependsOnRole = false;

    /*
     * Assess whether it's feasible to use parallel mode for this query. We
     * can't do this in a standalone backend, or if the command will try to
     * modify any data, or if this is a cursor operation, or if GUCs are set
     * to values that don't permit parallelism, or if parallel-unsafe
     * functions are present in the query tree.
     *
     * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
     * MATERIALIZED VIEW to use parallel plans, but as of now, only the leader
     * backend writes into a completely new table.  In the future, we can
     * extend it to allow workers to write into the table.  However, to allow
     * parallel updates and deletes, we have to solve other problems,
     * especially around combo CIDs.)
     *
     * For now, we don't try to use parallel mode if we're running inside a
     * parallel worker.  We might eventually be able to relax this
     * restriction, but for now it seems best not to have parallel workers
     * trying to create their own parallel workers.
     */
    if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
        IsUnderPostmaster &&
        parse->commandType == CMD_SELECT &&
        !parse->hasModifyingCTE &&
        max_parallel_workers_per_gather > 0 &&
        !IsParallelWorker())
    {
        /* all the cheap tests pass, so scan the query tree */
        glob->maxParallelHazard = max_parallel_hazard(parse);
        glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
    }
    else
    {
        /* skip the query tree scan, just assume it's unsafe */
        glob->maxParallelHazard = PROPARALLEL_UNSAFE;
        glob->parallelModeOK = false;
    }

    /*
     * glob->parallelModeNeeded is normally set to false here and changed to
     * true during plan creation if a Gather or Gather Merge plan is actually
     * created (cf. create_gather_plan, create_gather_merge_plan).
     *
     * However, if debug_parallel_query = on or debug_parallel_query =
     * regress, then we impose parallel mode whenever it's safe to do so, even
     * if the final plan doesn't use parallelism.  It's not safe to do so if
     * the query contains anything parallel-unsafe; parallelModeOK will be
     * false in that case.  Note that parallelModeOK can't change after this
     * point.  Otherwise, everything in the query is either parallel-safe or
     * parallel-restricted, and in either case it should be OK to impose
     * parallel-mode restrictions.  If that ends up breaking something, then
     * either some function the user included in the query is incorrectly
     * labeled as parallel-safe or parallel-restricted when in reality it's
     * parallel-unsafe, or else the query planner itself has a bug.
     */
    glob->parallelModeNeeded = glob->parallelModeOK &&
        (debug_parallel_query != DEBUG_PARALLEL_OFF);

    /* Determine what fraction of the plan is likely to be scanned */
    if (cursorOptions & CURSOR_OPT_FAST_PLAN)
    {
        /*
         * We have no real idea how many tuples the user will ultimately FETCH
         * from a cursor, but it is often the case that he doesn't want 'em
         * all, or would prefer a fast-start plan anyway so that he can
         * process some of the tuples sooner.  Use a GUC parameter to decide
         * what fraction to optimize for.
         */
        tuple_fraction = cursor_tuple_fraction;

        /*
         * We document cursor_tuple_fraction as simply being a fraction, which
         * means the edge cases 0 and 1 have to be treated specially here.  We
         * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
         */
        if (tuple_fraction >= 1.0)
            tuple_fraction = 0.0;
        else if (tuple_fraction <= 0.0)
            tuple_fraction = 1e-10;
    }
    else
    {
        /* Default assumption is we need all the tuples */
        tuple_fraction = 0.0;
    }

    /* primary planning entry point (may recurse for subqueries) */
    root = subquery_planner(glob, parse, NULL,
                            false, tuple_fraction);

    /* Select best Path and turn it into a Plan */
    final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
    best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);

    top_plan = create_plan(root, best_path);

    /*
     * If creating a plan for a scrollable cursor, make sure it can run
     * backwards on demand.  Add a Material node at the top at need.
     */
    if (cursorOptions & CURSOR_OPT_SCROLL)
    {
        if (!ExecSupportsBackwardScan(top_plan))
            top_plan = materialize_finished_plan(top_plan);
    }

    /*
     * Optionally add a Gather node for testing purposes, provided this is
     * actually a safe thing to do.
     */
    if (debug_parallel_query != DEBUG_PARALLEL_OFF && top_plan->parallel_safe)
    {
        Gather     *gather = makeNode(Gather);

        /*
         * If there are any initPlans attached to the formerly-top plan node,
         * move them up to the Gather node; same as we do for Material node in
         * materialize_finished_plan.
         */
        gather->plan.initPlan = top_plan->initPlan;
        top_plan->initPlan = NIL;

        gather->plan.targetlist = top_plan->targetlist;
        gather->plan.qual = NIL;
        gather->plan.lefttree = top_plan;
        gather->plan.righttree = NULL;
        gather->num_workers = 1;
        gather->single_copy = true;
        gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);

        /*
         * Since this Gather has no parallel-aware descendants to signal to,
         * we don't need a rescan Param.
         */
        gather->rescan_param = -1;

        /*
         * Ideally we'd use cost_gather here, but setting up dummy path data
         * to satisfy it doesn't seem much cleaner than knowing what it does.
         */
        gather->plan.startup_cost = top_plan->startup_cost +
            parallel_setup_cost;
        gather->plan.total_cost = top_plan->total_cost +
            parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
        gather->plan.plan_rows = top_plan->plan_rows;
        gather->plan.plan_width = top_plan->plan_width;
        gather->plan.parallel_aware = false;
        gather->plan.parallel_safe = false;

        /* use parallel mode for parallel plans. */
        root->glob->parallelModeNeeded = true;

        top_plan = &gather->plan;
    }

    /*
     * If any Params were generated, run through the plan tree and compute
     * each plan node's extParam/allParam sets.  Ideally we'd merge this into
     * set_plan_references' tree traversal, but for now it has to be separate
     * because we need to visit subplans before not after main plan.
     */
    if (glob->paramExecTypes != NIL)
    {
        Assert(list_length(glob->subplans) == list_length(glob->subroots));
        forboth(lp, glob->subplans, lr, glob->subroots)
        {
            Plan       *subplan = (Plan *) lfirst(lp);
            PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);

            SS_finalize_plan(subroot, subplan);
        }
        SS_finalize_plan(root, top_plan);
    }

    /* final cleanup of the plan */
    Assert(glob->finalrtable == NIL);
    Assert(glob->finalrteperminfos == NIL);
    Assert(glob->finalrowmarks == NIL);
    Assert(glob->resultRelations == NIL);
    Assert(glob->appendRelations == NIL);
    top_plan = set_plan_references(root, top_plan);
    /* ... and the subplans (both regular subplans and initplans) */
    Assert(list_length(glob->subplans) == list_length(glob->subroots));
    forboth(lp, glob->subplans, lr, glob->subroots)
    {
        Plan       *subplan = (Plan *) lfirst(lp);
        PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);

        lfirst(lp) = set_plan_references(subroot, subplan);
    }

    /* build the PlannedStmt result */
    result = makeNode(PlannedStmt);

    result->commandType = parse->commandType;
    result->queryId = parse->queryId;
    result->hasReturning = (parse->returningList != NIL);
    result->hasModifyingCTE = parse->hasModifyingCTE;
    result->canSetTag = parse->canSetTag;
    result->transientPlan = glob->transientPlan;
    result->dependsOnRole = glob->dependsOnRole;
    result->parallelModeNeeded = glob->parallelModeNeeded;
    result->planTree = top_plan;
    result->partPruneInfos = glob->partPruneInfos;
    result->rtable = glob->finalrtable;
    result->permInfos = glob->finalrteperminfos;
    result->resultRelations = glob->resultRelations;
    result->appendRelations = glob->appendRelations;
    result->subplans = glob->subplans;
    result->rewindPlanIDs = glob->rewindPlanIDs;
    result->rowMarks = glob->finalrowmarks;
    result->relationOids = glob->relationOids;
    result->invalItems = glob->invalItems;
    result->paramExecTypes = glob->paramExecTypes;
    /* utilityStmt should be null, but we might as well copy it */
    result->utilityStmt = parse->utilityStmt;
    result->stmt_location = parse->stmt_location;
    result->stmt_len = parse->stmt_len;

    result->jitFlags = PGJIT_NONE;
    if (jit_enabled && jit_above_cost >= 0 &&
        top_plan->total_cost > jit_above_cost)
    {
        result->jitFlags |= PGJIT_PERFORM;

        /*
         * Decide how much effort should be put into generating better code.
         */
        if (jit_optimize_above_cost >= 0 &&
            top_plan->total_cost > jit_optimize_above_cost)
            result->jitFlags |= PGJIT_OPT3;
        if (jit_inline_above_cost >= 0 &&
            top_plan->total_cost > jit_inline_above_cost)
            result->jitFlags |= PGJIT_INLINE;

        /*
         * Decide which operations should be JITed.
         */
        if (jit_expressions)
            result->jitFlags |= PGJIT_EXPR;
        if (jit_tuple_deforming)
            result->jitFlags |= PGJIT_DEFORM;
    }

    if (glob->partition_directory != NULL)
        DestroyPartitionDirectory(glob->partition_directory);

    return result;
}


/*--------------------
 * subquery_planner
 *     Invokes the planner on a subquery.  We recurse to here for each
 *     sub-SELECT found in the query tree.
 *
 * glob is the global state for the current planner run.
 * parse is the querytree produced by the parser & rewriter.
 * parent_root is the immediate parent Query's info (NULL at the top level).
 * hasRecursion is true if this is a recursive WITH query.
 * tuple_fraction is the fraction of tuples we expect will be retrieved.
 * tuple_fraction is interpreted as explained for grouping_planner, below.
 *
 * Basically, this routine does the stuff that should only be done once
 * per Query object.  It then calls grouping_planner.  At one time,
 * grouping_planner could be invoked recursively on the same Query object;
 * that's not currently true, but we keep the separation between the two
 * routines anyway, in case we need it again someday.
 *
 * subquery_planner will be called recursively to handle sub-Query nodes
 * found within the query's expressions and rangetable.
 *
 * Returns the PlannerInfo struct ("root") that contains all data generated
 * while planning the subquery.  In particular, the Path(s) attached to
 * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
 * cheapest way(s) to implement the query.  The top level will select the
 * best Path and pass it through createplan.c to produce a finished Plan.
 *--------------------
 */
PlannerInfo *
subquery_planner(PlannerGlobal *glob, Query *parse,
                 PlannerInfo *parent_root,
                 bool hasRecursion, double tuple_fraction)
{
    PlannerInfo *root;
    List       *newWithCheckOptions;
    List       *newHaving;
    bool        hasOuterJoins;
    bool        hasResultRTEs;
    RelOptInfo *final_rel;
    ListCell   *l;

    /* Create a PlannerInfo data structure for this subquery */
    root = makeNode(PlannerInfo);
    root->parse = parse;
    root->glob = glob;
    root->query_level = parent_root ? parent_root->query_level + 1 : 1;
    root->parent_root = parent_root;
    root->plan_params = NIL;
    root->outer_params = NULL;
    root->planner_cxt = CurrentMemoryContext;
    root->init_plans = NIL;
    root->cte_plan_ids = NIL;
    root->multiexpr_params = NIL;
    root->join_domains = NIL;
    root->eq_classes = NIL;
    root->ec_merging_done = false;
    root->last_rinfo_serial = 0;
    root->all_result_relids =
        parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
    root->leaf_result_relids = NULL;    /* we'll find out leaf-ness later */
    root->append_rel_list = NIL;
    root->row_identity_vars = NIL;
    root->rowMarks = NIL;
    memset(root->upper_rels, 0, sizeof(root->upper_rels));
    memset(root->upper_targets, 0, sizeof(root->upper_targets));
    root->processed_groupClause = NIL;
    root->processed_distinctClause = NIL;
    root->processed_tlist = NIL;
    root->update_colnos = NIL;
    root->grouping_map = NULL;
    root->minmax_aggs = NIL;
    root->qual_security_level = 0;
    root->hasPseudoConstantQuals = false;
    root->hasAlternativeSubPlans = false;
    root->placeholdersFrozen = false;
    root->hasRecursion = hasRecursion;
    if (hasRecursion)
        root->wt_param_id = assign_special_exec_param(root);
    else
        root->wt_param_id = -1;
    root->non_recursive_path = NULL;
    root->partColsUpdated = false;

    /*
     * Create the top-level join domain.  This won't have valid contents until
     * deconstruct_jointree fills it in, but the node needs to exist before
     * that so we can build EquivalenceClasses referencing it.
     */
    root->join_domains = list_make1(makeNode(JoinDomain));

    /*
     * If there is a WITH list, process each WITH query and either convert it
     * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
     */
    if (parse->cteList)
        SS_process_ctes(root);

    /*
     * If it's a MERGE command, transform the joinlist as appropriate.
     */
    transform_MERGE_to_join(parse);

    /*
     * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
     * that we don't need so many special cases to deal with that situation.
     */
    replace_empty_jointree(parse);

    /*
     * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
     * to transform them into joins.  Note that this step does not descend
     * into subqueries; if we pull up any subqueries below, their SubLinks are
     * processed just before pulling them up.
     */
    if (parse->hasSubLinks)
        pull_up_sublinks(root);

    /*
     * Scan the rangetable for function RTEs, do const-simplification on them,
     * and then inline them if possible (producing subqueries that might get
     * pulled up next).  Recursion issues here are handled in the same way as
     * for SubLinks.
     */
    preprocess_function_rtes(root);

    /*
     * Check to see if any subqueries in the jointree can be merged into this
     * query.
     */
    pull_up_subqueries(root);

    /*
     * If this is a simple UNION ALL query, flatten it into an appendrel. We
     * do this now because it requires applying pull_up_subqueries to the leaf
     * queries of the UNION ALL, which weren't touched above because they
     * weren't referenced by the jointree (they will be after we do this).
     */
    if (parse->setOperations)
        flatten_simple_union_all(root);

    /*
     * Survey the rangetable to see what kinds of entries are present.  We can
     * skip some later processing if relevant SQL features are not used; for
     * example if there are no JOIN RTEs we can avoid the expense of doing
     * flatten_join_alias_vars().  This must be done after we have finished
     * adding rangetable entries, of course.  (Note: actually, processing of
     * inherited or partitioned rels can cause RTEs for their child tables to
     * get added later; but those must all be RTE_RELATION entries, so they
     * don't invalidate the conclusions drawn here.)
     */
    root->hasJoinRTEs = false;
    root->hasLateralRTEs = false;
    hasOuterJoins = false;
    hasResultRTEs = false;
    foreach(l, parse->rtable)
    {
        RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);

        switch (rte->rtekind)
        {
            case RTE_RELATION:
                if (rte->inh)
                {
                    /*
                     * Check to see if the relation actually has any children;
                     * if not, clear the inh flag so we can treat it as a
                     * plain base relation.
                     *
                     * Note: this could give a false-positive result, if the
                     * rel once had children but no longer does.  We used to
                     * be able to clear rte->inh later on when we discovered
                     * that, but no more; we have to handle such cases as
                     * full-fledged inheritance.
                     */
                    rte->inh = has_subclass(rte->relid);
                }
                break;
            case RTE_JOIN:
                root->hasJoinRTEs = true;
                if (IS_OUTER_JOIN(rte->jointype))
                    hasOuterJoins = true;
                break;
            case RTE_RESULT:
                hasResultRTEs = true;
                break;
            default:
                /* No work here for other RTE types */
                break;
        }

        if (rte->lateral)
            root->hasLateralRTEs = true;

        /*
         * We can also determine the maximum security level required for any
         * securityQuals now.  Addition of inheritance-child RTEs won't affect
         * this, because child tables don't have their own securityQuals; see
         * expand_single_inheritance_child().
         */
        if (rte->securityQuals)
            root->qual_security_level = Max(root->qual_security_level,
                                            list_length(rte->securityQuals));
    }

    /*
     * If we have now verified that the query target relation is
     * non-inheriting, mark it as a leaf target.
     */
    if (parse->resultRelation)
    {
        RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);

        if (!rte->inh)
            root->leaf_result_relids =
                bms_make_singleton(parse->resultRelation);
    }

    /*
     * Preprocess RowMark information.  We need to do this after subquery
     * pullup, so that all base relations are present.
     */
    preprocess_rowmarks(root);

    /*
     * Set hasHavingQual to remember if HAVING clause is present.  Needed
     * because preprocess_expression will reduce a constant-true condition to
     * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
     */
    root->hasHavingQual = (parse->havingQual != NULL);

    /*
     * Do expression preprocessing on targetlist and quals, as well as other
     * random expressions in the querytree.  Note that we do not need to
     * handle sort/group expressions explicitly, because they are actually
     * part of the targetlist.
     */
    parse->targetList = (List *)
        preprocess_expression(root, (Node *) parse->targetList,
                              EXPRKIND_TARGET);

    /* Constant-folding might have removed all set-returning functions */
    if (parse->hasTargetSRFs)
        parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);

    newWithCheckOptions = NIL;
    foreach(l, parse->withCheckOptions)
    {
        WithCheckOption *wco = lfirst_node(WithCheckOption, l);

        wco->qual = preprocess_expression(root, wco->qual,
                                          EXPRKIND_QUAL);
        if (wco->qual != NULL)
            newWithCheckOptions = lappend(newWithCheckOptions, wco);
    }
    parse->withCheckOptions = newWithCheckOptions;

    parse->returningList = (List *)
        preprocess_expression(root, (Node *) parse->returningList,
                              EXPRKIND_TARGET);

    preprocess_qual_conditions(root, (Node *) parse->jointree);

    parse->havingQual = preprocess_expression(root, parse->havingQual,
                                              EXPRKIND_QUAL);

    foreach(l, parse->windowClause)
    {
        WindowClause *wc = lfirst_node(WindowClause, l);

        /* partitionClause/orderClause are sort/group expressions */
        wc->startOffset = preprocess_expression(root, wc->startOffset,
                                                EXPRKIND_LIMIT);
        wc->endOffset = preprocess_expression(root, wc->endOffset,
                                              EXPRKIND_LIMIT);
        wc->runCondition = (List *) preprocess_expression(root,
                                                          (Node *) wc->runCondition,
                                                          EXPRKIND_TARGET);
    }

    parse->limitOffset = preprocess_expression(root, parse->limitOffset,
                                               EXPRKIND_LIMIT);
    parse->limitCount = preprocess_expression(root, parse->limitCount,
                                              EXPRKIND_LIMIT);

    if (parse->onConflict)
    {
        parse->onConflict->arbiterElems = (List *)
            preprocess_expression(root,
                                  (Node *) parse->onConflict->arbiterElems,
                                  EXPRKIND_ARBITER_ELEM);
        parse->onConflict->arbiterWhere =
            preprocess_expression(root,
                                  parse->onConflict->arbiterWhere,
                                  EXPRKIND_QUAL);
        parse->onConflict->onConflictSet = (List *)
            preprocess_expression(root,
                                  (Node *) parse->onConflict->onConflictSet,
                                  EXPRKIND_TARGET);
        parse->onConflict->onConflictWhere =
            preprocess_expression(root,
                                  parse->onConflict->onConflictWhere,
                                  EXPRKIND_QUAL);
        /* exclRelTlist contains only Vars, so no preprocessing needed */
    }

    foreach(l, parse->mergeActionList)
    {
        MergeAction *action = (MergeAction *) lfirst(l);

        action->targetList = (List *)
            preprocess_expression(root,
                                  (Node *) action->targetList,
                                  EXPRKIND_TARGET);
        action->qual =
            preprocess_expression(root,
                                  (Node *) action->qual,
                                  EXPRKIND_QUAL);
    }

    root->append_rel_list = (List *)
        preprocess_expression(root, (Node *) root->append_rel_list,
                              EXPRKIND_APPINFO);

    /* Also need to preprocess expressions within RTEs */
    foreach(l, parse->rtable)
    {
        RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
        int         kind;
        ListCell   *lcsq;

        if (rte->rtekind == RTE_RELATION)
        {
            if (rte->tablesample)
                rte->tablesample = (TableSampleClause *)
                    preprocess_expression(root,
                                          (Node *) rte->tablesample,
                                          EXPRKIND_TABLESAMPLE);
        }
        else if (rte->rtekind == RTE_SUBQUERY)
        {
            /*
             * We don't want to do all preprocessing yet on the subquery's
             * expressions, since that will happen when we plan it.  But if it
             * contains any join aliases of our level, those have to get
             * expanded now, because planning of the subquery won't do it.
             * That's only possible if the subquery is LATERAL.
             */
            if (rte->lateral && root->hasJoinRTEs)
                rte->subquery = (Query *)
                    flatten_join_alias_vars(root, root->parse,
                                            (Node *) rte->subquery);
        }
        else if (rte->rtekind == RTE_FUNCTION)
        {
            /* Preprocess the function expression(s) fully */
            kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
            rte->functions = (List *)
                preprocess_expression(root, (Node *) rte->functions, kind);
        }
        else if (rte->rtekind == RTE_TABLEFUNC)
        {
            /* Preprocess the function expression(s) fully */
            kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
            rte->tablefunc = (TableFunc *)
                preprocess_expression(root, (Node *) rte->tablefunc, kind);
        }
        else if (rte->rtekind == RTE_VALUES)
        {
            /* Preprocess the values lists fully */
            kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
            rte->values_lists = (List *)
                preprocess_expression(root, (Node *) rte->values_lists, kind);
        }

        /*
         * Process each element of the securityQuals list as if it were a
         * separate qual expression (as indeed it is).  We need to do it this
         * way to get proper canonicalization of AND/OR structure.  Note that
         * this converts each element into an implicit-AND sublist.
         */
        foreach(lcsq, rte->securityQuals)
        {
            lfirst(lcsq) = preprocess_expression(root,
                                                 (Node *) lfirst(lcsq),
                                                 EXPRKIND_QUAL);
        }
    }

    /*
     * Now that we are done preprocessing expressions, and in particular done
     * flattening join alias variables, get rid of the joinaliasvars lists.
     * They no longer match what expressions in the rest of the tree look
     * like, because we have not preprocessed expressions in those lists (and
     * do not want to; for example, expanding a SubLink there would result in
     * a useless unreferenced subplan).  Leaving them in place simply creates
     * a hazard for later scans of the tree.  We could try to prevent that by
     * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
     * but that doesn't sound very reliable.
     */
    if (root->hasJoinRTEs)
    {
        foreach(l, parse->rtable)
        {
            RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);

            rte->joinaliasvars = NIL;
        }
    }

    /*
     * In some cases we may want to transfer a HAVING clause into WHERE. We
     * cannot do so if the HAVING clause contains aggregates (obviously) or
     * volatile functions (since a HAVING clause is supposed to be executed
     * only once per group).  We also can't do this if there are any nonempty
     * grouping sets; moving such a clause into WHERE would potentially change
     * the results, if any referenced column isn't present in all the grouping
     * sets.  (If there are only empty grouping sets, then the HAVING clause
     * must be degenerate as discussed below.)
     *
     * Also, it may be that the clause is so expensive to execute that we're
     * better off doing it only once per group, despite the loss of
     * selectivity.  This is hard to estimate short of doing the entire
     * planning process twice, so we use a heuristic: clauses containing
     * subplans are left in HAVING.  Otherwise, we move or copy the HAVING
     * clause into WHERE, in hopes of eliminating tuples before aggregation
     * instead of after.
     *
     * If the query has explicit grouping then we can simply move such a
     * clause into WHERE; any group that fails the clause will not be in the
     * output because none of its tuples will reach the grouping or
     * aggregation stage.  Otherwise we must have a degenerate (variable-free)
     * HAVING clause, which we put in WHERE so that query_planner() can use it
     * in a gating Result node, but also keep in HAVING to ensure that we
     * don't emit a bogus aggregated row.  (This could be done better, but it
     * seems not worth optimizing.)
     *
     * Note that both havingQual and parse->jointree->quals are in
     * implicitly-ANDed-list form at this point, even though they are declared
     * as Node *.
     */
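    /*
     * For example, given "SELECT a, sum(b) FROM t GROUP BY a HAVING a > 0",
     * the clause "a > 0" contains no aggregates, volatile functions, or
     * subplans, so the loop below moves it into WHERE.
     */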
    newHaving = NIL;
    foreach(l, (List *) parse->havingQual)
    {
        Node       *havingclause = (Node *) lfirst(l);

        if ((parse->groupClause && parse->groupingSets) ||
            contain_agg_clause(havingclause) ||
            contain_volatile_functions(havingclause) ||
            contain_subplans(havingclause))
        {
            /* keep it in HAVING */
            newHaving = lappend(newHaving, havingclause);
        }
        else if (parse->groupClause && !parse->groupingSets)
        {
            /* move it to WHERE */
            parse->jointree->quals = (Node *)
                lappend((List *) parse->jointree->quals, havingclause);
        }
        else
        {
            /* put a copy in WHERE, keep it in HAVING */
            parse->jointree->quals = (Node *)
                lappend((List *) parse->jointree->quals,
                        copyObject(havingclause));
            newHaving = lappend(newHaving, havingclause);
        }
    }
    parse->havingQual = (Node *) newHaving;

    /*
     * If we have any outer joins, try to reduce them to plain inner joins.
     * This step is most easily done after we've done expression
     * preprocessing.
     */
    if (hasOuterJoins)
        reduce_outer_joins(root);

    /*
     * If we have any RTE_RESULT relations, see if they can be deleted from
     * the jointree.  We also rely on this processing to flatten single-child
     * FromExprs underneath outer joins.  This step is most effectively done
     * after we've done expression preprocessing and outer join reduction.
     */
    if (hasResultRTEs || hasOuterJoins)
        remove_useless_result_rtes(root);

    /*
     * Do the main planning.
     */
    grouping_planner(root, tuple_fraction);

    /*
     * Capture the set of outer-level param IDs we have access to, for use in
     * extParam/allParam calculations later.
     */
    SS_identify_outer_params(root);

    /*
     * If any initPlans were created in this query level, adjust the surviving
     * Paths' costs and parallel-safety flags to account for them.  The
     * initPlans won't actually get attached to the plan tree till
     * create_plan() runs, but we must include their effects now.
     */
    final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
    SS_charge_for_initplans(root, final_rel);

    /*
     * Make sure we've identified the cheapest Path for the final rel.  (By
     * doing this here not in grouping_planner, we include initPlan costs in
     * the decision, though it's unlikely that will change anything.)
     */
    set_cheapest(final_rel);

    return root;
}

/*
 * preprocess_expression
 *     Do subquery_planner's preprocessing work for an expression,
 *     which can be a targetlist, a WHERE clause (including JOIN/ON
 *     conditions), a HAVING clause, or a few other things.
 */
static Node *
preprocess_expression(PlannerInfo *root, Node *expr, int kind)
{
    /*
     * Fall out quickly if expression is empty.  This occurs often enough to
     * be worth checking.  Note that null->null is the correct conversion for
     * implicit-AND result format, too.
     */
    if (expr == NULL)
        return NULL;

    /*
     * If the query has any join RTEs, replace join alias variables with
     * base-relation variables.  We must do this first, since any expressions
     * we may extract from the joinaliasvars lists have not been preprocessed.
     * For example, if we did this after sublink processing, sublinks expanded
     * out from join aliases would not get processed.  But we can skip this in
     * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
     * they can't contain any Vars of the current query level.
     */
    if (root->hasJoinRTEs &&
        !(kind == EXPRKIND_RTFUNC ||
          kind == EXPRKIND_VALUES ||
          kind == EXPRKIND_TABLESAMPLE ||
          kind == EXPRKIND_TABLEFUNC))
        expr = flatten_join_alias_vars(root, root->parse, expr);

    /*
     * Simplify constant expressions.  For function RTEs, this was already
     * done by preprocess_function_rtes.  (But note we must do it again for
     * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
     * un-simplified subexpressions inserted by flattening of subqueries or
     * join alias variables.)
     *
     * Note: an essential effect of this is to convert named-argument function
     * calls to positional notation and insert the current actual values of
     * any default arguments for functions.  To ensure that happens, we *must*
     * process all expressions here.  Previous PG versions sometimes skipped
     * const-simplification if it didn't seem worth the trouble, but we can't
     * do that anymore.
     *
     * Note: this also flattens nested AND and OR expressions into N-argument
     * form.  All processing of a qual expression after this point must be
     * careful to maintain AND/OR flatness --- that is, do not generate a tree
     * with AND directly under AND, nor OR directly under OR.
     */
    if (kind != EXPRKIND_RTFUNC)
        expr = eval_const_expressions(root, expr);

    /*
     * If it's a qual or havingQual, canonicalize it.
     */
    if (kind == EXPRKIND_QUAL)
    {
        expr = (Node *) canonicalize_qual((Expr *) expr, false);

#ifdef OPTIMIZER_DEBUG
        printf("After canonicalize_qual()\n");
        pprint(expr);
#endif
    }

    /*
     * Check for ANY ScalarArrayOpExpr with Const arrays and set the
     * hashfuncid of any that might execute more quickly by using hash lookups
     * instead of a linear search.
     */
    if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
    {
        convert_saop_to_hashed_saop(expr);
    }

    /* Expand SubLinks to SubPlans */
    if (root->parse->hasSubLinks)
        expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));

    /*
     * XXX do not insert anything here unless you have grokked the comments in
     * SS_replace_correlation_vars ...
     */

    /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
    if (root->query_level > 1)
        expr = SS_replace_correlation_vars(root, expr);

    /*
     * If it's a qual or havingQual, convert it to implicit-AND format. (We
     * don't want to do this before eval_const_expressions, since the latter
     * would be unable to simplify a top-level AND correctly.  Also,
     * SS_process_sublinks expects explicit-AND format.)
     */
    if (kind == EXPRKIND_QUAL)
        expr = (Node *) make_ands_implicit((Expr *) expr);
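    /*
     * (For example, the explicit tree "(a = 1) AND (b = 2)" becomes the
     * two-element implicit-AND list containing "a = 1" and "b = 2".)
     */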

    return expr;
}

/*
 * preprocess_qual_conditions
 *     Recursively scan the query's jointree and do subquery_planner's
 *     preprocessing work on each qual condition found therein.
 */
static void
preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
{
    if (jtnode == NULL)
        return;
    if (IsA(jtnode, RangeTblRef))
    {
        /* nothing to do here */
    }
    else if (IsA(jtnode, FromExpr))
    {
        FromExpr   *f = (FromExpr *) jtnode;
        ListCell   *l;

        foreach(l, f->fromlist)
            preprocess_qual_conditions(root, lfirst(l));

        f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
    }
    else if (IsA(jtnode, JoinExpr))
    {
        JoinExpr   *j = (JoinExpr *) jtnode;

        preprocess_qual_conditions(root, j->larg);
        preprocess_qual_conditions(root, j->rarg);

        j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
    }
    else
        elog(ERROR, "unrecognized node type: %d",
             (int) nodeTag(jtnode));
}

/*
 * preprocess_phv_expression
 *     Do preprocessing on a PlaceHolderVar expression that's been pulled up.
 *
 * If a LATERAL subquery references an output of another subquery, and that
 * output must be wrapped in a PlaceHolderVar because of an intermediate outer
 * join, then we'll push the PlaceHolderVar expression down into the subquery
 * and later pull it back up during find_lateral_references, which runs after
 * subquery_planner has preprocessed all the expressions that were in the
 * current query level to start with.  So we need to preprocess it then.
 */
Expr *
preprocess_phv_expression(PlannerInfo *root, Expr *expr)
{
    return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
}

/*--------------------
 * grouping_planner
 *     Perform planning steps related to grouping, aggregation, etc.
 *
 * This function adds all required top-level processing to the scan/join
 * Path(s) produced by query_planner.
 *
 * tuple_fraction is the fraction of tuples we expect will be retrieved.
 * tuple_fraction is interpreted as follows:
 *     0: expect all tuples to be retrieved (normal case)
 *     0 < tuple_fraction < 1: expect the given fraction of tuples available
 *       from the plan to be retrieved
 *     tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
 *       expected to be retrieved (ie, a LIMIT specification)
 *
 * Returns nothing; the useful output is in the Paths we attach to the
 * (UPPERREL_FINAL, NULL) upperrel in *root.  In addition,
 * root->processed_tlist contains the final processed targetlist.
 *
 * Note that we have not done set_cheapest() on the final rel; it's convenient
 * to leave this to the caller.
 *--------------------
 */
static void
grouping_planner(PlannerInfo *root, double tuple_fraction)
{
    Query      *parse = root->parse;
    int64       offset_est = 0;
    int64       count_est = 0;
    double      limit_tuples = -1.0;
    bool        have_postponed_srfs = false;
    PathTarget *final_target;
    List       *final_targets;
    List       *final_targets_contain_srfs;
    bool        final_target_parallel_safe;
    RelOptInfo *current_rel;
    RelOptInfo *final_rel;
    FinalPathExtraData extra;
    ListCell   *lc;

    /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
    if (parse->limitCount || parse->limitOffset)
    {
        tuple_fraction = preprocess_limit(root, tuple_fraction,
                                          &offset_est, &count_est);

        /*
         * If we have a known LIMIT, and don't have an unknown OFFSET, we can
         * estimate the effects of using a bounded sort.
         */
        if (count_est > 0 && offset_est >= 0)
            limit_tuples = (double) count_est + (double) offset_est;
    }
1303 :
1304 : /* Make tuple_fraction accessible to lower-level routines */
2589 1305 225492 : root->tuple_fraction = tuple_fraction;
1306 :
8221 1307 225492 : if (parse->setOperations)
1308 : {
6512 tgl 1309 ECB : /*
1310 : * If there's a top-level ORDER BY, assume we have to fetch all the
3260 bruce 1311 : * tuples. This might be too simplistic given all the hackery below
1312 : * to possibly avoid the sort; but the odds of accurate estimates here
1313 : * are pretty low anyway. XXX try to get rid of this in favor of
1314 : * letting plan_set_operations generate both fast-start and
1315 : * cheapest-total paths.
1316 : */
6512 tgl 1317 GIC 2591 : if (parse->sortClause)
2589 tgl 1318 CBC 1478 : root->tuple_fraction = 0.0;
6512 tgl 1319 ECB :
1320 : /*
1321 : * Construct Paths for set operations. The results will not need any
1322 : * work except perhaps a top-level sort and/or LIMIT. Note that any
2589 1323 : * special work for recursive unions is the responsibility of
1324 : * plan_set_operations.
8221 1325 : */
2589 tgl 1326 GIC 2591 : current_rel = plan_set_operations(root);
1327 :
1328 : /*
1329 : * We should not need to call preprocess_targetlist, since we must be
1330 : * in a SELECT query node. Instead, use the processed_tlist returned
1331 : * by plan_set_operations (since this tells whether it returned any
1332 : * resjunk columns!), and transfer any sort key information from the
1333 : * original tlist.
1334 : */
8221 tgl 1335 CBC 2588 : Assert(parse->commandType == CMD_SELECT);
8397 bruce 1336 ECB :
1337 : /* for safety, copy processed_tlist instead of modifying in-place */
1474 tgl 1338 GIC 2588 : root->processed_tlist =
1339 2588 : postprocess_setop_tlist(copyObject(root->processed_tlist),
1340 : parse->targetList);
1341 :
1342 : /* Also extract the PathTarget form of the setop result tlist */
2585 1343 2588 : final_target = current_rel->cheapest_total_path->pathtarget;
2585 tgl 1344 ECB :
1345 : /* And check whether it's parallel safe */
1346 : final_target_parallel_safe =
1858 rhaas 1347 GIC 2588 : is_parallel_safe(root, (Node *) final_target->exprs);
1348 :
1349 : /* The setop result tlist couldn't contain any SRFs */
2272 andres 1350 2588 : Assert(!parse->hasTargetSRFs);
1351 2588 : final_targets = final_targets_contain_srfs = NIL;
1352 :
8159 tgl 1353 ECB : /*
1354 : * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1355 : * checked already, but let's make sure).
1356 : */
8159 tgl 1357 CBC 2588 : if (parse->rowMarks)
7198 tgl 1358 UIC 0 : ereport(ERROR,
1359 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1360 : /*------
3260 bruce 1361 ECB : translator: %s is a SQL row locking clause such as FOR UPDATE */
1362 : errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1363 : LCS_asString(linitial_node(RowMarkClause,
1364 : parse->rowMarks)->strength))));
8159 tgl 1365 :
1366 : /*
1367 : * Calculate pathkeys that represent result ordering requirements
8397 bruce 1368 : */
5365 tgl 1369 CBC 2588 : Assert(parse->distinctClause == NIL);
5360 tgl 1370 GIC 2588 : root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1371 : parse->sortClause,
1372 : root->processed_tlist);
1373 : }
1374 : else
9243 bruce 1375 ECB : {
7459 tgl 1376 EUB : /* No set operations, do regular planning */
1377 : PathTarget *sort_input_target;
1378 : List *sort_input_targets;
1379 : List *sort_input_targets_contain_srfs;
1380 : bool sort_input_target_parallel_safe;
1381 : PathTarget *grouping_target;
1382 : List *grouping_targets;
1383 : List *grouping_targets_contain_srfs;
1384 : bool grouping_target_parallel_safe;
1385 : PathTarget *scanjoin_target;
1386 : List *scanjoin_targets;
2272 andres 1387 ECB : List *scanjoin_targets_contain_srfs;
1858 rhaas 1388 : bool scanjoin_target_parallel_safe;
1389 : bool scanjoin_target_same_exprs;
1390 : bool have_grouping;
5215 tgl 1391 GIC 222901 : WindowFuncLists *wflists = NULL;
1392 222901 : List *activeWindows = NIL;
2204 rhodiumtoad 1393 222901 : grouping_sets_data *gset_data = NULL;
1394 : standard_qp_extra qp_extra;
1395 :
1396 : /* A recursive query should always have setOperations */
5300 tgl 1397 222901 : Assert(!root->hasRecursion);
1398 :
1399 : /* Preprocess grouping sets and GROUP BY clause, if any */
2885 andres 1400 222901 : if (parse->groupingSets)
1401 : {
2204 rhodiumtoad 1402 367 : gset_data = preprocess_grouping_sets(root);
1403 : }
81 tgl 1404 GNC 222534 : else if (parse->groupClause)
1405 : {
1406 : /* Preprocess regular GROUP BY clause, if any */
1407 1598 : root->processed_groupClause = preprocess_groupclause(root, NIL);
1408 : /* Remove any redundant GROUP BY columns */
1409 1598 : remove_useless_groupby_columns(root);
2885 andres 1410 ECB : }
1411 :
2589 tgl 1412 : /*
1413 : * Preprocess targetlist. Note that much of the remaining planning
1414 : * work will be done with the PathTarget representation of tlists, but
1415 : * we must also maintain the full representation of the final tlist so
1416 : * that we can transfer its decoration (resnames etc) to the topmost
1417 : * tlist of the finished Plan. This is kept in processed_tlist.
1418 : */
739 tgl 1419 CBC 222898 : preprocess_targetlist(root);
1420 :
2472 tgl 1421 ECB : /*
1422 : * Mark all the aggregates with resolved aggtranstypes, and detect
866 heikki.linnakangas 1423 : * aggregates that are duplicates or can share transition state. We
1424 : * must do this before slicing and dicing the tlist into various
1425 : * pathtargets, else some copies of the Aggref nodes might escape
1426 : * being marked.
1427 : */
2472 tgl 1428 CBC 222898 : if (parse->hasAggs)
1429 : {
866 heikki.linnakangas 1430 GIC 16002 : preprocess_aggrefs(root, (Node *) root->processed_tlist);
1431 16002 : preprocess_aggrefs(root, (Node *) parse->havingQual);
1432 : }
1433 :
1434 : /*
1435 : * Locate any window functions in the tlist. (We don't need to look
1436 : * anywhere else, since expressions used in ORDER BY will be in there
1437 : * too.) Note that they could all have been eliminated by constant
5215 tgl 1438 ECB : * folding, in which case we don't need to do any more work.
1439 : */
5215 tgl 1440 GIC 222898 : if (parse->hasWindowFuncs)
1441 : {
1474 1442 1020 : wflists = find_window_functions((Node *) root->processed_tlist,
5215 1443 1020 : list_length(parse->windowClause));
1444 1020 : if (wflists->numWindowFuncs > 0)
1445 : {
1446 : /*
1447 : * See if any modifications can be made to each WindowClause
1448 : * to allow the executor to execute the WindowFuncs more
1449 : * quickly.
1450 : */
107 drowley 1451 GNC 1017 : optimize_window_clauses(root, wflists);
1452 :
5215 tgl 1453 GIC 1017 : activeWindows = select_active_windows(root, wflists);
1454 : }
1455 : else
5215 tgl 1456 CBC 3 : parse->hasWindowFuncs = false;
1457 : }
5215 tgl 1458 ECB :
4539 1459 : /*
1460 : * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1461 : * adding logic between here and the query_planner() call. Anything
1462 : * that is needed in MIN/MAX-optimizable cases will have to be
1463 : * duplicated in planagg.c.
1464 : */
4539 tgl 1465 GIC 222898 : if (parse->hasAggs)
1474 1466 16002 : preprocess_minmax_aggregates(root);
1467 :
4525 tgl 1468 ECB : /*
1469 : * Figure out whether there's a hard limit on the number of rows that
1470 : * query_planner's result subplan needs to return. Even if we know a
1471 : * hard limit overall, it doesn't apply if the query has any
2399 1472 : * grouping/aggregation operations, or SRFs in the tlist.
1473 : */
4525 tgl 1474 GIC 222898 : if (parse->groupClause ||
2885 andres 1475 220957 : parse->groupingSets ||
4525 tgl 1476 220936 : parse->distinctClause ||
1477 219971 : parse->hasAggs ||
1478 205715 : parse->hasWindowFuncs ||
2399 tgl 1479 CBC 204761 : parse->hasTargetSRFs ||
4525 tgl 1480 GIC 201708 : root->hasHavingQual)
3534 tgl 1481 CBC 21199 : root->limit_tuples = -1.0;
1482 : else
3534 tgl 1483 GIC 201699 : root->limit_tuples = limit_tuples;
4525 tgl 1484 ECB :
1485 : /* Set up data needed by standard_qp_callback */
3632 tgl 1486 GIC 222898 : qp_extra.activeWindows = activeWindows;
81 tgl 1487 GNC 222898 : qp_extra.gset_data = gset_data;
1488 :
1489 : /*
1490 : * Generate the best unsorted and presorted paths for the scan/join
2589 tgl 1491 ECB : * portion of this Query, ie the processing represented by the
1492 : * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1493 : * We also generate (in standard_qp_callback) pathkey representations
1494 : * of the query's sort clause, distinct clause, etc.
1495 : */
1474 tgl 1496 GIC 222898 : current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1497 :
1498 : /*
1499 : * Convert the query's result tlist into PathTarget format.
2589 tgl 1500 ECB : *
1474 1501 : * Note: this cannot be done before query_planner() has performed
1502 : * appendrel expansion, because that might add resjunk entries to
1503 : * root->processed_tlist. Waiting till afterwards is also helpful
2589 1504 : * because the target width estimates can use per-Var width numbers
1505 : * that were obtained within query_planner().
7459 1506 : */
1474 tgl 1507 CBC 222878 : final_target = create_pathtarget(root, root->processed_tlist);
1508 : final_target_parallel_safe =
1858 rhaas 1509 222878 : is_parallel_safe(root, (Node *) final_target->exprs);
1510 :
1511 : /*
2585 tgl 1512 ECB : * If ORDER BY was given, consider whether we should use a post-sort
1513 : * projection, and compute the adjusted target for preceding steps if
1514 : * so.
1515 : */
2585 tgl 1516 GIC 222878 : if (parse->sortClause)
1517 : {
1518 23324 : sort_input_target = make_sort_input_target(root,
1519 : final_target,
1520 : &have_postponed_srfs);
1521 : sort_input_target_parallel_safe =
1858 rhaas 1522 CBC 23324 : is_parallel_safe(root, (Node *) sort_input_target->exprs);
1523 : }
1524 : else
1525 : {
2585 tgl 1526 GIC 199554 : sort_input_target = final_target;
1858 rhaas 1527 199554 : sort_input_target_parallel_safe = final_target_parallel_safe;
1528 : }
1529 :
1530 : /*
1531 : * If we have window functions to deal with, the output from any
1532 : * grouping step needs to be what the window functions want;
2585 tgl 1533 ECB : * otherwise, it should be sort_input_target.
1534 : */
2587 tgl 1535 CBC 222878 : if (activeWindows)
1536 : {
2587 tgl 1537 GIC 1017 : grouping_target = make_window_input_target(root,
1538 : final_target,
1539 : activeWindows);
1540 : grouping_target_parallel_safe =
1858 rhaas 1541 1017 : is_parallel_safe(root, (Node *) grouping_target->exprs);
1858 rhaas 1542 ECB : }
1543 : else
1544 : {
2585 tgl 1545 GIC 221861 : grouping_target = sort_input_target;
1858 rhaas 1546 221861 : grouping_target_parallel_safe = sort_input_target_parallel_safe;
1547 : }
2587 tgl 1548 ECB :
1549 : /*
1550 : * If we have grouping or aggregation to do, the topmost scan/join
1551 : * plan node must emit what the grouping step wants; otherwise, it
2585 1552 : * should emit grouping_target.
2587 1553 : */
2587 tgl 1554 GIC 220937 : have_grouping = (parse->groupClause || parse->groupingSets ||
1555 443815 : parse->hasAggs || root->hasHavingQual);
1556 222878 : if (have_grouping)
1557 : {
2585 1558 16240 : scanjoin_target = make_group_input_target(root, final_target);
1559 : scanjoin_target_parallel_safe =
1489 efujita 1560 16240 : is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1858 rhaas 1561 ECB : }
1562 : else
1563 : {
2587 tgl 1564 GIC 206638 : scanjoin_target = grouping_target;
1858 rhaas 1565 206638 : scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1566 : }
2587 tgl 1567 ECB :
1568 : /*
1569 : * If there are any SRFs in the targetlist, we must separate each of
1570 : * these PathTargets into SRF-computing and SRF-free targets. Replace
2272 andres 1571 : * each of the named targets with a SRF-free version, and remember the
1572 : * list of additional projection steps we need to add afterwards.
1573 : */
2272 andres 1574 GIC 222878 : if (parse->hasTargetSRFs)
1575 : {
1576 : /* final_target doesn't recompute any SRFs in sort_input_target */
1577 3246 : split_pathtarget_at_srfs(root, final_target, sort_input_target,
1578 : &final_targets,
1579 : &final_targets_contain_srfs);
2042 tgl 1580 CBC 3246 : final_target = linitial_node(PathTarget, final_targets);
2272 andres 1581 3246 : Assert(!linitial_int(final_targets_contain_srfs));
2272 andres 1582 ECB : /* likewise for sort_input_target vs. grouping_target */
2272 andres 1583 GIC 3246 : split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
2272 andres 1584 ECB : &sort_input_targets,
1585 : &sort_input_targets_contain_srfs);
2042 tgl 1586 CBC 3246 : sort_input_target = linitial_node(PathTarget, sort_input_targets);
2272 andres 1587 GIC 3246 : Assert(!linitial_int(sort_input_targets_contain_srfs));
1588 : /* likewise for grouping_target vs. scanjoin_target */
1589 3246 : split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
2272 andres 1590 ECB : &grouping_targets,
1591 : &grouping_targets_contain_srfs);
2042 tgl 1592 GIC 3246 : grouping_target = linitial_node(PathTarget, grouping_targets);
2272 andres 1593 3246 : Assert(!linitial_int(grouping_targets_contain_srfs));
1594 : /* scanjoin_target will not have any SRFs precomputed for it */
1595 3246 : split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1596 : &scanjoin_targets,
1597 : &scanjoin_targets_contain_srfs);
2042 tgl 1598 3246 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
2272 andres 1599 3246 : Assert(!linitial_int(scanjoin_targets_contain_srfs));
2272 andres 1600 ECB : }
1601 : else
1602 : {
1837 rhaas 1603 : /* initialize lists; for most of these, dummy values are OK */
2272 andres 1604 GIC 219632 : final_targets = final_targets_contain_srfs = NIL;
1605 219632 : sort_input_targets = sort_input_targets_contain_srfs = NIL;
2272 andres 1606 CBC 219632 : grouping_targets = grouping_targets_contain_srfs = NIL;
1837 rhaas 1607 219632 : scanjoin_targets = list_make1(scanjoin_target);
1837 rhaas 1608 GIC 219632 : scanjoin_targets_contain_srfs = NIL;
2272 andres 1609 ECB : }
1610 :
1611 : /* Apply scan/join target. */
1837 rhaas 1612 CBC 222878 : scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1613 222878 : && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1837 rhaas 1614 GIC 222878 : apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1837 rhaas 1615 ECB : scanjoin_targets_contain_srfs,
1616 : scanjoin_target_parallel_safe,
1617 : scanjoin_target_same_exprs);
2272 andres 1618 :
2582 tgl 1619 : /*
1620 : * Save the various upper-rel PathTargets we just computed into
1621 : * root->upper_targets[]. The core code doesn't use this, but it
1622 : * provides a convenient place for extensions to get at the info. For
1623 : * consistency, we save all the intermediate targets, even though some
1624 : * of the corresponding upperrels might not be needed for this query.
1625 : */
2582 tgl 1626 GIC 222878 : root->upper_targets[UPPERREL_FINAL] = final_target;
1511 efujita 1627 222878 : root->upper_targets[UPPERREL_ORDERED] = final_target;
595 drowley 1628 222878 : root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1511 efujita 1629 222878 : root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
2582 tgl 1630 CBC 222878 : root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1631 222878 : root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
2582 tgl 1632 ECB :
2589 1633 : /*
1634 : * If we have grouping and/or aggregation, consider ways to implement
1635 : * that. We build a new upperrel representing the output of this
1636 : * phase.
1637 : */
2587 tgl 1638 CBC 222878 : if (have_grouping)
7459 tgl 1639 ECB : {
2589 tgl 1640 CBC 16240 : current_rel = create_grouping_paths(root,
1641 : current_rel,
1642 : grouping_target,
1643 : grouping_target_parallel_safe,
1644 : gset_data);
1645 : /* Fix things up if grouping_target contains SRFs */
2272 andres 1646 GIC 16237 : if (parse->hasTargetSRFs)
1647 172 : adjust_paths_for_srfs(root, current_rel,
1648 : grouping_targets,
1649 : grouping_targets_contain_srfs);
1650 : }
1651 :
3534 tgl 1652 ECB : /*
2589 1653 : * If we have window functions, consider ways to implement those. We
1654 : * build a new upperrel representing the output of this phase.
3534 1655 : */
2589 tgl 1656 CBC 222875 : if (activeWindows)
2589 tgl 1657 ECB : {
2589 tgl 1658 GIC 1017 : current_rel = create_window_paths(root,
1659 : current_rel,
1660 : grouping_target,
1661 : sort_input_target,
1662 : sort_input_target_parallel_safe,
1663 : wflists,
2589 tgl 1664 ECB : activeWindows);
1665 : /* Fix things up if sort_input_target contains SRFs */
2272 andres 1666 CBC 1017 : if (parse->hasTargetSRFs)
2272 andres 1667 GIC 6 : adjust_paths_for_srfs(root, current_rel,
1668 : sort_input_targets,
1669 : sort_input_targets_contain_srfs);
1670 : }
1671 :
3534 tgl 1672 ECB : /*
2589 1673 : * If there is a DISTINCT clause, consider ways to implement that. We
1674 : * build a new upperrel representing the output of this phase.
1675 : */
2589 tgl 1676 GIC 222875 : if (parse->distinctClause)
1677 : {
1678 970 : current_rel = create_distinct_paths(root,
1679 : current_rel);
1680 : }
1681 : } /* end of if (setOperations) */
2885 andres 1682 ECB :
1683 : /*
2589 tgl 1684 : * If ORDER BY was given, consider ways to implement that, and generate a
1685 : * new upperrel containing only paths that emit the correct ordering and
1686 : * project the correct final_target. We can apply the original
1687 : * limit_tuples limit in sort costing here, but only if there are no
1688 : * postponed SRFs.
1689 : */
2589 tgl 1690 GIC 225463 : if (parse->sortClause)
1691 : {
2589 tgl 1692 CBC 24802 : current_rel = create_ordered_paths(root,
2589 tgl 1693 ECB : current_rel,
1694 : final_target,
1695 : final_target_parallel_safe,
1696 : have_postponed_srfs ? -1.0 :
1697 : limit_tuples);
1698 : /* Fix things up if final_target contains SRFs */
2272 andres 1699 GIC 24802 : if (parse->hasTargetSRFs)
1700 98 : adjust_paths_for_srfs(root, current_rel,
1701 : final_targets,
2272 andres 1702 ECB : final_targets_contain_srfs);
1703 : }
2585 tgl 1704 :
1705 : /*
1706 : * Now we are prepared to build the final-output upperrel.
1707 : */
2589 tgl 1708 GIC 225463 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1709 :
1710 : /*
1711 : * If the input rel is marked consider_parallel and there's nothing that's
1712 : * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1713 : * consider_parallel as well. Note that if the query has rowMarks or is
1714 : * not a SELECT, consider_parallel will be false for every relation in the
1715 : * query.
2473 rhaas 1716 ECB : */
2473 rhaas 1717 GIC 287717 : if (current_rel->consider_parallel &&
2424 tgl 1718 CBC 124502 : is_parallel_safe(root, parse->limitOffset) &&
2424 tgl 1719 GIC 62248 : is_parallel_safe(root, parse->limitCount))
2473 rhaas 1720 62245 : final_rel->consider_parallel = true;
1721 :
1722 : /*
1723 : * If the current_rel belongs to a single FDW, so does the final_rel.
1724 : */
2473 tgl 1725 CBC 225463 : final_rel->serverid = current_rel->serverid;
2459 1726 225463 : final_rel->userid = current_rel->userid;
2459 tgl 1727 GIC 225463 : final_rel->useridiscurrent = current_rel->useridiscurrent;
2473 1728 225463 : final_rel->fdwroutine = current_rel->fdwroutine;
1729 :
1730 : /*
1731 : * Generate paths for the final_rel. Insert all surviving paths, with
1732 : * LockRows, Limit, and/or ModifyTable steps added if needed.
1733 : */
2589 tgl 1734 CBC 454690 : foreach(lc, current_rel->pathlist)
1735 : {
2589 tgl 1736 GIC 229227 : Path *path = (Path *) lfirst(lc);
1737 :
1738 : /*
1739 : * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1740 : * (Note: we intentionally test parse->rowMarks not root->rowMarks
1741 : * here. If there are only non-locking rowmarks, they should be
1742 : * handled by the ModifyTable node instead. However, root->rowMarks
2589 tgl 1743 ECB : * is what goes into the LockRows node.)
8742 1744 : */
2589 tgl 1745 CBC 229227 : if (parse->rowMarks)
2589 tgl 1746 ECB : {
2589 tgl 1747 GIC 3602 : path = (Path *) create_lockrows_path(root, final_rel, path,
1748 : root->rowMarks,
1749 : assign_special_exec_param(root));
1750 : }
8742 tgl 1751 ECB :
8631 1752 : /*
2589 1753 : * If there is a LIMIT/OFFSET clause, add the LIMIT node.
8631 1754 : */
2589 tgl 1755 GIC 229227 : if (limit_needed(parse))
1756 : {
1757 2817 : path = (Path *) create_limit_path(root, final_rel, path,
1758 : parse->limitOffset,
1759 : parse->limitCount,
1097 alvherre 1760 ECB : parse->limitOption,
1761 : offset_est, count_est);
6572 tgl 1762 : }
1763 :
1764 : /*
1765 : * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1766 : */
739 tgl 1767 GIC 229227 : if (parse->commandType != CMD_SELECT)
1768 : {
1769 : Index rootRelation;
1770 52267 : List *resultRelations = NIL;
739 tgl 1771 CBC 52267 : List *updateColnosLists = NIL;
739 tgl 1772 GIC 52267 : List *withCheckOptionLists = NIL;
739 tgl 1773 CBC 52267 : List *returningLists = NIL;
377 alvherre 1774 GIC 52267 : List *mergeActionLists = NIL;
1775 : List *rowMarks;
1776 :
739 tgl 1777 52267 : if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1778 : {
1779 : /* Inherited UPDATE/DELETE/MERGE */
1780 1151 : RelOptInfo *top_result_rel = find_base_rel(root,
739 tgl 1781 ECB : parse->resultRelation);
739 tgl 1782 GIC 1151 : int resultRelation = -1;
739 tgl 1783 ECB :
1784 : /* Add only leaf children to ModifyTable. */
739 tgl 1785 GIC 3406 : while ((resultRelation = bms_next_member(root->leaf_result_relids,
1786 3406 : resultRelation)) >= 0)
1787 : {
1788 2255 : RelOptInfo *this_result_rel = find_base_rel(root,
1789 : resultRelation);
1790 :
1791 : /*
1792 : * Also exclude any leaf rels that have turned dummy since
739 tgl 1793 ECB : * being added to the list, for example, by being excluded
1794 : * by constraint exclusion.
1795 : */
739 tgl 1796 CBC 2255 : if (IS_DUMMY_REL(this_result_rel))
1797 36 : continue;
739 tgl 1798 ECB :
1799 : /* Build per-target-rel lists needed by ModifyTable */
739 tgl 1800 CBC 2219 : resultRelations = lappend_int(resultRelations,
1801 : resultRelation);
739 tgl 1802 GIC 2219 : if (parse->commandType == CMD_UPDATE)
739 tgl 1803 ECB : {
739 tgl 1804 GIC 1653 : List *update_colnos = root->update_colnos;
1805 :
739 tgl 1806 CBC 1653 : if (this_result_rel != top_result_rel)
1807 : update_colnos =
1808 1653 : adjust_inherited_attnums_multilevel(root,
1809 : update_colnos,
1810 : this_result_rel->relid,
739 tgl 1811 ECB : top_result_rel->relid);
739 tgl 1812 CBC 1653 : updateColnosLists = lappend(updateColnosLists,
1813 : update_colnos);
739 tgl 1814 ECB : }
739 tgl 1815 GIC 2219 : if (parse->withCheckOptions)
1816 : {
1817 213 : List *withCheckOptions = parse->withCheckOptions;
1818 :
1819 213 : if (this_result_rel != top_result_rel)
1820 : withCheckOptions = (List *)
1821 213 : adjust_appendrel_attrs_multilevel(root,
739 tgl 1822 ECB : (Node *) withCheckOptions,
1823 : this_result_rel,
1824 : top_result_rel);
739 tgl 1825 GIC 213 : withCheckOptionLists = lappend(withCheckOptionLists,
739 tgl 1826 ECB : withCheckOptions);
1827 : }
739 tgl 1828 CBC 2219 : if (parse->returningList)
1829 : {
1830 252 : List *returningList = parse->returningList;
1831 :
1832 252 : if (this_result_rel != top_result_rel)
1833 : returningList = (List *)
1834 252 : adjust_appendrel_attrs_multilevel(root,
1835 : (Node *) returningList,
1836 : this_result_rel,
1837 : top_result_rel);
1838 252 : returningLists = lappend(returningLists,
1839 : returningList);
1840 : }
377 alvherre 1841 2219 : if (parse->mergeActionList)
1842 : {
377 alvherre 1843 ECB : ListCell *l;
377 alvherre 1844 GIC 112 : List *mergeActionList = NIL;
377 alvherre 1845 ECB :
1846 : /*
1847 : * Copy MergeActions and translate stuff that
1848 : * references attribute numbers.
1849 : */
377 alvherre 1850 GIC 339 : foreach(l, parse->mergeActionList)
377 alvherre 1851 ECB : {
377 alvherre 1852 GIC 227 : MergeAction *action = lfirst(l),
1853 227 : *leaf_action = copyObject(action);
377 alvherre 1854 ECB :
377 alvherre 1855 GIC 227 : leaf_action->qual =
377 alvherre 1856 CBC 227 : adjust_appendrel_attrs_multilevel(root,
1857 : (Node *) action->qual,
1858 : this_result_rel,
1859 : top_result_rel);
1860 227 : leaf_action->targetList = (List *)
377 alvherre 1861 GIC 227 : adjust_appendrel_attrs_multilevel(root,
1862 227 : (Node *) action->targetList,
1863 : this_result_rel,
1864 : top_result_rel);
1865 227 : if (leaf_action->commandType == CMD_UPDATE)
1866 108 : leaf_action->updateColnos =
377 alvherre 1867 CBC 108 : adjust_inherited_attnums_multilevel(root,
1868 : action->updateColnos,
1869 : this_result_rel->relid,
377 alvherre 1870 ECB : top_result_rel->relid);
377 alvherre 1871 GIC 227 : mergeActionList = lappend(mergeActionList,
1872 : leaf_action);
1873 : }
1874 :
1875 112 : mergeActionLists = lappend(mergeActionLists,
377 alvherre 1876 ECB : mergeActionList);
1877 : }
739 tgl 1878 : }
1879 :
739 tgl 1880 GIC 1151 : if (resultRelations == NIL)
739 tgl 1881 ECB : {
1882 : /*
1883 : * We managed to exclude every child rel, so generate a
1884 : * dummy one-relation plan using info for the top target
1885 : * rel (even though that may not be a leaf target).
1886 : * Although it's clear that no data will be updated or
1887 : * deleted, we still need to have a ModifyTable node so
1888 : * that any statement triggers will be executed. (This
1889 : * could be cleaner if we fixed nodeModifyTable.c to allow
1890 : * zero target relations, but that probably wouldn't be a
1891 : * net win.)
1892 : */
739 tgl 1893 CBC 15 : resultRelations = list_make1_int(parse->resultRelation);
739 tgl 1894 GIC 15 : if (parse->commandType == CMD_UPDATE)
1895 15 : updateColnosLists = list_make1(root->update_colnos);
1896 15 : if (parse->withCheckOptions)
739 tgl 1897 LBC 0 : withCheckOptionLists = list_make1(parse->withCheckOptions);
739 tgl 1898 GIC 15 : if (parse->returningList)
1899 9 : returningLists = list_make1(parse->returningList);
377 alvherre 1900 15 : if (parse->mergeActionList)
377 alvherre 1901 LBC 0 : mergeActionLists = list_make1(parse->mergeActionList);
1902 : }
1903 : }
1904 : else
1905 : {
167 alvherre 1906 ECB : /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
739 tgl 1907 GIC 51116 : resultRelations = list_make1_int(parse->resultRelation);
1908 51116 : if (parse->commandType == CMD_UPDATE)
1909 6718 : updateColnosLists = list_make1(root->update_colnos);
1910 51116 : if (parse->withCheckOptions)
1911 406 : withCheckOptionLists = list_make1(parse->withCheckOptions);
1912 51116 : if (parse->returningList)
1913 1008 : returningLists = list_make1(parse->returningList);
377 alvherre 1914 51116 : if (parse->mergeActionList)
1915 418 : mergeActionLists = list_make1(parse->mergeActionList);
1916 : }
1917 :
1918 : /*
1645 tgl 1919 ECB : * If target is a partition root table, we need to mark the
1920 : * ModifyTable node appropriately for that.
1921 : */
1645 tgl 1922 CBC 52267 : if (rt_fetch(parse->resultRelation, parse->rtable)->relkind ==
1645 tgl 1923 EUB : RELKIND_PARTITIONED_TABLE)
1645 tgl 1924 CBC 3595 : rootRelation = parse->resultRelation;
1645 tgl 1925 ECB : else
1645 tgl 1926 CBC 48672 : rootRelation = 0;
1645 tgl 1927 EUB :
1928 : /*
1929 : * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
1930 : * will have dealt with fetching non-locked marked rows, else we
1931 : * need to have ModifyTable do that.
1932 : */
2589 tgl 1933 CBC 52267 : if (parse->rowMarks)
2589 tgl 1934 LBC 0 : rowMarks = NIL;
2589 tgl 1935 ECB : else
2589 tgl 1936 CBC 52267 : rowMarks = root->rowMarks;
2885 andres 1937 ECB :
2589 tgl 1938 : path = (Path *)
2589 tgl 1939 CBC 52267 : create_modifytable_path(root, final_rel,
739 tgl 1940 ECB : path,
2589 1941 : parse->commandType,
2589 tgl 1942 GIC 52267 : parse->canSetTag,
1943 52267 : parse->resultRelation,
1944 : rootRelation,
739 1945 52267 : root->partColsUpdated,
1946 : resultRelations,
1947 : updateColnosLists,
2589 tgl 1948 ECB : withCheckOptionLists,
1949 : returningLists,
1950 : rowMarks,
1951 : parse->onConflict,
377 alvherre 1952 : mergeActionLists,
1953 : assign_special_exec_param(root));
1954 : }
1955 :
1956 : /* And shove it into final_rel */
2589 tgl 1957 GIC 229227 : add_path(final_rel, path);
1958 : }
2885 andres 1959 ECB :
1853 rhaas 1960 EUB : /*
1961 : * Generate partial paths for final_rel, too, if outer query levels might
1853 rhaas 1962 ECB : * be able to make use of them.
1963 : */
1853 rhaas 1964 GIC 225463 : if (final_rel->consider_parallel && root->query_level > 1 &&
1853 rhaas 1965 CBC 7179 : !limit_needed(parse))
1966 : {
1853 rhaas 1967 GIC 7102 : Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
1853 rhaas 1968 CBC 7147 : foreach(lc, current_rel->partial_pathlist)
1853 rhaas 1969 ECB : {
1853 rhaas 1970 GIC 45 : Path *partial_path = (Path *) lfirst(lc);
1853 rhaas 1971 ECB :
1853 rhaas 1972 GIC 45 : add_partial_path(final_rel, partial_path);
1973 : }
1974 : }
1975 :
1468 efujita 1976 225463 : extra.limit_needed = limit_needed(parse);
1977 225463 : extra.limit_tuples = limit_tuples;
1978 225463 : extra.count_est = count_est;
1979 225463 : extra.offset_est = offset_est;
1980 :
1981 : /*
1982 : * If there is an FDW that's responsible for all baserels of the query,
2473 tgl 1983 ECB : * let it consider adding ForeignPaths.
1984 : */
2473 tgl 1985 GIC 225463 : if (final_rel->fdwroutine &&
1986 591 : final_rel->fdwroutine->GetForeignUpperPaths)
1987 561 : final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
1988 : current_rel, final_rel,
1989 : &extra);
2473 tgl 1990 ECB :
2553 1991 : /* Let extensions possibly add some more paths */
2553 tgl 1992 GIC 225463 : if (create_upper_paths_hook)
2553 tgl 1993 LBC 0 : (*create_upper_paths_hook) (root, UPPERREL_FINAL,
1468 efujita 1994 ECB : current_rel, final_rel, &extra);
1995 :
2589 tgl 1996 : /* Note: currently, we leave it to callers to do set_cheapest() */
2589 tgl 1997 GIC 225463 : }
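/*
 * Illustrative sketch (not part of planner.c): the order in which
 * grouping_planner stacks its upper processing steps, as a small data
 * table.  The phase labels are descriptive stand-ins, not planner
 * identifiers.
 */
#include <stdio.h>

int
main(void)
{
    static const char *const phases[] = {
        "scan/join paths (query_planner or plan_set_operations)",
        "grouping / aggregation",
        "window functions",
        "DISTINCT",
        "ORDER BY",
        "final: LockRows, Limit, ModifyTable",
    };

    for (int i = 0; i < 6; i++)
        printf("%d. %s\n", i + 1, phases[i]);
    return 0;
}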
9770 scrappy 1998 ECB :
1999 : /*
2000 : * Do preprocessing for groupingSets clause and related data. This handles the
2001 : * preliminary steps of expanding the grouping sets, organizing them into lists
2204 rhodiumtoad 2002 : * of rollups, and preparing annotations which will later be filled in with
2003 : * size estimates.
2004 : */
2005 : static grouping_sets_data *
2204 rhodiumtoad 2006 GIC 367 : preprocess_grouping_sets(PlannerInfo *root)
2007 : {
2008 367 : Query *parse = root->parse;
2009 : List *sets;
2010 367 : int maxref = 0;
2204 rhodiumtoad 2011 ECB : ListCell *lc_set;
2204 rhodiumtoad 2012 CBC 367 : grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
2013 :
752 tomas.vondra 2014 GIC 367 : parse->groupingSets = expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
2015 :
2204 rhodiumtoad 2016 367 : gd->any_hashable = false;
2204 rhodiumtoad 2017 CBC 367 : gd->unhashable_refs = NULL;
2204 rhodiumtoad 2018 GBC 367 : gd->unsortable_refs = NULL;
2204 rhodiumtoad 2019 GIC 367 : gd->unsortable_sets = NIL;
2020 :
2021 : /*
2022 : * We don't currently make any attempt to optimize the groupClause when
2023 : * there are grouping sets, so just duplicate it in processed_groupClause.
2024 : */
81 tgl 2025 GNC 367 : root->processed_groupClause = parse->groupClause;
2026 :
2204 rhodiumtoad 2027 GIC 367 : if (parse->groupClause)
2204 rhodiumtoad 2028 ECB : {
2029 : ListCell *lc;
2030 :
2204 rhodiumtoad 2031 GIC 1126 : foreach(lc, parse->groupClause)
2032 : {
2042 tgl 2033 780 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2204 rhodiumtoad 2034 780 : Index ref = gc->tleSortGroupRef;
2035 :
2036 780 : if (ref > maxref)
2204 rhodiumtoad 2037 CBC 762 : maxref = ref;
2038 :
2039 780 : if (!gc->hashable)
2204 rhodiumtoad 2040 GIC 15 : gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2204 rhodiumtoad 2041 ECB :
2204 rhodiumtoad 2042 GIC 780 : if (!OidIsValid(gc->sortop))
2204 rhodiumtoad 2043 CBC 21 : gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2044 : }
2204 rhodiumtoad 2045 ECB : }
2046 :
2047 : /* Allocate workspace array for remapping */
2204 rhodiumtoad 2048 CBC 367 : gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2204 rhodiumtoad 2049 ECB :
2050 : /*
2051 : * If we have any unsortable sets, we must extract them before trying to
2052 : * prepare rollups. Unsortable sets don't go through
2053 : * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2054 : * here.
2055 : */
2204 rhodiumtoad 2056 CBC 367 : if (!bms_is_empty(gd->unsortable_refs))
2057 : {
2058 21 : List *sortable_sets = NIL;
2059 : ListCell *lc;
2060 :
2204 rhodiumtoad 2061 GIC 63 : foreach(lc, parse->groupingSets)
2062 : {
2042 tgl 2063 CBC 45 : List *gset = (List *) lfirst(lc);
2064 :
2204 rhodiumtoad 2065 45 : if (bms_overlap_list(gd->unsortable_refs, gset))
2204 rhodiumtoad 2066 ECB : {
2204 rhodiumtoad 2067 GIC 24 : GroupingSetData *gs = makeNode(GroupingSetData);
2204 rhodiumtoad 2068 ECB :
2204 rhodiumtoad 2069 CBC 24 : gs->set = gset;
2204 rhodiumtoad 2070 GIC 24 : gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2204 rhodiumtoad 2071 ECB :
2072 : /*
2073 : * We must enforce here that an unsortable set is hashable;
2074 : * later code assumes this. Parse analysis only checks that
2075 : * every individual column is either hashable or sortable.
2076 : *
2077 : * Note that passing this test doesn't guarantee we can
2078 : * generate a plan; there might be other showstoppers.
2079 : */
2204 rhodiumtoad 2080 CBC 24 : if (bms_overlap_list(gd->unhashable_refs, gset))
2204 rhodiumtoad 2081 GIC 3 : ereport(ERROR,
2082 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2083 : errmsg("could not implement GROUP BY"),
2084 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2085 : }
2086 : else
2087 21 : sortable_sets = lappend(sortable_sets, gset);
2204 rhodiumtoad 2088 ECB : }
2089 :
2204 rhodiumtoad 2090 CBC 18 : if (sortable_sets)
2204 rhodiumtoad 2091 GIC 15 : sets = extract_rollup_sets(sortable_sets);
2092 : else
2204 rhodiumtoad 2093 CBC 3 : sets = NIL;
2094 : }
2204 rhodiumtoad 2095 ECB : else
2204 rhodiumtoad 2096 GIC 346 : sets = extract_rollup_sets(parse->groupingSets);
2204 rhodiumtoad 2097 ECB :
2204 rhodiumtoad 2098 GIC 969 : foreach(lc_set, sets)
2204 rhodiumtoad 2099 ECB : {
2204 rhodiumtoad 2100 GIC 605 : List *current_sets = (List *) lfirst(lc_set);
2204 rhodiumtoad 2101 CBC 605 : RollupData *rollup = makeNode(RollupData);
2204 rhodiumtoad 2102 ECB : GroupingSetData *gs;
2103 :
2104 : /*
2105 : * Reorder the current list of grouping sets into correct prefix
2106 : * order. If only one aggregation pass is needed, try to make the
2107 : * list match the ORDER BY clause; if more than one pass is needed, we
2108 : * don't bother with that.
2109 : *
2110 : * Note that this reorders the sets from smallest-member-first to
2111 : * largest-member-first, and applies the GroupingSetData annotations,
2112 : * though the data will be filled in later.
2113 : */
2204 rhodiumtoad 2114 GIC 605 : current_sets = reorder_grouping_sets(current_sets,
2115 605 : (list_length(sets) == 1
2116 : ? parse->sortClause
2117 : : NIL));
2118 :
2204 rhodiumtoad 2119 ECB : /*
2120 : * Get the initial (and therefore largest) grouping set.
2121 : */
2042 tgl 2122 CBC 605 : gs = linitial_node(GroupingSetData, current_sets);
2204 rhodiumtoad 2123 ECB :
2124 : /*
2125 : * Order the groupClause appropriately. If the first grouping set is
2126 : * empty, then the groupClause must also be empty; otherwise we have
2127 : * to force the groupClause to match that grouping set's order.
2128 : *
2129 : * (The first grouping set can be empty even though parse->groupClause
2130 : * is not empty only if all non-empty grouping sets are unsortable.
2131 : * The groupClauses for hashed grouping sets are built later on.)
2132 : */
2204 rhodiumtoad 2133 CBC 605 : if (gs->set)
2204 rhodiumtoad 2134 GIC 584 : rollup->groupClause = preprocess_groupclause(root, gs->set);
2135 : else
2136 21 : rollup->groupClause = NIL;
2137 :
2138 : /*
2139 : * Is it hashable? We pretend empty sets are hashable even though we
2140 : * actually force them not to be hashed later. But don't bother if
2141 : * there's nothing but empty sets (since in that case we can't hash
2142 : * anything).
2143 : */
2144 605 : if (gs->set &&
2145 584 : !bms_overlap_list(gd->unhashable_refs, gs->set))
2204 rhodiumtoad 2146 ECB : {
2204 rhodiumtoad 2147 CBC 572 : rollup->hashable = true;
2204 rhodiumtoad 2148 GIC 572 : gd->any_hashable = true;
2149 : }
2150 :
2151 : /*
2152 : * Now that we've pinned down an order for the groupClause for this
2153 : * list of grouping sets, we need to remap the entries in the grouping
2204 rhodiumtoad 2154 ECB : * sets from sortgrouprefs to plain indices (0-based) into the
2155 : * groupClause for this collection of grouping sets. We keep the
2156 : * original form for later use, though.
2157 : */
2204 rhodiumtoad 2158 GIC 605 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2159 : current_sets,
2160 : gd->tleref_to_colnum_map);
2161 605 : rollup->gsets_data = current_sets;
2162 :
2163 605 : gd->rollups = lappend(gd->rollups, rollup);
2164 : }
2204 rhodiumtoad 2165 ECB :
2204 rhodiumtoad 2166 CBC 364 : if (gd->unsortable_sets)
2167 : {
2204 rhodiumtoad 2168 ECB : /*
2169 : * We have not yet pinned down a groupclause for this, but we will
2170 : * need index-based lists for estimation purposes. Construct
2171 : * hash_sets_idx based on the entire original groupclause for now.
2172 : */
2204 rhodiumtoad 2173 GIC 18 : gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2174 : gd->unsortable_sets,
2175 : gd->tleref_to_colnum_map);
2204 rhodiumtoad 2176 CBC 18 : gd->any_hashable = true;
2204 rhodiumtoad 2177 ECB : }
2178 :
2204 rhodiumtoad 2179 CBC 364 : return gd;
2204 rhodiumtoad 2180 ECB : }
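/*
 * Illustrative sketch (not part of planner.c): the defining property of a
 * rollup is that its grouping sets form a chain under set inclusion.
 * extract_rollup_sets solves the general partitioning problem (via
 * bipartite matching); this standalone check of an already-ordered list
 * is a deliberate simplification, with column refs packed into a bitmask.
 */
#include <stdbool.h>
#include <stdio.h>

/* sets[] ordered by size, smallest first (the planner stores largest-first). */
static bool
forms_single_rollup(const unsigned *sets, int nsets)
{
    for (int i = 1; i < nsets; i++)
    {
        if ((sets[i - 1] & ~sets[i]) != 0)   /* not a subset of the next? */
            return false;
    }
    return true;
}

int
main(void)
{
    unsigned rollup_ab[] = {0x0, 0x1, 0x3};  /* ROLLUP(a,b): (), (a), (a,b) */
    unsigned sets_a_b[] = {0x1, 0x2};        /* GROUPING SETS ((a),(b)) */

    printf("%d\n", forms_single_rollup(rollup_ab, 3));  /* 1 */
    printf("%d\n", forms_single_rollup(sets_a_b, 2));   /* 0 */
    return 0;
}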
2181 :
2182 : /*
2183 : * Given a groupclause and a list of GroupingSetData, return equivalent sets
2184 : * (without annotation) mapped to indexes into the given groupclause.
2185 : */
2186 : static List *
2204 rhodiumtoad 2187 GIC 1794 : remap_to_groupclause_idx(List *groupClause,
2188 : List *gsets,
2189 : int *tleref_to_colnum_map)
2204 rhodiumtoad 2190 ECB : {
2204 rhodiumtoad 2191 GIC 1794 : int ref = 0;
2192 1794 : List *result = NIL;
2204 rhodiumtoad 2193 ECB : ListCell *lc;
2194 :
2204 rhodiumtoad 2195 CBC 4460 : foreach(lc, groupClause)
2196 : {
2042 tgl 2197 GIC 2666 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2204 rhodiumtoad 2198 ECB :
2204 rhodiumtoad 2199 GIC 2666 : tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2200 : }
2201 :
2202 4191 : foreach(lc, gsets)
2203 : {
2204 2397 : List *set = NIL;
2204 rhodiumtoad 2205 ECB : ListCell *lc2;
2042 tgl 2206 GIC 2397 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2207 :
2204 rhodiumtoad 2208 CBC 5452 : foreach(lc2, gs->set)
2209 : {
2204 rhodiumtoad 2210 GIC 3055 : set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2204 rhodiumtoad 2211 ECB : }
2212 :
2204 rhodiumtoad 2213 GIC 2397 : result = lappend(result, set);
2214 : }
2215 :
2216 1794 : return result;
2217 : }
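/*
 * Illustrative sketch (not part of planner.c): the same ref-to-index
 * remapping over plain arrays.  First record each sortgroup ref's 0-based
 * position in the group clause, then rewrite a grouping set in terms of
 * those positions.  Sizes and values are made up for the example.
 */
#include <stdio.h>

int
main(void)
{
    int clause_refs[] = {7, 3, 9};   /* tleSortGroupRefs in clause order */
    int set[] = {9, 7};              /* one grouping set, by ref */
    int map[16] = {0};               /* ref -> column index */

    for (int i = 0; i < 3; i++)
        map[clause_refs[i]] = i;

    for (int i = 0; i < 2; i++)
        printf("%d ", map[set[i]]);  /* prints "2 0" */
    printf("\n");
    return 0;
}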
2218 :
2204 rhodiumtoad 2219 ECB :
2220 : /*
2221 : * preprocess_rowmarks - set up PlanRowMarks if needed
2222 : */
4913 tgl 2223 : static void
4913 tgl 2224 CBC 227027 : preprocess_rowmarks(PlannerInfo *root)
2225 : {
4913 tgl 2226 GIC 227027 : Query *parse = root->parse;
4913 tgl 2227 ECB : Bitmapset *rels;
2228 : List *prowmarks;
2229 : ListCell *l;
2230 : int i;
2231 :
4913 tgl 2232 GIC 227027 : if (parse->rowMarks)
2233 : {
4913 tgl 2234 ECB : /*
2235 : * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
3602 bruce 2236 : * grouping, since grouping renders references to individual tuple
2237 : * CTIDs invalid. This is also checked at parse time, but that's
4913 tgl 2238 : * insufficient because of rule substitution, query pullup, etc.
2239 : */
2042 tgl 2240 CBC 3371 : CheckSelectLocking(parse, linitial_node(RowMarkClause,
2241 : parse->rowMarks)->strength);
4913 tgl 2242 ECB : }
2243 : else
2244 : {
2245 : /*
2246 : * We only need rowmarks for UPDATE, DELETE, or FOR [KEY]
2247 : * UPDATE/SHARE.
2248 : */
4913 tgl 2249 GIC 223656 : if (parse->commandType != CMD_UPDATE &&
2250 216074 : parse->commandType != CMD_DELETE)
2251 214107 : return;
2252 : }
2253 :
2254 : /*
2255 : * We need to have rowmarks for all base relations except the target. We
4790 bruce 2256 ECB : * make a bitmapset of all base rels and then remove the items we don't
2257 : * need or have FOR [KEY] UPDATE/SHARE marks for.
4913 tgl 2258 : */
69 tgl 2259 GNC 12920 : rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
4913 tgl 2260 GIC 12920 : if (parse->resultRelation)
2261 9549 : rels = bms_del_member(rels, parse->resultRelation);
2262 :
2263 : /*
4913 tgl 2264 ECB : * Convert RowMarkClauses to PlanRowMark representation.
2265 : */
4913 tgl 2266 GIC 12920 : prowmarks = NIL;
2267 16419 : foreach(l, parse->rowMarks)
2268 : {
2042 2269 3499 : RowMarkClause *rc = lfirst_node(RowMarkClause, l);
4911 2270 3499 : RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2271 : PlanRowMark *newrc;
4913 tgl 2272 ECB :
2273 : /*
2274 : * Currently, it is syntactically impossible to have FOR UPDATE et al
2275 : * applied to an update/delete target rel. If that ever becomes
2276 : * possible, we should drop the target from the PlanRowMark list.
2277 : */
4913 tgl 2278 GIC 3499 : Assert(rc->rti != parse->resultRelation);
2279 :
2280 : /*
4790 bruce 2281 ECB : * Ignore RowMarkClauses for subqueries; they aren't real tables and
2282 : * can't support true locking. Subqueries that got flattened into the
2283 : * main query should be ignored completely. Any that didn't will get
2284 : * ROW_MARK_COPY items in the next loop.
2285 : */
4911 tgl 2286 GIC 3499 : if (rte->rtekind != RTE_RELATION)
2287 54 : continue;
2288 :
4913 2289 3445 : rels = bms_del_member(rels, rc->rti);
2290 :
4911 tgl 2291 CBC 3445 : newrc = makeNode(PlanRowMark);
4913 2292 3445 : newrc->rti = newrc->prti = rc->rti;
4442 2293 3445 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2940 tgl 2294 GIC 3445 : newrc->markType = select_rowmark_type(rte, rc->strength);
2947 2295 3445 : newrc->allMarkTypes = (1 << newrc->markType);
2296 3445 : newrc->strength = rc->strength;
3106 alvherre 2297 3445 : newrc->waitPolicy = rc->waitPolicy;
4913 tgl 2298 CBC 3445 : newrc->isParent = false;
4913 tgl 2299 ECB :
4913 tgl 2300 GIC 3445 : prowmarks = lappend(prowmarks, newrc);
4913 tgl 2301 ECB : }
2302 :
2303 : /*
2304 : * Now, add rowmarks for any non-target, non-locked base relations.
2305 : */
4913 tgl 2306 GIC 12920 : i = 0;
2307 28236 : foreach(l, parse->rtable)
2308 : {
2042 2309 15316 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
4913 tgl 2310 ECB : PlanRowMark *newrc;
2311 :
4913 tgl 2312 GIC 15316 : i++;
2313 15316 : if (!bms_is_member(i, rels))
2314 14581 : continue;
2315 :
2316 735 : newrc = makeNode(PlanRowMark);
2317 735 : newrc->rti = newrc->prti = i;
4442 tgl 2318 CBC 735 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2940 2319 735 : newrc->markType = select_rowmark_type(rte, LCS_NONE);
2947 tgl 2320 GIC 735 : newrc->allMarkTypes = (1 << newrc->markType);
2947 tgl 2321 CBC 735 : newrc->strength = LCS_NONE;
2118 tgl 2322 GIC 735 : newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
4913 tgl 2323 CBC 735 : newrc->isParent = false;
4913 tgl 2324 ECB :
4913 tgl 2325 CBC 735 : prowmarks = lappend(prowmarks, newrc);
4913 tgl 2326 ECB : }
2327 :
4913 tgl 2328 CBC 12920 : root->rowMarks = prowmarks;
4913 tgl 2329 ECB : }
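/*
 * Illustrative sketch (not part of planner.c): the set arithmetic above,
 * with one machine word standing in for a Bitmapset.  Start from all base
 * rels, delete the result relation and every explicitly locked rel; each
 * survivor gets a default (non-locking) row mark.  The relids are made up.
 */
#include <stdio.h>

int
main(void)
{
    unsigned rels = 0;

    for (int rti = 1; rti <= 4; rti++)   /* rels 1..4 are in the jointree */
        rels |= 1u << rti;

    rels &= ~(1u << 1);                  /* rti 1 is the UPDATE target */
    rels &= ~(1u << 3);                  /* rti 3 already has FOR UPDATE */

    for (int rti = 1; rti <= 4; rti++)
        if (rels & (1u << rti))
            printf("default rowmark for rti %d\n", rti);   /* 2 and 4 */
    return 0;
}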
2330 :
2331 : /*
2940 2332 : * Select RowMarkType to use for a given table
2333 : */
2334 : RowMarkType
2940 tgl 2335 GIC 5098 : select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2336 : {
2337 5098 : if (rte->rtekind != RTE_RELATION)
2940 tgl 2338 ECB : {
2339 : /* If it's not a table at all, use ROW_MARK_COPY */
2940 tgl 2340 GIC 122 : return ROW_MARK_COPY;
2940 tgl 2341 ECB : }
2940 tgl 2342 GIC 4976 : else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2343 : {
2889 tgl 2344 ECB : /* Let the FDW select the rowmark type, if it wants to */
2889 tgl 2345 CBC 96 : FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2889 tgl 2346 ECB :
2889 tgl 2347 GIC 96 : if (fdwroutine->GetForeignRowMarkType != NULL)
2889 tgl 2348 LBC 0 : return fdwroutine->GetForeignRowMarkType(rte, strength);
2889 tgl 2349 ECB : /* Otherwise, use ROW_MARK_COPY by default */
2940 tgl 2350 CBC 96 : return ROW_MARK_COPY;
2940 tgl 2351 ECB : }
2352 : else
2353 : {
2354 : /* Regular table, apply the appropriate lock type */
2940 tgl 2355 CBC 4880 : switch (strength)
2356 : {
2357 699 : case LCS_NONE:
2358 :
2359 : /*
2909 sfrost 2360 ECB : * We don't need a tuple lock, only the ability to re-fetch
2361 : * the row.
2362 : */
2940 tgl 2363 GIC 699 : return ROW_MARK_REFERENCE;
2364 : break;
2365 3279 : case LCS_FORKEYSHARE:
2366 3279 : return ROW_MARK_KEYSHARE;
2940 tgl 2367 ECB : break;
2940 tgl 2368 GIC 150 : case LCS_FORSHARE:
2940 tgl 2369 CBC 150 : return ROW_MARK_SHARE;
2370 : break;
2940 tgl 2371 GIC 31 : case LCS_FORNOKEYUPDATE:
2940 tgl 2372 CBC 31 : return ROW_MARK_NOKEYEXCLUSIVE;
2373 : break;
2374 721 : case LCS_FORUPDATE:
2940 tgl 2375 GIC 721 : return ROW_MARK_EXCLUSIVE;
2376 : break;
2940 tgl 2377 ECB : }
2940 tgl 2378 UIC 0 : elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2118 tgl 2379 ECB : return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2940 tgl 2380 EUB : }
2381 : }
2940 tgl 2382 ECB :
2383 : /*
2384 : * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2385 : *
2386 : * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
3260 bruce 2387 : * results back in *count_est and *offset_est. These variables are set to
2388 : * 0 if the corresponding clause is not present, and -1 if it's present
6443 tgl 2389 : * but we couldn't estimate the value for it. (The "0" convention is OK
2390 : * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2391 : * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2392 : * usual practice of never estimating less than one row.) These values will
2393 : * be passed to create_limit_path, which see if you change this code.
2394 : *
2395 : * The return value is the suitably adjusted tuple_fraction to use for
2396 : * planning the query. This adjustment is not overridable, since it reflects
2397 : * plan actions that grouping_planner() will certainly take, not assumptions
2398 : * about context.
2399 : */
6512 2400 : static double
6443 tgl 2401 CBC 2440 : preprocess_limit(PlannerInfo *root, double tuple_fraction,
2402 : int64 *offset_est, int64 *count_est)
6512 tgl 2403 ECB : {
6512 tgl 2404 CBC 2440 : Query *parse = root->parse;
2405 : Node *est;
6443 tgl 2406 ECB : double limit_fraction;
6512 2407 :
2408 : /* Should not be called unless LIMIT or OFFSET */
6443 tgl 2409 GIC 2440 : Assert(parse->limitCount || parse->limitOffset);
6512 tgl 2410 EUB :
2411 : /*
2412 : * Try to obtain the clause values. We use estimate_expression_value
2413 : * primarily because it can sometimes do something useful with Params.
2414 : */
6443 tgl 2415 GIC 2440 : if (parse->limitCount)
2416 : {
5893 2417 2253 : est = estimate_expression_value(root, parse->limitCount);
6443 2418 2253 : if (est && IsA(est, Const))
2419 : {
2420 2250 : if (((Const *) est)->constisnull)
2421 : {
2422 : /* NULL indicates LIMIT ALL, ie, no limit */
6385 bruce 2423 UIC 0 : *count_est = 0; /* treat as not present */
2424 : }
2425 : else
2426 : {
6101 bruce 2427 GIC 2250 : *count_est = DatumGetInt64(((Const *) est)->constvalue);
6443 tgl 2428 2250 : if (*count_est <= 0)
2118 2429 75 : *count_est = 1; /* force to at least 1 */
2430 : }
2431 : }
2432 : else
6443 tgl 2433 CBC 3 : *count_est = -1; /* can't estimate */
2434 : }
2435 : else
2436 187 : *count_est = 0; /* not present */
2437 :
6443 tgl 2438 GIC 2440 : if (parse->limitOffset)
2439 : {
5893 2440 363 : est = estimate_expression_value(root, parse->limitOffset);
6443 tgl 2441 CBC 363 : if (est && IsA(est, Const))
2442 : {
6443 tgl 2443 GIC 351 : if (((Const *) est)->constisnull)
2444 : {
2445 : /* Treat NULL as no offset; the executor will too */
6385 bruce 2446 UIC 0 : *offset_est = 0; /* treat as not present */
6443 tgl 2447 ECB : }
2448 : else
2449 : {
6101 bruce 2450 CBC 351 : *offset_est = DatumGetInt64(((Const *) est)->constvalue);
6443 tgl 2451 GIC 351 : if (*offset_est < 0)
3183 tgl 2452 LBC 0 : *offset_est = 0; /* treat as not present */
2453 : }
2454 : }
6443 tgl 2455 EUB : else
6443 tgl 2456 GIC 12 : *offset_est = -1; /* can't estimate */
2457 : }
2458 : else
6443 tgl 2459 CBC 2077 : *offset_est = 0; /* not present */
6512 tgl 2460 ECB :
6443 tgl 2461 CBC 2440 : if (*count_est != 0)
2462 : {
2463 : /*
2464 : * A LIMIT clause limits the absolute number of tuples returned.
6443 tgl 2465 ECB : * However, if it's not a constant LIMIT then we have to guess; for
2466 : * lack of a better idea, assume 10% of the plan's result is wanted.
2467 : */
6443 tgl 2468 CBC 2253 : if (*count_est < 0 || *offset_est < 0)
2469 : {
6443 tgl 2470 ECB : /* LIMIT or OFFSET is an expression ... punt ... */
6443 tgl 2471 GIC 12 : limit_fraction = 0.10;
6443 tgl 2472 ECB : }
2473 : else
2474 : {
2475 : /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
6443 tgl 2476 GIC 2241 : limit_fraction = (double) *count_est + (double) *offset_est;
2477 : }
6443 tgl 2478 EUB :
2479 : /*
2480 : * If we have absolute limits from both caller and LIMIT, use the
2481 : * smaller value; likewise if they are both fractional. If one is
6443 tgl 2482 ECB : * fractional and the other absolute, we can't easily determine which
2483 : * is smaller, but we use the heuristic that the absolute will usually
6443 tgl 2484 EUB : * be smaller.
2485 : */
6512 tgl 2486 GIC 2253 : if (tuple_fraction >= 1.0)
2487 : {
6512 tgl 2488 CBC 3 : if (limit_fraction >= 1.0)
2489 : {
2490 : /* both absolute */
2491 3 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2492 : }
6512 tgl 2493 ECB : else
2494 : {
2495 : /* caller absolute, limit fractional; use caller's value */
2496 : }
2497 : }
6512 tgl 2498 GIC 2250 : else if (tuple_fraction > 0.0)
2499 : {
6512 tgl 2500 CBC 72 : if (limit_fraction >= 1.0)
2501 : {
2502 : /* caller fractional, limit absolute; use limit */
6443 2503 72 : tuple_fraction = limit_fraction;
2504 : }
2505 : else
2506 : {
2507 : /* both fractional */
6443 tgl 2508 LBC 0 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2509 : }
2510 : }
2511 : else
2512 : {
2513 : /* no info from caller, just use limit */
6512 tgl 2514 GIC 2178 : tuple_fraction = limit_fraction;
2515 : }
2516 : }
6443 2517 187 : else if (*offset_est != 0 && tuple_fraction > 0.0)
6443 tgl 2518 ECB : {
2519 : /*
3260 bruce 2520 : * We have an OFFSET but no LIMIT. This acts entirely differently
2521 : * from the LIMIT case: here, we need to increase rather than decrease
2522 : * the caller's tuple_fraction, because the OFFSET acts to cause more
6385 2523 : * tuples to be fetched instead of fewer. This only matters if we got
2524 : * a tuple_fraction > 0, however.
2525 : *
2526 : * As above, use 10% if OFFSET is present but unestimatable.
2527 : */
6443 tgl 2528 GIC 6 : if (*offset_est < 0)
6443 tgl 2529 UIC 0 : limit_fraction = 0.10;
6443 tgl 2530 ECB : else
6443 tgl 2531 GIC 6 : limit_fraction = (double) *offset_est;
6443 tgl 2532 ECB :
2533 : /*
2534 : * If we have absolute counts from both caller and OFFSET, add them
3260 bruce 2535 : * together; likewise if they are both fractional. If one is
2536 : * fractional and the other absolute, we want to take the larger, and
2537 : * we heuristically assume that's the fractional one.
2538 : */
6443 tgl 2539 GIC 6 : if (tuple_fraction >= 1.0)
6443 tgl 2540 EUB : {
6443 tgl 2541 UIC 0 : if (limit_fraction >= 1.0)
2542 : {
2543 : /* both absolute, so add them together */
2544 0 : tuple_fraction += limit_fraction;
2545 : }
6443 tgl 2546 ECB : else
2547 : {
2548 : /* caller absolute, limit fractional; use limit */
6443 tgl 2549 LBC 0 : tuple_fraction = limit_fraction;
2550 : }
2551 : }
2552 : else
2553 : {
6443 tgl 2554 GIC 6 : if (limit_fraction >= 1.0)
2555 : {
2556 : /* caller fractional, limit absolute; use caller's value */
2557 : }
2558 : else
2559 : {
6443 tgl 2560 ECB : /* both fractional, so add them together */
6443 tgl 2561 UBC 0 : tuple_fraction += limit_fraction;
6443 tgl 2562 UIC 0 : if (tuple_fraction >= 1.0)
2118 tgl 2563 LBC 0 : tuple_fraction = 0.0; /* assume fetch all */
2564 : }
2565 : }
2566 : }
2567 :
6512 tgl 2568 GIC 2440 : return tuple_fraction;
2569 : }
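/*
 * Illustrative sketch (not part of planner.c): the caller-vs-LIMIT merge
 * rules from the function above, extracted as a standalone helper.  Both
 * arguments use the tuple_fraction convention: 0 = fetch all, (0,1) = a
 * fraction, >= 1 = an absolute row count.  The helper name is invented.
 */
#include <stdio.h>

static double
merge_fractions(double caller, double limit)
{
    if (caller >= 1.0)
        return (limit >= 1.0 && limit < caller) ? limit : caller;
    if (caller > 0.0)
    {
        if (limit >= 1.0)
            return limit;            /* heuristic: the absolute is smaller */
        return limit < caller ? limit : caller;   /* both fractional: min */
    }
    return limit;                    /* no caller preference: use the limit */
}

int
main(void)
{
    printf("%g\n", merge_fractions(0.0, 15.0));   /* 15: just use LIMIT */
    printf("%g\n", merge_fractions(0.10, 15.0));  /* 15: absolute wins */
    printf("%g\n", merge_fractions(20.0, 15.0));  /* 15: smaller absolute */
    return 0;
}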
2570 :
3678 tgl 2571 ECB : /*
2572 : * limit_needed - do we actually need a Limit plan node?
3678 tgl 2573 EUB : *
2574 : * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2575 : * a Limit node. This is worth checking for because "OFFSET 0" is a common
3260 bruce 2576 : * locution for an optimization fence. (Because other places in the planner
2577 : * merely check whether parse->limitOffset isn't NULL, it will still work as
2578 : * an optimization fence --- we're just suppressing unnecessary run-time
2579 : * overhead.)
2580 : *
3678 tgl 2581 : * This might look like it could be merged into preprocess_limit, but there's
2582 : * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2583 : * in preprocess_limit it's good enough to consider estimated values.
2584 : */
2585 : bool
3678 tgl 2586 CBC 464587 : limit_needed(Query *parse)
2587 : {
2588 : Node *node;
2589 :
3678 tgl 2590 GIC 464587 : node = parse->limitCount;
2591 464587 : if (node)
2592 : {
3678 tgl 2593 GBC 5207 : if (IsA(node, Const))
3678 tgl 2594 EUB : {
2595 : /* NULL indicates LIMIT ALL, ie, no limit */
3678 tgl 2596 GIC 5111 : if (!((Const *) node)->constisnull)
2597 5111 : return true; /* LIMIT with a constant value */
2598 : }
2599 : else
3678 tgl 2600 CBC 96 : return true; /* non-constant LIMIT */
2601 : }
2602 :
3678 tgl 2603 GIC 459380 : node = parse->limitOffset;
2604 459380 : if (node)
2605 : {
2606 547 : if (IsA(node, Const))
2607 : {
2608 : /* Treat NULL as no offset; the executor would too */
2609 429 : if (!((Const *) node)->constisnull)
2610 : {
3602 bruce 2611 429 : int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2612 :
3183 tgl 2613 429 : if (offset != 0)
2614 42 : return true; /* OFFSET with a nonzero value */
2615 : }
2616 : }
2617 : else
3678 tgl 2618 CBC 118 : return true; /* non-constant OFFSET */
2619 : }
2620 :
3678 tgl 2621 GIC 459220 : return false; /* don't need a Limit plan node */
3678 tgl 2622 ECB : }
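/*
 * Illustrative sketch (not part of planner.c): the clause combinations
 * that force a Limit node, with an invented enum standing in for the
 * Const-node tests above.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum
{
    ABSENT,      /* clause not given */
    CONST_NULL,  /* LIMIT ALL, or OFFSET NULL */
    CONST_ZERO,  /* a literal 0 */
    CONST_OTHER, /* any other literal */
    NOT_CONST    /* an expression we cannot inspect */
} ClauseForm;

static bool
limit_node_needed(ClauseForm count, ClauseForm offset)
{
    /* Any LIMIT except "absent" or LIMIT ALL needs a node (even LIMIT 0). */
    if (count == CONST_ZERO || count == CONST_OTHER || count == NOT_CONST)
        return true;
    /* OFFSET needs a node unless absent, NULL, or the constant-zero fence. */
    return offset == CONST_OTHER || offset == NOT_CONST;
}

int
main(void)
{
    printf("%d\n", limit_node_needed(ABSENT, CONST_ZERO));   /* OFFSET 0: 0 */
    printf("%d\n", limit_node_needed(ABSENT, CONST_OTHER));  /* OFFSET 5: 1 */
    printf("%d\n", limit_node_needed(CONST_ZERO, ABSENT));   /* LIMIT 0: 1 */
    return 0;
}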
2623 :
2624 :
2614 2625 : /*
2626 : * remove_useless_groupby_columns
2627 : * Remove any columns in the GROUP BY clause that are redundant due to
2628 : * being functionally dependent on other GROUP BY columns.
2629 : *
2630 : * Since some other DBMSes do not allow references to ungrouped columns, it's
2631 : * not unusual to find all columns listed in GROUP BY even though listing the
2632 : * primary-key columns would be sufficient. Deleting such excess columns
2633 : * avoids redundant sorting work, so it's worth doing.
2634 : *
1087 drowley 2635 : * Relcache invalidations will ensure that cached plans become invalidated
2636 : * when the underlying index of the pkey constraint is dropped.
2637 : *
2638 : * Currently, we only make use of pkey constraints for this; however, we may
2639 : * wish to take this further in the future and also use unique constraints
2640 : * which have NOT NULL columns. In that case, plan invalidation will still
2641 : * work since relations will receive a relcache invalidation when a NOT NULL
2642 : * constraint is dropped.
2614 tgl 2643 : */
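/*
 * Illustrative sketch (not part of planner.c): the subset test driving the
 * removal, with attno bitmasks packed into a plain word.  When the pkey
 * columns are a proper subset of the grouped columns, everything grouped
 * outside the pkey is surplus.  The masks are made-up example data.
 */
#include <stdio.h>

int
main(void)
{
    unsigned groupby = 0x0E;   /* GROUP BY covers attnos 1, 2, 3 */
    unsigned pkey = 0x02;      /* primary key is just attno 1 */

    if ((pkey & ~groupby) == 0 && pkey != groupby)   /* proper subset */
    {
        unsigned surplus = groupby & ~pkey;          /* attnos 2 and 3 */
        printf("surplus attno mask: 0x%02X\n", surplus);   /* 0x0C */
    }
    return 0;
}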
2644 : static void
2614 tgl 2645 CBC 1598 : remove_useless_groupby_columns(PlannerInfo *root)
2614 tgl 2646 ECB : {
2614 tgl 2647 GIC 1598 : Query *parse = root->parse;
2648 : Bitmapset **groupbyattnos;
2649 : Bitmapset **surplusvars;
2614 tgl 2650 ECB : ListCell *lc;
2651 : int relid;
2652 :
2653 : /* No chance to do anything if there are fewer than two GROUP BY items */
81 tgl 2654 GNC 1598 : if (list_length(root->processed_groupClause) < 2)
2614 tgl 2655 GIC 1006 : return;
2656 :
2657 : /* Don't fiddle with the GROUP BY clause if the query has grouping sets */
2658 592 : if (parse->groupingSets)
2614 tgl 2659 UIC 0 : return;
2660 :
2661 : /*
2662 : * Scan the GROUP BY clause to find GROUP BY items that are simple Vars.
2663 : * Fill groupbyattnos[k] with a bitmapset of the column attnos of RTE k
2664 : * that are GROUP BY items.
2665 : */
2614 tgl 2666 GIC 592 : groupbyattnos = (Bitmapset **) palloc0(sizeof(Bitmapset *) *
2667 592 : (list_length(parse->rtable) + 1));
81 tgl 2668 GNC 2101 : foreach(lc, root->processed_groupClause)
2669 : {
2042 tgl 2670 GIC 1509 : SortGroupClause *sgc = lfirst_node(SortGroupClause, lc);
2614 2671 1509 : TargetEntry *tle = get_sortgroupclause_tle(sgc, parse->targetList);
2672 1509 : Var *var = (Var *) tle->expr;
2673 :
2674 : /*
2675 : * Ignore non-Vars and Vars from other query levels.
2676 : *
2614 tgl 2677 ECB : * XXX in principle, stable expressions containing Vars could also be
2678 : * removed, if all the Vars are functionally dependent on other GROUP
2679 : * BY items. But it's not clear that such cases occur often enough to
2680 : * be worth troubling over.
2681 : */
2614 tgl 2682 GIC 1509 : if (!IsA(var, Var) ||
2683 1082 : var->varlevelsup > 0)
2684 427 : continue;
2685 :
2614 tgl 2686 ECB : /* OK, remember we have this Var */
2614 tgl 2687 CBC 1082 : relid = var->varno;
2614 tgl 2688 GIC 1082 : Assert(relid <= list_length(parse->rtable));
2689 1082 : groupbyattnos[relid] = bms_add_member(groupbyattnos[relid],
2118 tgl 2690 CBC 1082 : var->varattno - FirstLowInvalidHeapAttributeNumber);
2614 tgl 2691 EUB : }
2692 :
2693 : /*
2694 : * Consider each relation and see if it is possible to remove some of its
2695 : * Vars from GROUP BY. For simplicity and speed, we do the actual removal
2696 : * in a separate pass. Here, we just fill surplusvars[k] with a bitmapset
2697 : * of the column attnos of RTE k that are removable GROUP BY items.
2614 tgl 2698 ECB : */
2614 tgl 2699 CBC 592 : surplusvars = NULL; /* don't allocate array unless required */
2700 592 : relid = 0;
2614 tgl 2701 GIC 1588 : foreach(lc, parse->rtable)
2614 tgl 2702 ECB : {
2042 tgl 2703 CBC 996 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc);
2614 tgl 2704 ECB : Bitmapset *relattnos;
2705 : Bitmapset *pkattnos;
2706 : Oid constraintOid;
2707 :
2614 tgl 2708 GIC 996 : relid++;
2709 :
2710 : /* Only plain relations could have primary-key constraints */
2711 996 : if (rte->rtekind != RTE_RELATION)
2712 917 : continue;
2713 :
1376 drowley 2714 ECB : /*
2715 : * We must skip inheritance parent tables as some of the child rels
2716 : * may cause duplicate rows. This cannot happen with partitioned
2717 : * tables, however.
2718 : */
1376 drowley 2719 CBC 749 : if (rte->inh && rte->relkind != RELKIND_PARTITIONED_TABLE)
2720 9 : continue;
1376 drowley 2721 ECB :
2614 tgl 2722 : /* Nothing to do unless this rel has multiple Vars in GROUP BY */
2614 tgl 2723 GIC 740 : relattnos = groupbyattnos[relid];
2724 740 : if (bms_membership(relattnos) != BMS_MULTIPLE)
2725 396 : continue;
2726 :
2727 : /*
2728 : * Can't remove any columns for this rel if there is no suitable
2729 : * (i.e., nondeferrable) primary key constraint.
2730 : */
2614 tgl 2731 CBC 344 : pkattnos = get_primary_key_attnos(rte->relid, false, &constraintOid);
2732 344 : if (pkattnos == NULL)
2733 265 : continue;
2734 :
2614 tgl 2735 ECB : /*
2736 : * If the primary key is a proper subset of relattnos then we have
2737 : * some items in the GROUP BY that can be removed.
2738 : */
2614 tgl 2739 GIC 79 : if (bms_subset_compare(pkattnos, relattnos) == BMS_SUBSET1)
2614 tgl 2740 ECB : {
2741 : /*
2742 : * To easily remember whether we've found anything to do, we don't
2743 : * allocate the surplusvars[] array until we find something.
2744 : */
2614 tgl 2745 GIC 70 : if (surplusvars == NULL)
2746 67 : surplusvars = (Bitmapset **) palloc0(sizeof(Bitmapset *) *
2118 2747 67 : (list_length(parse->rtable) + 1));
2748 :
2749 : /* Remember the attnos of the removable columns */
2614 2750 70 : surplusvars[relid] = bms_difference(relattnos, pkattnos);
2614 tgl 2751 ECB : }
2752 : }
2753 :
2754 : /*
2755 : * If we found any surplus Vars, build a new GROUP BY clause without them.
2756 : * (Note: this may leave some TLEs with unreferenced ressortgroupref
2757 : * markings, but that's harmless.)
2758 : */
2614 tgl 2759 GIC 592 : if (surplusvars != NULL)
2760 : {
2761 67 : List *new_groupby = NIL;
2762 :
81 tgl 2763 GNC 291 : foreach(lc, root->processed_groupClause)
2614 tgl 2764 ECB : {
2042 tgl 2765 CBC 224 : SortGroupClause *sgc = lfirst_node(SortGroupClause, lc);
2614 tgl 2766 GIC 224 : TargetEntry *tle = get_sortgroupclause_tle(sgc, parse->targetList);
2767 224 : Var *var = (Var *) tle->expr;
2768 :
2769 : /*
2770 : * New list must include non-Vars, outer Vars, and anything not
2614 tgl 2771 ECB : * marked as surplus.
2772 : */
2614 tgl 2773 GIC 224 : if (!IsA(var, Var) ||
2774 224 : var->varlevelsup > 0 ||
2118 2775 224 : !bms_is_member(var->varattno - FirstLowInvalidHeapAttributeNumber,
2776 224 : surplusvars[var->varno]))
2614 tgl 2777 CBC 139 : new_groupby = lappend(new_groupby, sgc);
2614 tgl 2778 ECB : }
2779 :
81 tgl 2780 GNC 67 : root->processed_groupClause = new_groupby;
2781 : }
2614 tgl 2782 ECB : }
2783 :
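/*
 * [Editor's example, a sketch under an assumed schema] Given
 *
 *   CREATE TABLE t (id int PRIMARY KEY, name text, val int);
 *   SELECT id, name, sum(val) FROM t GROUP BY id, name;
 *
 * the pkey attnos {id} are a proper subset (BMS_SUBSET1) of the grouped
 * attnos {id, name}, so "name" is functionally dependent on "id" and is
 * dropped from processed_groupClause, leaving an effective GROUP BY id.
 */
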
2784 : /*
2785 : * preprocess_groupclause - do preparatory work on GROUP BY clause
2786 : *
2787 : * The idea here is to adjust the ordering of the GROUP BY elements
2788 : * (which in itself is semantically insignificant) to match ORDER BY,
2789 : * thereby allowing a single sort operation to both implement the ORDER BY
2790 : * requirement and set up for a Unique step that implements GROUP BY.
5363 2791 : *
2792 : * In principle it might be interesting to consider other orderings of the
2793 : * GROUP BY elements, which could match the sort ordering of other
2794 : * possible plans (eg an indexscan) and thereby reduce cost. We don't
188 2795 : * bother with that, though. Hashed grouping will frequently win anyway.
2796 : *
5362 2797 : * Note: we need no comparable processing of the distinctClause because
2798 : * the parser already enforced that that matches ORDER BY.
2885 andres 2799 : *
2800 : * Note: we return a fresh List, but its elements are the same
2801 : * SortGroupClauses appearing in parse->groupClause. This is important
2802 : * because later processing may modify the processed_groupClause list.
2803 : *
2804 : * For grouping sets, the order of items is instead forced to agree with that
2805 : * of the grouping set (and items not in the grouping set are skipped). The
2806 : * work of sorting the order of grouping set elements to match the ORDER BY if
2807 : * possible is done elsewhere.
2808 : */
2809 : static List *
2885 andres 2810 CBC 3353 : preprocess_groupclause(PlannerInfo *root, List *force)
5363 tgl 2811 ECB : {
5363 tgl 2812 CBC 3353 : Query *parse = root->parse;
2885 andres 2813 3353 : List *new_groupclause = NIL;
2814 : bool partial_match;
2815 : ListCell *sl;
5363 tgl 2816 ECB : ListCell *gl;
2817 :
2818 : /* For grouping sets, we need to force the ordering */
2885 andres 2819 GIC 3353 : if (force)
2820 : {
2821 4382 : foreach(sl, force)
2822 : {
2878 bruce 2823 2627 : Index ref = lfirst_int(sl);
2885 andres 2824 2627 : SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2825 :
2826 2627 : new_groupclause = lappend(new_groupclause, cl);
2827 : }
2828 :
2829 1755 : return new_groupclause;
2830 : }
2831 :
2832 : /* If no ORDER BY, nothing useful to do here */
5363 tgl 2833 1598 : if (parse->sortClause == NIL)
81 tgl 2834 GNC 912 : return list_copy(parse->groupClause);
2835 :
2836 : /*
2837 : * Scan the ORDER BY clause and construct a list of matching GROUP BY
2838 : * items, but only as far as we can make a matching prefix.
2839 : *
2840 : * This code assumes that the sortClause contains no duplicate items.
2841 : */
5363 tgl 2842 GIC 1367 : foreach(sl, parse->sortClause)
2843 : {
2042 2844 953 : SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2845 :
5363 tgl 2846 CBC 1423 : foreach(gl, parse->groupClause)
2847 : {
2042 2848 1151 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
5363 tgl 2849 ECB :
5363 tgl 2850 GIC 1151 : if (equal(gc, sc))
2851 : {
2852 681 : new_groupclause = lappend(new_groupclause, gc);
2853 681 : break;
2854 : }
5363 tgl 2855 ECB : }
5363 tgl 2856 GIC 953 : if (gl == NULL)
5363 tgl 2857 CBC 272 : break; /* no match, so stop scanning */
2858 : }
5363 tgl 2859 ECB :
2860 : /* Did we match all of the ORDER BY list, or just some of it? */
5363 tgl 2861 GIC 686 : partial_match = (sl != NULL);
5363 tgl 2862 ECB :
2863 : /* If no match at all, no point in reordering GROUP BY */
5363 tgl 2864 GIC 686 : if (new_groupclause == NIL)
81 tgl 2865 GNC 98 : return list_copy(parse->groupClause);
2866 :
2867 : /*
2868 : * Add any remaining GROUP BY items to the new list, but only if we were
5050 bruce 2869 ECB : * able to make a complete match. In other words, we only rearrange the
2870 : * GROUP BY list if the result is that one list is a prefix of the other
2871 : * --- otherwise there's no possibility of a common sort. Also, give up
2872 : * if there are any non-sortable GROUP BY items, since then there's no
2873 : * hope anyway.
2874 : */
5363 tgl 2875 GIC 1289 : foreach(gl, parse->groupClause)
2876 : {
2042 2877 737 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
5363 tgl 2878 ECB :
5363 tgl 2879 GIC 737 : if (list_member_ptr(new_groupclause, gc))
5363 tgl 2880 CBC 669 : continue; /* it matched an ORDER BY item */
81 tgl 2881 GNC 68 : if (partial_match) /* give up, no common sort possible */
2882 36 : return list_copy(parse->groupClause);
2883 32 : if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
81 tgl 2884 UNC 0 : return list_copy(parse->groupClause);
5363 tgl 2885 GIC 32 : new_groupclause = lappend(new_groupclause, gc);
5363 tgl 2886 ECB : }
2887 :
2888 : /* Success --- install the rearranged GROUP BY list */
5363 tgl 2889 CBC 552 : Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2885 andres 2890 GIC 552 : return new_groupclause;
2891 : }
2885 andres 2892 ECB :
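/*
 * [Editor's sketch of the reordering] For
 *
 *   SELECT a, b, count(*) FROM t GROUP BY b, a ORDER BY a, b;
 *
 * the ORDER BY items (a, b) match the whole GROUP BY set, so the GROUP BY
 * list is rearranged to (a, b) and one sort serves both steps. With
 * ORDER BY a, c instead, only a partial prefix matches and neither list
 * would be a prefix of the other, so the original GROUP BY order is kept.
 */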
2893 : /*
2894 : * Extract lists of grouping sets that can be implemented using a single
2895 : * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2896 : *
2897 : * Input must be sorted with smallest sets first. Result has each sublist
2898 : * sorted with smallest sets first.
2899 : *
2900 : * We want to produce the absolute minimum possible number of lists here to
2901 : * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2902 : * of finding the minimal partition of a partially-ordered set into chains
2903 : * (which is what we need, taking the list of grouping sets as a poset ordered
2904 : * by set inclusion) can be mapped to the problem of finding the maximum
2905 : * cardinality matching on a bipartite graph, which is solvable in polynomial
2906 : * time with a worst case of no worse than O(n^2.5) and usually much
2907 : * time with a worst case of O(n^2.5) and usually much
2908 : * heuristic or approximate methods. (Planning time for a 12-d cube is under
2909 : * half a second on my modest system even with optimization off and assertions
2910 : * on.)
2911 : */
2912 : static List *
2885 andres 2913 CBC 361 : extract_rollup_sets(List *groupingSets)
2914 : {
2915 361 : int num_sets_raw = list_length(groupingSets);
2916 361 : int num_empty = 0;
2878 bruce 2917 361 : int num_sets = 0; /* distinct sets */
2885 andres 2918 361 : int num_chains = 0;
2919 361 : List *result = NIL;
2885 andres 2920 EUB : List **results;
2885 andres 2921 ECB : List **orig_sets;
2922 : Bitmapset **set_masks;
2923 : int *chains;
2924 : short **adjacency;
2925 : short *adjacency_buf;
2926 : BipartiteMatchState *state;
2927 : int i;
2928 : int j;
2929 : int j_size;
2885 andres 2930 GIC 361 : ListCell *lc1 = list_head(groupingSets);
2931 : ListCell *lc;
2932 :
2933 : /*
2934 : * Start by stripping out empty sets. The algorithm doesn't require this,
2935 : * but the planner currently needs all empty sets to be returned in the
2936 : * first list, so we strip them here and add them back after.
2937 : */
2938 648 : while (lc1 && lfirst(lc1) == NIL)
2939 : {
2940 287 : ++num_empty;
1364 tgl 2941 287 : lc1 = lnext(groupingSets, lc1);
2942 : }
2943 :
2944 : /* bail out now if it turns out that all we had were empty sets. */
2885 andres 2945 361 : if (!lc1)
2946 21 : return list_make1(groupingSets);
2947 :
2948 : /*----------
2878 bruce 2949 ECB : * We don't strictly need to remove duplicate sets here, but if we don't,
2950 : * they tend to become scattered through the result, which is a bit
2877 tgl 2951 : * confusing (and irritating if we ever decide to optimize them out).
2952 : * So we remove them here and add them back after.
2885 andres 2953 : *
2954 : * For each non-duplicate set, we fill in the following:
2955 : *
2956 : * orig_sets[i] = list of the original set lists
2957 : * set_masks[i] = bitmapset for testing inclusion
2958 : * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2959 : *
2960 : * chains[i] will be the result group this set is assigned to.
2961 : *
2962 : * We index all of these from 1 rather than 0 because it is convenient
2963 : * to leave 0 free for the NIL node in the graph algorithm.
2964 : *----------
2965 : */
2878 bruce 2966 CBC 340 : orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2885 andres 2967 GIC 340 : set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2968 340 : adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2969 340 : adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2970 :
2971 340 : j_size = 0;
2972 340 : j = 0;
2973 340 : i = 1;
2885 andres 2974 ECB :
1364 tgl 2975 GIC 1258 : for_each_cell(lc, groupingSets, lc1)
2885 andres 2976 ECB : {
2042 tgl 2977 CBC 918 : List *candidate = (List *) lfirst(lc);
2885 andres 2978 GIC 918 : Bitmapset *candidate_set = NULL;
2979 : ListCell *lc2;
2980 918 : int dup_of = 0;
2885 andres 2981 ECB :
2885 andres 2982 CBC 2253 : foreach(lc2, candidate)
2983 : {
2885 andres 2984 GIC 1335 : candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2985 : }
2986 :
2987 : /* we can only be a dup if we're the same length as a previous set */
2988 918 : if (j_size == list_length(candidate))
2989 : {
2990 : int k;
2991 :
2992 832 : for (k = j; k < i; ++k)
2993 : {
2994 540 : if (bms_equal(set_masks[k], candidate_set))
2995 : {
2996 79 : dup_of = k;
2997 79 : break;
2998 : }
2999 : }
3000 : }
3001 547 : else if (j_size < list_length(candidate))
2885 andres 3002 ECB : {
2885 andres 3003 CBC 547 : j_size = list_length(candidate);
3004 547 : j = i;
2885 andres 3005 ECB : }
3006 :
2885 andres 3007 CBC 918 : if (dup_of > 0)
2885 andres 3008 ECB : {
2885 andres 3009 CBC 79 : orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2885 andres 3010 GIC 79 : bms_free(candidate_set);
2885 andres 3011 ECB : }
3012 : else
3013 : {
2878 bruce 3014 : int k;
2878 bruce 3015 GIC 839 : int n_adj = 0;
2885 andres 3016 ECB :
2885 andres 3017 GIC 839 : orig_sets[i] = list_make1(candidate);
2885 andres 3018 CBC 839 : set_masks[i] = candidate_set;
3019 :
2885 andres 3020 ECB : /* fill in adjacency list; no need to compare equal-size sets */
3021 :
2885 andres 3022 GIC 1445 : for (k = j - 1; k > 0; --k)
3023 : {
2885 andres 3024 CBC 606 : if (bms_is_subset(set_masks[k], candidate_set))
2885 andres 3025 GIC 525 : adjacency_buf[++n_adj] = k;
3026 : }
3027 :
2885 andres 3028 CBC 839 : if (n_adj > 0)
3029 : {
3030 269 : adjacency_buf[0] = n_adj;
2885 andres 3031 GIC 269 : adjacency[i] = palloc((n_adj + 1) * sizeof(short));
2885 andres 3032 CBC 269 : memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
2885 andres 3033 ECB : }
3034 : else
2885 andres 3035 GIC 570 : adjacency[i] = NULL;
3036 :
2885 andres 3037 CBC 839 : ++i;
3038 : }
2885 andres 3039 ECB : }
3040 :
2885 andres 3041 GIC 340 : num_sets = i - 1;
3042 :
2885 andres 3043 ECB : /*
3044 : * Apply the graph matching algorithm to do the work.
3045 : */
2885 andres 3046 CBC 340 : state = BipartiteMatch(num_sets, num_sets, adjacency);
3047 :
3048 : /*
3049 : * Now, the state->pair* fields have the info we need to assign sets to
3050 : * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
2885 andres 3051 ECB : * pair_vu[v] = u (both will be true, but we check both so that we can do
3052 : * it in one pass)
3053 : */
2885 andres 3054 CBC 340 : chains = palloc0((num_sets + 1) * sizeof(int));
3055 :
2885 andres 3056 GIC 1179 : for (i = 1; i <= num_sets; ++i)
3057 : {
2878 bruce 3058 CBC 839 : int u = state->pair_vu[i];
2878 bruce 3059 GIC 839 : int v = state->pair_uv[i];
2885 andres 3060 ECB :
2885 andres 3061 CBC 839 : if (u > 0 && u < i)
2885 andres 3062 UIC 0 : chains[i] = chains[u];
2885 andres 3063 GIC 839 : else if (v > 0 && v < i)
2885 andres 3064 CBC 255 : chains[i] = chains[v];
3065 : else
3066 584 : chains[i] = ++num_chains;
2885 andres 3067 ECB : }
3068 :
3069 : /* build result lists. */
2878 bruce 3070 GIC 340 : results = palloc0((num_chains + 1) * sizeof(List *));
2885 andres 3071 ECB :
2885 andres 3072 GIC 1179 : for (i = 1; i <= num_sets; ++i)
2885 andres 3073 ECB : {
2878 bruce 3074 GIC 839 : int c = chains[i];
3075 :
2885 andres 3076 839 : Assert(c > 0);
2885 andres 3077 ECB :
2885 andres 3078 GIC 839 : results[c] = list_concat(results[c], orig_sets[i]);
3079 : }
3080 :
3081 : /* push any empty sets back on the first list. */
2885 andres 3082 CBC 582 : while (num_empty-- > 0)
2885 andres 3083 GIC 242 : results[1] = lcons(NIL, results[1]);
3084 :
3085 : /* make result list */
3086 924 : for (i = 1; i <= num_chains; ++i)
3087 584 : result = lappend(result, results[i]);
3088 :
3089 : /*
2885 andres 3090 ECB : * Free all the things.
3091 : *
3092 : * (This is over-fussy for small sets but for large sets we could have
3093 : * tied up a nontrivial amount of memory.)
3094 : */
2885 andres 3095 CBC 340 : BipartiteMatchFree(state);
2885 andres 3096 GIC 340 : pfree(results);
2885 andres 3097 CBC 340 : pfree(chains);
2885 andres 3098 GBC 1179 : for (i = 1; i <= num_sets; ++i)
2885 andres 3099 CBC 839 : if (adjacency[i])
3100 269 : pfree(adjacency[i]);
2885 andres 3101 GIC 340 : pfree(adjacency);
2885 andres 3102 CBC 340 : pfree(adjacency_buf);
2885 andres 3103 GIC 340 : pfree(orig_sets);
3104 1179 : for (i = 1; i <= num_sets; ++i)
3105 839 : bms_free(set_masks[i]);
2885 andres 3106 CBC 340 : pfree(set_masks);
3107 :
3108 340 : return result;
3109 : }
2885 andres 3110 ECB :
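/*
 * [Editor's worked example, added in editing] GROUPING SETS
 * ((a,b,c), (a,b), (a), (c)) forms a poset under set inclusion; the
 * minimal chain decomposition found via the bipartite matching is
 *
 *   chain 1: (a) < (a,b) < (a,b,c)    -- one rollup, one sort
 *   chain 2: (c)
 *
 * so two sorted aggregation passes suffice instead of four.
 */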
3111 : /*
3112 : * Reorder the elements of a list of grouping sets such that they have correct
3113 : * prefix relationships. Also inserts the GroupingSetData annotations.
3114 : *
3115 : * The input must be ordered with smallest sets first; the result is returned
3116 : * with largest sets first. Note that the result shares no list substructure
3117 : * with the input, so it's safe for the caller to modify it later.
3118 : *
3119 : * If we're passed in a sortclause, we follow its order of columns to the
3120 : * extent possible, to minimize the chance that we add unnecessary sorts.
3121 : * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3122 : * gets implemented in one pass.)
3123 : */
3124 : static List *
201 pg 3125 GNC 605 : reorder_grouping_sets(List *groupingSets, List *sortclause)
3126 : {
3127 : ListCell *lc;
2885 andres 3128 GIC 605 : List *previous = NIL;
3129 605 : List *result = NIL;
3130 :
201 pg 3131 GNC 1810 : foreach(lc, groupingSets)
2885 andres 3132 ECB : {
2042 tgl 3133 CBC 1205 : List *candidate = (List *) lfirst(lc);
2878 bruce 3134 1205 : List *new_elems = list_difference_int(candidate, previous);
2204 rhodiumtoad 3135 1205 : GroupingSetData *gs = makeNode(GroupingSetData);
2885 andres 3136 ECB :
1379 rhodiumtoad 3137 CBC 1245 : while (list_length(sortclause) > list_length(previous) &&
3138 : new_elems != NIL)
2885 andres 3139 ECB : {
1379 rhodiumtoad 3140 CBC 94 : SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3141 94 : int ref = sc->tleSortGroupRef;
2878 bruce 3142 ECB :
1379 rhodiumtoad 3143 GIC 94 : if (list_member_int(new_elems, ref))
1379 rhodiumtoad 3144 ECB : {
1379 rhodiumtoad 3145 GIC 40 : previous = lappend_int(previous, ref);
3146 40 : new_elems = list_delete_int(new_elems, ref);
3147 : }
3148 : else
3149 : {
3150 : /* diverged from the sortclause; give up on it */
3151 54 : sortclause = NIL;
3152 54 : break;
3153 : }
3154 : }
3155 :
3156 1205 : previous = list_concat(previous, new_elems);
3157 :
2204 3158 1205 : gs->set = list_copy(previous);
3159 1205 : result = lcons(gs, result);
3160 : }
2885 andres 3161 ECB :
2885 andres 3162 GIC 605 : list_free(previous);
3163 :
2885 andres 3164 CBC 605 : return result;
5363 tgl 3165 ECB : }
3166 :
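/*
 * [Editor's sketch] For GROUPING SETS ((a,b,c), (c)) ORDER BY c, b, a the
 * input arrives smallest-first as ((c), (a,b,c)). Following the
 * sortclause yields column order c, then b, then a, giving (largest
 * first) ((c,b,a), (c)): each set is a prefix of the one before it, so a
 * single sort on (c, b, a) implements both the rollup and the ORDER BY.
 */
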
3167 : /*
3168 : * has_volatile_pathkey
3169 : * Returns true if any PathKey in 'keys' has an EquivalenceClass
3170 : * containing a volatile function. Otherwise returns false.
3171 : */
3172 : static bool
82 drowley 3173 GNC 1021 : has_volatile_pathkey(List *keys)
3174 : {
3175 : ListCell *lc;
3176 :
3177 2123 : foreach(lc, keys)
3178 : {
3179 1108 : PathKey *pathkey = lfirst_node(PathKey, lc);
3180 :
3181 1108 : if (pathkey->pk_eclass->ec_has_volatile)
3182 6 : return true;
3183 : }
3184 :
3185 1015 : return false;
3186 : }
3187 :
3188 : /*
3189 : * adjust_group_pathkeys_for_groupagg
3190 : * Add pathkeys to root->group_pathkeys to reflect the best set of
3191 : * pre-ordered input for ordered aggregates.
3192 : *
3193 : * We define "best" as the pathkeys that suit the largest number of
3194 : * aggregate functions. We find these by looking at the first ORDER BY /
3195 : * DISTINCT aggregate and take the pathkeys for that before searching for
3196 : * DISTINCT aggregate and taking the pathkeys for that before searching for
3197 : * same pathkeys. We then repeat that process for any remaining aggregates
3198 : * with different pathkeys and if we find another set of pathkeys that suits a
3199 : * larger number of aggregates then we select those pathkeys instead.
3200 : *
3201 : * When the best pathkeys are found we also mark each Aggref that can use
3202 : * those pathkeys as aggpresorted = true.
3203 : *
3204 : * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3205 : * volatile functions, we never make use of these pathkeys. We want to ensure
3206 : * that sorts using volatile functions are done independently in each Aggref
3207 : * rather than once at the query level. If we were to allow this then Aggrefs
3208 : * with compatible sort orders would all transition their rows in the same
3209 : * order if those pathkeys were deemed to be the best pathkeys to sort on.
3210 : * Whereas, if some other set of Aggref's pathkeys happened to be deemed
3211 : * By contrast, if some other set of Aggref's pathkeys happened to be deemed
3212 : * better pathkeys to sort on, then the volatile-function Aggrefs would be
3213 : * left to perform their sorts individually. To avoid this inconsistent
3214 : * behavior, which could make Aggref results depend on what other Aggrefs the
3215 : * their own sorts.
3216 : */
3217 : static void
81 tgl 3218 811 : adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
3219 : {
3220 811 : List *grouppathkeys = root->group_pathkeys;
3221 : List *bestpathkeys;
3222 : Bitmapset *bestaggs;
3223 : Bitmapset *unprocessed_aggs;
3224 : ListCell *lc;
3225 : int i;
3226 :
3227 : /* Shouldn't be here if there are grouping sets */
3228 811 : Assert(root->parse->groupingSets == NIL);
3229 : /* Shouldn't be here unless there are some ordered aggregates */
3230 811 : Assert(root->numOrderedAggs > 0);
3231 :
3232 : /* Do nothing if disabled */
3233 811 : if (!enable_presorted_aggregate)
3234 3 : return;
3235 :
3236 : /*
3237 : * Make a first pass over all AggInfos to collect a Bitmapset containing
3238 : * the indexes of all AggInfos to be processed below.
3239 : */
250 drowley 3240 808 : unprocessed_aggs = NULL;
3241 1955 : foreach(lc, root->agginfos)
3242 : {
3243 1147 : AggInfo *agginfo = lfirst_node(AggInfo, lc);
3244 1147 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3245 :
3246 1147 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3247 132 : continue;
3248 :
3249 : /* only add aggregates with a DISTINCT or ORDER BY */
3250 1015 : if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3251 868 : unprocessed_aggs = bms_add_member(unprocessed_aggs,
3252 : foreach_current_index(lc));
3253 : }
3254 :
3255 : /*
3256 : * Now process all the unprocessed_aggs to find the best set of pathkeys
3257 : * for the given set of aggregates.
3258 : *
3259 : * On the first outer loop here 'bestaggs' will be empty. We'll populate
3260 : * this during the first loop using the pathkeys for the very first
3261 : * AggInfo then taking any stronger pathkeys from any other AggInfos with
3262 : * a more strict set of compatible pathkeys. Once the outer loop is
3263 : * complete, we mark off all the aggregates with compatible pathkeys then
3264 : * remove those from the unprocessed_aggs and repeat the process to try to
3265 : * find another set of pathkeys that are suitable for a larger number of
3266 : * aggregates. The outer loop will stop when there are not enough
3267 : * unprocessed aggregates for it to be possible to find a set of pathkeys
3268 : * to suit a larger number of aggregates.
3269 : */
3270 808 : bestpathkeys = NIL;
3271 808 : bestaggs = NULL;
3272 1592 : while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3273 : {
3274 784 : Bitmapset *aggindexes = NULL;
3275 784 : List *currpathkeys = NIL;
3276 :
3277 784 : i = -1;
3278 1805 : while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3279 : {
3280 1021 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3281 1021 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3282 : List *sortlist;
3283 : List *pathkeys;
3284 :
3285 1021 : if (aggref->aggdistinct != NIL)
3286 341 : sortlist = aggref->aggdistinct;
3287 : else
3288 680 : sortlist = aggref->aggorder;
3289 :
82 3290 1021 : pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3291 : aggref->args);
3292 :
3293 : /*
3294 : * Ignore Aggrefs which have volatile functions in their ORDER BY
3295 : * or DISTINCT clause.
3296 : */
3297 1021 : if (has_volatile_pathkey(pathkeys))
3298 : {
3299 6 : unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3300 6 : continue;
3301 : }
3302 :
3303 : /*
3304 : * When not set yet, take the pathkeys from the first unprocessed
3305 : * aggregate.
3306 : */
250 3307 1015 : if (currpathkeys == NIL)
3308 : {
82 3309 784 : currpathkeys = pathkeys;
3310 :
3311 : /* include the GROUP BY pathkeys, if they exist */
250 3312 784 : if (grouppathkeys != NIL)
3313 111 : currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3314 : currpathkeys);
3315 :
3316 : /* record that we found pathkeys for this aggregate */
3317 784 : aggindexes = bms_add_member(aggindexes, i);
3318 : }
3319 : else
3320 : {
3321 : /* now look for a stronger set of matching pathkeys */
3322 :
3323 : /* include the GROUP BY pathkeys, if they exist */
3324 231 : if (grouppathkeys != NIL)
3325 144 : pathkeys = append_pathkeys(list_copy(grouppathkeys),
3326 : pathkeys);
3327 :
3328 : /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3329 231 : switch (compare_pathkeys(currpathkeys, pathkeys))
3330 : {
3331 6 : case PATHKEYS_BETTER2:
3332 : /* 'pathkeys' are stronger, use these ones instead */
3333 6 : currpathkeys = pathkeys;
3334 : /* FALLTHROUGH */
3335 :
3336 36 : case PATHKEYS_BETTER1:
3337 : /* 'pathkeys' are less strict */
3338 : /* FALLTHROUGH */
3339 :
3340 : case PATHKEYS_EQUAL:
3341 : /* mark this aggregate as covered by 'currpathkeys' */
3342 36 : aggindexes = bms_add_member(aggindexes, i);
3343 36 : break;
3344 :
3345 195 : case PATHKEYS_DIFFERENT:
3346 195 : break;
3347 : }
3348 : }
3349 : }
3350 :
3351 : /* remove the aggregates that we've just processed */
3352 784 : unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3353 :
3354 : /*
3355 : * If this pass included more aggregates than the previous best then
3356 : * use these ones as the best set.
3357 : */
3358 784 : if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3359 : {
3360 733 : bestaggs = aggindexes;
3361 733 : bestpathkeys = currpathkeys;
3362 : }
3363 : }
3364 :
3365 : /*
3366 : * If we found any ordered aggregates, update root->group_pathkeys to add
3367 : * the best set of aggregate pathkeys. Note that bestpathkeys includes
3368 : * the original GROUP BY pathkeys already.
3369 : */
81 tgl 3370 808 : if (bestpathkeys != NIL)
3371 715 : root->group_pathkeys = bestpathkeys;
3372 :
3373 : /*
3374 : * Now that we've found the best set of aggregates we can set the
3375 : * presorted flag to indicate to the executor that it needn't bother
3376 : * performing a sort for these Aggrefs. We're able to do this now as
3377 : * there's no chance of a Hash Aggregate plan as create_grouping_paths
3378 : * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3379 : * of ordered aggregates.
3380 : */
250 drowley 3381 808 : i = -1;
3382 1562 : while ((i = bms_next_member(bestaggs, i)) >= 0)
3383 : {
3384 754 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3385 :
3386 1517 : foreach(lc, agginfo->aggrefs)
3387 : {
3388 763 : Aggref *aggref = lfirst_node(Aggref, lc);
3389 :
3390 763 : aggref->aggpresorted = true;
3391 : }
3392 : }
3393 : }
3394 :
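/*
 * [Editor's example, a sketch; the chosen ordering depends on which
 * AggInfo is encountered first] In
 *
 *   SELECT string_agg(x, ',' ORDER BY y, z), array_agg(x ORDER BY y)
 *   FROM t;
 *
 * the first aggregate contributes pathkeys (y, z); comparing the second
 * aggregate's (y) against those yields PATHKEYS_BETTER1, so it is covered
 * by the same ordering. Both Aggrefs get aggpresorted = true and a single
 * input sort on (y, z) serves both.
 */
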
3632 tgl 3395 ECB : /*
3396 : * Compute query_pathkeys and other pathkeys during plan generation
3397 : */
3398 : static void
3632 tgl 3399 CBC 222891 : standard_qp_callback(PlannerInfo *root, void *extra)
3400 : {
3401 222891 : Query *parse = root->parse;
3632 tgl 3402 GIC 222891 : standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
1474 3403 222891 : List *tlist = root->processed_tlist;
3632 tgl 3404 CBC 222891 : List *activeWindows = qp_extra->activeWindows;
3632 tgl 3405 ECB :
3406 : /*
3407 : * Calculate pathkeys that represent grouping/ordering and/or ordered
3408 : * aggregate requirements.
3409 : */
81 tgl 3410 GNC 222891 : if (qp_extra->gset_data)
3411 : {
3412 : /*
3413 : * With grouping sets, just use the first RollupData's groupClause. We
3414 : * don't make any effort to optimize grouping clauses when there are
3415 : * grouping sets, nor can we combine aggregate ordering keys with
3416 : * grouping.
3417 : */
3418 364 : List *rollups = qp_extra->gset_data->rollups;
3419 364 : List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3420 :
3421 364 : if (grouping_is_sortable(groupClause))
3422 : {
3423 364 : root->group_pathkeys = make_pathkeys_for_sortclauses(root,
3424 : groupClause,
3425 : tlist);
3426 364 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3427 : }
3428 : else
3429 : {
81 tgl 3430 UNC 0 : root->group_pathkeys = NIL;
3431 0 : root->num_groupby_pathkeys = 0;
3432 : }
3433 : }
81 tgl 3434 GNC 222527 : else if (parse->groupClause || root->numOrderedAggs > 0)
3435 2315 : {
3436 : /*
3437 : * With a plain GROUP BY list, we can remove any grouping items that
3438 : * are proven redundant by EquivalenceClass processing. For example,
3439 : * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3440 : * especially common cases, but they're nearly free to detect. Note
3441 : * that we remove redundant items from processed_groupClause but not
3442 : * the original parse->groupClause.
3443 : */
3444 : bool sortable;
3445 :
81 tgl 3446 GIC 2315 : root->group_pathkeys =
81 tgl 3447 GNC 2315 : make_pathkeys_for_sortclauses_extended(root,
3448 : &root->processed_groupClause,
3449 : tlist,
3450 : true,
3451 : &sortable);
3452 2315 : if (!sortable)
3453 : {
3454 : /* Can't sort; no point in considering aggregate ordering either */
81 tgl 3455 UNC 0 : root->group_pathkeys = NIL;
3456 0 : root->num_groupby_pathkeys = 0;
3457 : }
3458 : else
3459 : {
81 tgl 3460 GNC 2315 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3461 : /* If we have ordered aggs, consider adding onto group_pathkeys */
3462 2315 : if (root->numOrderedAggs > 0)
3463 811 : adjust_group_pathkeys_for_groupagg(root);
3464 : }
3465 : }
3466 : else
3467 : {
3632 tgl 3468 GIC 220212 : root->group_pathkeys = NIL;
250 drowley 3469 GNC 220212 : root->num_groupby_pathkeys = 0;
3470 : }
3471 :
3632 tgl 3472 ECB : /* We consider only the first (bottom) window in pathkeys logic */
3632 tgl 3473 GIC 222891 : if (activeWindows != NIL)
3632 tgl 3474 ECB : {
2042 tgl 3475 CBC 1017 : WindowClause *wc = linitial_node(WindowClause, activeWindows);
3476 :
3632 tgl 3477 GIC 1017 : root->window_pathkeys = make_pathkeys_for_window(root,
3632 tgl 3478 ECB : wc,
3479 : tlist);
3480 : }
3481 : else
3632 tgl 3482 GIC 221874 : root->window_pathkeys = NIL;
3483 :
3484 : /*
3485 : * As with GROUP BY, we can discard any DISTINCT items that are proven
3486 : * redundant by EquivalenceClass processing. The non-redundant list is
3487 : * kept in root->processed_distinctClause, leaving the original
3488 : * parse->distinctClause alone.
3489 : */
81 tgl 3490 GNC 222891 : if (parse->distinctClause)
3491 : {
3492 : bool sortable;
3493 :
3494 : /* Make a copy since pathkey processing can modify the list */
3495 970 : root->processed_distinctClause = list_copy(parse->distinctClause);
3632 tgl 3496 GIC 970 : root->distinct_pathkeys =
81 tgl 3497 GNC 970 : make_pathkeys_for_sortclauses_extended(root,
3498 : &root->processed_distinctClause,
3499 : tlist,
3500 : true,
3501 : &sortable);
3502 970 : if (!sortable)
3503 3 : root->distinct_pathkeys = NIL;
3504 : }
3505 : else
3632 tgl 3506 GIC 221921 : root->distinct_pathkeys = NIL;
3507 :
3632 tgl 3508 CBC 222891 : root->sort_pathkeys =
3632 tgl 3509 GIC 222891 : make_pathkeys_for_sortclauses(root,
3632 tgl 3510 ECB : parse->sortClause,
3511 : tlist);
3512 :
3513 : /*
3514 : * Figure out whether we want a sorted result from query_planner.
3515 : *
3516 : * If we have a sortable GROUP BY clause, then we want a result sorted
3517 : * properly for grouping. Otherwise, if we have window functions to
3518 : * evaluate, we try to sort for the first window. Otherwise, if there's a
3519 : * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3520 : * we try to produce output that's sufficiently well sorted for the
3521 : * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3522 : * by the ORDER BY clause.
3523 : *
3524 : * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3525 : * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3526 : * that might just leave us failing to exploit an available sort order at
3527 : * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3528 : * much easier, since we know that the parser ensured that one is a
3529 : * superset of the other.
3530 : */
3632 tgl 3531 GIC 222891 : if (root->group_pathkeys)
3532 2518 : root->query_pathkeys = root->group_pathkeys;
3533 220373 : else if (root->window_pathkeys)
3534 882 : root->query_pathkeys = root->window_pathkeys;
3535 438982 : else if (list_length(root->distinct_pathkeys) >
3536 219491 : list_length(root->sort_pathkeys))
3537 784 : root->query_pathkeys = root->distinct_pathkeys;
3538 218707 : else if (root->sort_pathkeys)
3539 22278 : root->query_pathkeys = root->sort_pathkeys;
3540 : else
3541 196429 : root->query_pathkeys = NIL;
3542 222891 : }
3543 :
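/*
 * [Editor's example] In SELECT DISTINCT a, b FROM t ORDER BY a, the
 * distinct_pathkeys (a, b) are longer than the sort_pathkeys (a), so
 * query_pathkeys is set to the DISTINCT ordering; sorting by (a, b)
 * satisfies both the DISTINCT and the ORDER BY, since the parser has
 * ensured that one list is a superset of the other.
 */
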
3544 : /*
3545 : * Estimate number of groups produced by grouping clauses (1 if not grouping)
3546 : *
3547 : * path_rows: number of output rows from scan/join step
3548 : * gd: grouping sets data including list of grouping sets and their clauses
1851 rhaas 3549 ECB : * target_list: target list containing group clause references
3550 : *
2204 rhodiumtoad 3551 : * If doing grouping sets, we also annotate the gsets data with the estimates
3552 : * for each set and each individual rollup list, with a view to later
3553 : * determining whether some combination of them could be hashed instead.
3554 : */
3555 : static double
2589 tgl 3556 GIC 17798 : get_number_of_groups(PlannerInfo *root,
3557 : double path_rows,
3558 : grouping_sets_data *gd,
1851 rhaas 3559 ECB : List *target_list)
3560 : {
4806 tgl 3561 CBC 17798 : Query *parse = root->parse;
3562 : double dNumGroups;
3563 :
2589 3564 17798 : if (parse->groupClause)
4806 tgl 3565 ECB : {
3566 : List *groupExprs;
3567 :
2589 tgl 3568 GIC 3140 : if (parse->groupingSets)
3569 : {
3570 : /* Add up the estimates for each grouping set */
2204 rhodiumtoad 3571 ECB : ListCell *lc;
3572 :
2153 bruce 3573 CBC 343 : Assert(gd); /* keep Coverity happy */
2197 rhodiumtoad 3574 ECB :
2589 tgl 3575 GIC 343 : dNumGroups = 0;
2204 rhodiumtoad 3576 ECB :
2204 rhodiumtoad 3577 CBC 927 : foreach(lc, gd->rollups)
3578 : {
2042 tgl 3579 GIC 584 : RollupData *rollup = lfirst_node(RollupData, lc);
3580 : ListCell *lc2;
3581 : ListCell *lc3;
2589 tgl 3582 ECB :
2204 rhodiumtoad 3583 GIC 584 : groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3584 : target_list);
3585 :
3586 584 : rollup->numGroups = 0.0;
3587 :
186 drowley 3588 GNC 1744 : forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3589 : {
3590 1160 : List *gset = (List *) lfirst(lc2);
3591 1160 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
2204 rhodiumtoad 3592 GIC 1160 : double numGroups = estimate_num_groups(root,
3593 : groupExprs,
3594 : path_rows,
3595 : &gset,
3596 : NULL);
3597 :
3598 1160 : gs->numGroups = numGroups;
3599 1160 : rollup->numGroups += numGroups;
3600 : }
2204 rhodiumtoad 3601 ECB :
2204 rhodiumtoad 3602 CBC 584 : dNumGroups += rollup->numGroups;
2204 rhodiumtoad 3603 ECB : }
3604 :
2204 rhodiumtoad 3605 CBC 343 : if (gd->hash_sets_idx)
2204 rhodiumtoad 3606 ECB : {
3607 : ListCell *lc2;
186 drowley 3608 :
2204 rhodiumtoad 3609 CBC 18 : gd->dNumHashGroups = 0;
3610 :
3611 18 : groupExprs = get_sortgrouplist_exprs(parse->groupClause,
1851 rhaas 3612 ECB : target_list);
3613 :
2204 rhodiumtoad 3614 GIC 39 : forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3615 : {
2204 rhodiumtoad 3616 CBC 21 : List *gset = (List *) lfirst(lc);
2042 tgl 3617 21 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
2204 rhodiumtoad 3618 GIC 21 : double numGroups = estimate_num_groups(root,
2204 rhodiumtoad 3619 ECB : groupExprs,
3620 : path_rows,
740 drowley 3621 : &gset,
3622 : NULL);
3623 :
2204 rhodiumtoad 3624 GIC 21 : gs->numGroups = numGroups;
3625 21 : gd->dNumHashGroups += numGroups;
3626 : }
3627 :
2204 rhodiumtoad 3628 CBC 18 : dNumGroups += gd->dNumHashGroups;
3629 : }
2589 tgl 3630 ECB : }
4806 3631 : else
3632 : {
3633 : /* Plain GROUP BY -- estimate based on optimized groupClause */
81 tgl 3634 GNC 2797 : groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3635 : target_list);
3636 :
2589 tgl 3637 GIC 2797 : dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
740 drowley 3638 ECB : NULL, NULL);
3639 : }
2589 tgl 3640 : }
2589 tgl 3641 GIC 14658 : else if (parse->groupingSets)
3642 : {
2589 tgl 3643 ECB : /* Empty grouping sets ... one result row for each one */
2589 tgl 3644 CBC 21 : dNumGroups = list_length(parse->groupingSets);
3645 : }
2589 tgl 3646 GIC 14637 : else if (parse->hasAggs || root->hasHavingQual)
3647 : {
2589 tgl 3648 ECB : /* Plain aggregation, one result row */
2589 tgl 3649 GIC 14637 : dNumGroups = 1;
3650 : }
3651 : else
3652 : {
3653 : /* Not grouping */
2589 tgl 3654 UIC 0 : dNumGroups = 1;
4806 tgl 3655 ECB : }
3656 :
2589 tgl 3657 GIC 17798 : return dNumGroups;
3658 : }
3659 :
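/*
 * [Editor's sketch, with purely illustrative numbers] For
 * GROUP BY ROLLUP (a, b) there is one rollup containing the sets (a,b),
 * (a) and (); each set's group count is estimated separately and summed,
 * e.g. 1000 + 100 + 1 = 1101 expected output groups, which later feeds
 * the sort-versus-hash costing.
 */
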
2589 tgl 3660 ECB : /*
3661 : * create_grouping_paths
3662 : *
3663 : * Build a new upperrel containing Paths for grouping and/or aggregation.
1868 rhaas 3664 : * Along the way, we also build an upperrel for Paths which are partially
3665 : * grouped and/or aggregated. A partially grouped and/or aggregated path
3666 : * needs a FinalizeAggregate node to complete the aggregation. Currently,
3667 : * the only partially grouped paths we build are also partial paths; that
3668 : * is, they need a Gather and then a FinalizeAggregate.
3669 : *
3670 : * input_rel: contains the source-data Paths
3671 : * target: the pathtarget for the result Paths to compute
3672 : * gd: grouping sets data including list of grouping sets and their clauses
2589 tgl 3673 : *
2587 3674 : * Note: all Paths in input_rel are expected to return the target computed
3675 : * by make_group_input_target.
2589 3676 : */
3677 : static RelOptInfo *
2589 tgl 3678 GIC 16240 : create_grouping_paths(PlannerInfo *root,
3679 : RelOptInfo *input_rel,
3680 : PathTarget *target,
3681 : bool target_parallel_safe,
3682 : grouping_sets_data *gd)
2589 tgl 3683 ECB : {
2589 tgl 3684 GIC 16240 : Query *parse = root->parse;
3685 : RelOptInfo *grouped_rel;
3686 : RelOptInfo *partially_grouped_rel;
3687 : AggClauseCosts agg_costs;
3688 :
866 heikki.linnakangas 3689 CBC 97440 : MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
866 heikki.linnakangas 3690 GIC 16240 : get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
2575 rhaas 3691 ECB :
1868 3692 : /*
3693 : * Create grouping relation to hold fully aggregated grouping and/or
3694 : * aggregation paths.
3695 : */
1844 rhaas 3696 GIC 16240 : grouped_rel = make_grouping_rel(root, input_rel, target,
3697 : target_parallel_safe, parse->havingQual);
3698 :
3699 : /*
3700 : * Create either paths for a degenerate grouping or paths for ordinary
1851 rhaas 3701 ECB : * grouping, as appropriate.
6573 tgl 3702 : */
1851 rhaas 3703 GIC 16240 : if (is_degenerate_grouping(root))
1846 3704 9 : create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3705 : else
3706 : {
3707 16231 : int flags = 0;
3708 : GroupPathExtraData extra;
3709 :
3710 : /*
3711 : * Determine whether it's possible to perform sort-based
3712 : * implementations of grouping. (Note that if processed_groupClause
3713 : * is empty, grouping_is_sortable() is trivially true, and all the
3714 : * pathkeys_contained_in() tests will succeed too, so that we'll
1846 rhaas 3715 ECB : * consider every surviving input path.)
3716 : *
3717 : * If we have grouping sets, we might be able to sort some but not all
3718 : * of them; in this case, we need can_sort to be true as long as we
3719 : * must consider any sorted-input plan.
3720 : */
1846 rhaas 3721 CBC 16231 : if ((gd && gd->rollups != NIL)
81 tgl 3722 GNC 15870 : || grouping_is_sortable(root->processed_groupClause))
1846 rhaas 3723 GIC 16228 : flags |= GROUPING_CAN_USE_SORT;
3724 :
3725 : /*
3726 : * Determine whether we should consider hash-based implementations of
3727 : * grouping.
3728 : *
3729 : * Hashed aggregation only applies if we're grouping. If we have
1846 rhaas 3730 ECB : * grouping sets, some groups might be hashable but others not; in
3731 : * this case we set can_hash true as long as there is nothing globally
3732 : * preventing us from hashing (and we should therefore consider plans
3733 : * with hashes).
3734 : *
3735 : * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3736 : * BY aggregates. (Doing so would imply storing *all* the input
3737 : * values in the hash table, and/or running many sorts in parallel,
3738 : * either of which seems like a certain loser.) We similarly don't
3739 : * support ordered-set aggregates in hashed aggregation, but that case
3740 : * is also included in the numOrderedAggs count.
3741 : *
3742 : * Note: grouping_is_hashable() is much more expensive to check than
3743 : * the other gating conditions, so we want to do it last.
3744 : */
1846 rhaas 3745 GIC 16231 : if ((parse->groupClause != NIL &&
866 heikki.linnakangas 3746 3770 : root->numOrderedAggs == 0 &&
81 tgl 3747 GNC 1829 : (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
1846 rhaas 3748 GIC 1827 : flags |= GROUPING_CAN_USE_HASH;
1846 rhaas 3749 ECB :
3750 : /*
3751 : * Determine whether partial aggregation is possible.
3752 : */
866 heikki.linnakangas 3753 GIC 16231 : if (can_partial_agg(root))
1846 rhaas 3754 CBC 14428 : flags |= GROUPING_CAN_PARTIAL_AGG;
3755 :
1844 rhaas 3756 GIC 16231 : extra.flags = flags;
1844 rhaas 3757 CBC 16231 : extra.target_parallel_safe = target_parallel_safe;
1844 rhaas 3758 GIC 16231 : extra.havingQual = parse->havingQual;
3759 16231 : extra.targetList = parse->targetList;
3760 16231 : extra.partial_costs_set = false;
1844 rhaas 3761 EUB :
3762 : /*
3763 : * Determine whether partitionwise aggregation is in theory possible.
3764 : * It can be disabled by the user, and for now, we don't try to
1844 rhaas 3765 ECB : * support grouping sets. create_ordinary_grouping_paths() will check
3766 : * additional conditions, such as whether input_rel is partitioned.
3767 : */
1844 rhaas 3768 GIC 16231 : if (enable_partitionwise_aggregate && !parse->groupingSets)
3769 224 : extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3770 : else
3771 16007 : extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3772 :
1846 3773 16231 : create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3774 : &agg_costs, gd, &extra,
3775 : &partially_grouped_rel);
3776 : }
1851 rhaas 3777 ECB :
1851 rhaas 3778 CBC 16237 : set_cheapest(grouped_rel);
1851 rhaas 3779 GIC 16237 : return grouped_rel;
3780 : }
3781 :
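/*
 * [Editor's example] For SELECT a, count(DISTINCT b) FROM t GROUP BY a,
 * the DISTINCT aggregate is included in the numOrderedAggs count, so
 * GROUPING_CAN_USE_HASH is not set and only sort-based grouping paths
 * are generated for this query.
 */
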
3782 : /*
1844 rhaas 3783 ECB : * make_grouping_rel
3784 : *
3785 : * Create a new grouping rel and set basic properties.
1844 rhaas 3786 EUB : *
3787 : * input_rel represents the underlying scan/join relation.
3788 : * target is the output expected from the grouping relation.
3789 : */
3790 : static RelOptInfo *
1844 rhaas 3791 CBC 16939 : make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3792 : PathTarget *target, bool target_parallel_safe,
1844 rhaas 3793 ECB : Node *havingQual)
3794 : {
3795 : RelOptInfo *grouped_rel;
3796 :
1844 rhaas 3797 GIC 16939 : if (IS_OTHER_REL(input_rel))
3798 : {
1844 rhaas 3799 CBC 699 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
1844 rhaas 3800 ECB : input_rel->relids);
1844 rhaas 3801 GIC 699 : grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3802 : }
3803 : else
1844 rhaas 3804 ECB : {
3805 : /*
3806 : * By tradition, the relids set for the main grouping relation is
3807 : * NULL. (This could be changed, but might require adjustments
3808 : * elsewhere.)
3809 : */
1844 rhaas 3810 GIC 16240 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3811 : }
3812 :
1844 rhaas 3813 ECB : /* Set target. */
1844 rhaas 3814 GIC 16939 : grouped_rel->reltarget = target;
3815 :
3816 : /*
3817 : * If the input relation is not parallel-safe, then the grouped relation
3818 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3819 : * target list and HAVING quals are parallel-safe.
3820 : */
1844 rhaas 3821 CBC 29157 : if (input_rel->consider_parallel && target_parallel_safe &&
1844 rhaas 3822 GIC 12218 : is_parallel_safe(root, (Node *) havingQual))
3823 12209 : grouped_rel->consider_parallel = true;
3824 :
3825 : /*
1844 rhaas 3826 ECB : * If the input rel belongs to a single FDW, so does the grouped rel.
3827 : */
1844 rhaas 3828 CBC 16939 : grouped_rel->serverid = input_rel->serverid;
1844 rhaas 3829 GIC 16939 : grouped_rel->userid = input_rel->userid;
3830 16939 : grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3831 16939 : grouped_rel->fdwroutine = input_rel->fdwroutine;
3832 :
1844 rhaas 3833 CBC 16939 : return grouped_rel;
1844 rhaas 3834 ECB : }
3835 :
3836 : /*
1851 3837 : * is_degenerate_grouping
3838 : *
3839 : * A degenerate grouping is one in which the query has a HAVING qual and/or
3840 : * grouping sets, but no aggregates and no GROUP BY (which implies that the
3841 : * grouping sets are all empty).
3842 : */
3843 : static bool
1851 rhaas 3844 GIC 16240 : is_degenerate_grouping(PlannerInfo *root)
3845 : {
3846 16240 : Query *parse = root->parse;
3847 :
3848 15942 : return (root->hasHavingQual || parse->groupingSets) &&
3849 32182 : !parse->hasAggs && parse->groupClause == NIL;
3850 : }
3851 :
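/*
 * [Editor's example] SELECT 1 FROM t HAVING 2 > 1 is degenerate: it has
 * a HAVING qual but no aggregates and no GROUP BY, so it emits exactly
 * zero or one row regardless of t's contents, and the scan of t can be
 * replaced by a simple Result node.
 */
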
3852 : /*
3853 : * create_degenerate_grouping_paths
3854 : *
3855 : * When the grouping is degenerate (see is_degenerate_grouping), we are
3856 : * supposed to emit either zero or one row for each grouping set depending on
3857 : * whether HAVING succeeds. Furthermore, there cannot be any variables in
3858 : * either HAVING or the targetlist, so we actually do not need the FROM table
3859 : * at all! We can just throw away the plan-so-far and generate a Result node.
3860 : * This is a sufficiently unusual corner case that it's not worth contorting
3861 : * the structure of this module to avoid having to generate the earlier paths
1851 rhaas 3862 ECB : * in the first place.
3863 : */
3864 : static void
1851 rhaas 3865 CBC 9 : create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
1846 rhaas 3866 ECB : RelOptInfo *grouped_rel)
1851 3867 : {
1851 rhaas 3868 CBC 9 : Query *parse = root->parse;
1851 rhaas 3869 ECB : int nrows;
3870 : Path *path;
3871 :
1851 rhaas 3872 CBC 9 : nrows = list_length(parse->groupingSets);
3873 9 : if (nrows > 1)
3874 : {
3875 : /*
3876 : * Doesn't seem worthwhile writing code to cons up a generate_series
3877 : * or a values scan to emit multiple rows. Instead just make N clones
3878 : * and append them. (With a volatile HAVING clause, this means you
3879 : * might get between 0 and N output rows. Offhand I think that's
3880 : * desired.)
3881 : */
1851 rhaas 3882 UIC 0 : List *paths = NIL;
3883 :
3884 0 : while (--nrows >= 0)
3885 : {
3886 : path = (Path *)
1532 tgl 3887 LBC 0 : create_group_result_path(root, grouped_rel,
1532 tgl 3888 UIC 0 : grouped_rel->reltarget,
3889 0 : (List *) parse->havingQual);
1851 rhaas 3890 0 : paths = lappend(paths, path);
3891 : }
1851 rhaas 3892 ECB : path = (Path *)
1828 alvherre 3893 UIC 0 : create_append_path(root,
3894 : grouped_rel,
1851 rhaas 3895 ECB : paths,
3896 : NIL,
3897 : NIL,
3898 : NULL,
3899 : 0,
3900 : false,
3901 : -1);
3902 : }
3903 : else
3904 : {
3905 : /* No grouping sets, or just one, so one output row */
3906 : path = (Path *)
1532 tgl 3907 GIC 9 : create_group_result_path(root, grouped_rel,
1532 tgl 3908 CBC 9 : grouped_rel->reltarget,
1532 tgl 3909 GIC 9 : (List *) parse->havingQual);
1851 rhaas 3910 ECB : }
3911 :
1851 rhaas 3912 GIC 9 : add_path(grouped_rel, path);
3913 9 : }
2589 tgl 3914 ECB :
3915 : /*
3916 : * create_ordinary_grouping_paths
1851 rhaas 3917 : *
3918 : * Create grouping paths for the ordinary (that is, non-degenerate) case.
3919 : *
3920 : * We need to consider sorted and hashed aggregation in the same function,
3921 : * because otherwise (1) it would be harder to throw an appropriate error
3922 : * message if neither way works, and (2) we should not allow hashtable size
3923 : * considerations to dissuade us from using hashing if sorting is not possible.
3924 : *
3925 : * *partially_grouped_rel_p will be set to the partially grouped rel which this
3926 : * function creates, or to NULL if it doesn't create one.
3927 : */
3928 : static void
1851 rhaas 3929 CBC 16930 : create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
1846 rhaas 3930 ECB : RelOptInfo *grouped_rel,
3931 : const AggClauseCosts *agg_costs,
3932 : grouping_sets_data *gd,
1844 3933 : GroupPathExtraData *extra,
3934 : RelOptInfo **partially_grouped_rel_p)
3935 : {
1851 rhaas 3936 CBC 16930 : Path *cheapest_path = input_rel->cheapest_total_path;
1846 rhaas 3937 GIC 16930 : RelOptInfo *partially_grouped_rel = NULL;
3938 : double dNumGroups;
1844 3939 16930 : PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
6573 tgl 3940 ECB :
3941 : /*
1844 rhaas 3942 : * If this is the topmost grouping relation or if the parent relation is
3943 : * doing some form of partitionwise aggregation, then we may be able to do
3944 : * it at this level also. However, if the input relation is not
1494 tgl 3945 : * partitioned, partitionwise aggregate is impossible.
3946 : */
1844 rhaas 3947 CBC 16930 : if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
1494 tgl 3948 923 : IS_PARTITIONED_REL(input_rel))
1844 rhaas 3949 ECB : {
3950 : /*
3951 : * If this is the topmost relation or if the parent relation is doing
3952 : * full partitionwise aggregation, then we can do full partitionwise
3953 : * aggregation provided that the GROUP BY clause contains all of the
3954 : * partitioning columns at this level. Otherwise, we can do at most
3955 : * partial partitionwise aggregation. But if partial aggregation is
3956 : * not supported in general then we can't use it for partitionwise
3957 : * aggregation either.
3958 : *
3959 : * Check parse->groupClause not processed_groupClause, because it's
3960 : * okay if some of the partitioning columns were proved redundant.
3961 : */
1844 rhaas 3962 CBC 532 : if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
1844 rhaas 3963 GIC 254 : group_by_has_partkey(input_rel, extra->targetList,
3964 254 : root->parse->groupClause))
3965 142 : patype = PARTITIONWISE_AGGREGATE_FULL;
3966 136 : else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3967 115 : patype = PARTITIONWISE_AGGREGATE_PARTIAL;
1844 rhaas 3968 ECB : else
1844 rhaas 3969 GIC 21 : patype = PARTITIONWISE_AGGREGATE_NONE;
3970 : }
2589 tgl 3971 ECB :
3972 : /*
3973 : * Before generating paths for grouped_rel, we first generate any possible
3974 : * partially grouped paths; that way, later code can easily consider both
1846 rhaas 3975 : * parallel and non-parallel approaches to grouping.
3976 : */
1844 rhaas 3977 GIC 16930 : if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
2575 rhaas 3978 ECB : {
3979 : bool force_rel_creation;
1844 3980 :
3981 : /*
3982 : * If we're doing partitionwise aggregation at this level, force
3983 : * creation of a partially_grouped_rel so we can add partitionwise
3984 : * paths to it.
3985 : */
1844 rhaas 3986 GIC 15091 : force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
3987 :
1846 rhaas 3988 EUB : partially_grouped_rel =
1846 rhaas 3989 GIC 15091 : create_partial_grouping_paths(root,
3990 : grouped_rel,
1846 rhaas 3991 ECB : input_rel,
3992 : gd,
3993 : extra,
3994 : force_rel_creation);
3995 : }
3996 :
3997 : /* Set out parameter. */
1844 rhaas 3998 GIC 16930 : *partially_grouped_rel_p = partially_grouped_rel;
3999 :
4000 : /* Apply partitionwise aggregation technique, if possible. */
4001 16930 : if (patype != PARTITIONWISE_AGGREGATE_NONE)
4002 257 : create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4003 : partially_grouped_rel, agg_costs,
4004 : gd, patype, extra);
4005 :
4006 : /* If we are doing partial aggregation only, return. */
4007 16930 : if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4008 : {
4009 297 : Assert(partially_grouped_rel);
4010 :
4011 297 : if (partially_grouped_rel->pathlist)
1844 rhaas 4012 CBC 297 : set_cheapest(partially_grouped_rel);
4013 :
1844 rhaas 4014 GIC 297 : return;
4015 : }
4016 :
4017 : /* Gather any partially grouped partial paths. */
1844 rhaas 4018 CBC 16633 : if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4019 : {
1846 rhaas 4020 GIC 700 : gather_grouping_paths(root, partially_grouped_rel);
4021 700 : set_cheapest(partially_grouped_rel);
4022 : }
2575 rhaas 4023 ECB :
1844 4024 : /*
4025 : * Estimate number of groups.
4026 : */
1844 rhaas 4027 GIC 16633 : dNumGroups = get_number_of_groups(root,
4028 : cheapest_path->rows,
4029 : gd,
1844 rhaas 4030 ECB : extra->targetList);
4031 :
4032 : /* Build final grouping paths */
1846 rhaas 4033 GIC 16633 : add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4034 : partially_grouped_rel, agg_costs, gd,
4035 : dNumGroups, extra);
4036 :
1899 rhaas 4037 ECB : /* Give a helpful error if we failed to find any implementation */
1899 rhaas 4038 CBC 16633 : if (grouped_rel->pathlist == NIL)
1899 rhaas 4039 GIC 3 : ereport(ERROR,
4040 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1899 rhaas 4041 ECB : errmsg("could not implement GROUP BY"),
4042 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4043 :
4044 : /*
4045 : * If there is an FDW that's responsible for all baserels of the query,
4046 : * let it consider adding ForeignPaths.
4047 : */
1899 rhaas 4048 GIC 16630 : if (grouped_rel->fdwroutine &&
4049 162 : grouped_rel->fdwroutine->GetForeignUpperPaths)
4050 162 : grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4051 : input_rel, grouped_rel,
4052 : extra);
4053 :
4054 : /* Let extensions possibly add some more paths */
1899 rhaas 4055 CBC 16630 : if (create_upper_paths_hook)
1899 rhaas 4056 LBC 0 : (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
1833 rhaas 4057 ECB : input_rel, grouped_rel,
4058 : extra);
4059 : }
4060 :
4061 : /*
4062 : * For a given input path, consider the possible ways of doing grouping sets on
4063 : * it, by combinations of hashing and sorting. This can be called multiple
4064 : * times, so it's important that it not scribble on input. No result is
4065 : * returned, but any generated paths are added to grouped_rel.
4066 : */
4067 : static void
1899 rhaas 4068 GIC 722 : consider_groupingsets_paths(PlannerInfo *root,
4069 : RelOptInfo *grouped_rel,
4070 : Path *path,
4071 : bool is_sorted,
4072 : bool can_hash,
4073 : grouping_sets_data *gd,
4074 : const AggClauseCosts *agg_costs,
4075 : double dNumGroups)
4076 : {
4077 722 : Query *parse = root->parse;
623 tgl 4078 722 : Size hash_mem_limit = get_hash_memory_limit();
1899 rhaas 4079 ECB :
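: /*
:  * Editor's note: get_hash_memory_limit() is work_mem scaled by
:  * hash_mem_multiplier, expressed in bytes; e.g. work_mem = 4MB with
:  * hash_mem_multiplier = 2.0 gives a hash table budget of 8MB.
:  */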
4080 : /*
4081 : * If we're not being offered sorted input, then only consider plans that
4082 : * can be done entirely by hashing.
4083 : *
4084 : * We can hash everything if it looks like it'll fit in hash_mem. But if
4085 : * the input is actually sorted despite not being advertised as such, we
4086 : * prefer to make use of that in order to use less memory.
4087 : *
984 pg 4088 : * If none of the grouping sets are sortable, then ignore the hash_mem
4089 : * limit and generate a path anyway, since otherwise we'll just fail.
1899 rhaas 4090 : */
1899 rhaas 4091 CBC 722 : if (!is_sorted)
2589 tgl 4092 ECB : {
1899 rhaas 4093 CBC 325 : List *new_rollups = NIL;
4094 325 : RollupData *unhashed_rollup = NULL;
4095 : List *sets_data;
1899 rhaas 4096 GIC 325 : List *empty_sets_data = NIL;
4097 325 : List *empty_sets = NIL;
4098 : ListCell *lc;
4099 325 : ListCell *l_start = list_head(gd->rollups);
4100 325 : AggStrategy strat = AGG_HASHED;
4101 : double hashsize;
1899 rhaas 4102 CBC 325 : double exclude_groups = 0.0;
2589 tgl 4103 ECB :
1899 rhaas 4104 GIC 325 : Assert(can_hash);
2589 tgl 4105 ECB :
4106 : /*
1845 rhodiumtoad 4107 : * If the input is coincidentally sorted usefully (which can happen
4108 : * even if is_sorted is false, since that only means that our caller
4109 : * has set up the sorting for us), then save some hashtable space by
4110 : * making use of that. But we need to watch out for degenerate cases:
4111 : *
4112 : * 1) If there are any empty grouping sets, then group_pathkeys might
4113 : * be NIL if all non-empty grouping sets are unsortable. In this case,
4114 : * there will be a rollup containing only empty groups, and the
4115 : * pathkeys_contained_in test is vacuously true; this is ok.
4116 : *
4117 : * XXX: the above relies on the fact that group_pathkeys is generated
4118 : * from the first rollup. If we add the ability to consider multiple
4119 : * sort orders for grouping input, this assumption might fail.
4120 : *
4121 : * 2) If there are no empty sets and only unsortable sets, then the
4122 : * rollups list will be empty (and thus l_start == NULL), and
4123 : * group_pathkeys will be NIL; we must ensure that the vacuously-true
4124 : * pathkeys_contained_in test doesn't cause us to crash.
4125 : */
1845 rhodiumtoad 4126 GIC 647 : if (l_start != NULL &&
4127 322 : pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4128 : {
1899 rhaas 4129 12 : unhashed_rollup = lfirst_node(RollupData, l_start);
4130 12 : exclude_groups = unhashed_rollup->numGroups;
1364 tgl 4131 CBC 12 : l_start = lnext(gd->rollups, l_start);
4132 : }
2589 tgl 4133 ECB :
866 heikki.linnakangas 4134 GIC 325 : hashsize = estimate_hashagg_tablesize(root,
866 heikki.linnakangas 4135 ECB : path,
4136 : agg_costs,
4137 : dNumGroups - exclude_groups);
4138 :
4139 : /*
4140 : * gd->rollups is empty if we have only unsortable columns to work
4141 : * with. Override the hash_mem limit in that case; otherwise, we'll
4142 : * rely on the sorted-input case to generate usable mixed paths.
4143 : */
623 tgl 4144 CBC 325 : if (hashsize > hash_mem_limit && gd->rollups)
1899 rhaas 4145 GIC 9 : return; /* nope, won't fit */
4146 :
4147 : /*
1899 rhaas 4148 ECB : * We need to burst the existing rollups list into individual grouping
4149 : * sets and recompute a groupClause for each set.
4150 : */
1899 rhaas 4151 GIC 316 : sets_data = list_copy(gd->unsortable_sets);
4152 :
1364 tgl 4153 798 : for_each_cell(lc, gd->rollups, l_start)
4154 : {
1899 rhaas 4155 CBC 494 : RollupData *rollup = lfirst_node(RollupData, lc);
2589 tgl 4156 ECB :
2575 rhaas 4157 : /*
4158 : * If we find an unhashable rollup that's not been skipped by the
4159 : * "actually sorted" check above, we can't cope; we'd need sorted
4160 : * input (with a different sort order) but we can't get that here.
4161 : * So bail out; we'll get a valid path from the is_sorted case
1899 4162 : * instead.
4163 : *
4164 : * The mere presence of empty grouping sets doesn't make a rollup
4165 : * unhashable (see preprocess_grouping_sets); we handle those
4166 : * specially below.
2204 rhodiumtoad 4167 : */
2204 rhodiumtoad 4168 GIC 494 : if (!rollup->hashable)
4169 12 : return;
4170 :
1336 tgl 4171 482 : sets_data = list_concat(sets_data, rollup->gsets_data);
4172 : }
2204 rhodiumtoad 4173 1341 : foreach(lc, sets_data)
4174 : {
2042 tgl 4175 1037 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2204 rhodiumtoad 4176 1037 : List *gset = gs->set;
4177 : RollupData *rollup;
2204 rhodiumtoad 4178 ECB :
2204 rhodiumtoad 4179 GIC 1037 : if (gset == NIL)
2204 rhodiumtoad 4180 ECB : {
4181 : /* Empty grouping sets can't be hashed. */
2204 rhodiumtoad 4182 CBC 224 : empty_sets_data = lappend(empty_sets_data, gs);
4183 224 : empty_sets = lappend(empty_sets, NIL);
4184 : }
4185 : else
4186 : {
2204 rhodiumtoad 4187 GIC 813 : rollup = makeNode(RollupData);
4188 :
4189 813 : rollup->groupClause = preprocess_groupclause(root, gset);
4190 813 : rollup->gsets_data = list_make1(gs);
4191 813 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4192 : rollup->gsets_data,
4193 : gd->tleref_to_colnum_map);
4194 813 : rollup->numGroups = gs->numGroups;
4195 813 : rollup->hashable = true;
4196 813 : rollup->is_hashed = true;
4197 813 : new_rollups = lappend(new_rollups, rollup);
4198 : }
2204 rhodiumtoad 4199 ECB : }
4200 :
4201 : /*
4202 : * If we didn't find anything nonempty to hash, then bail. We'll
4203 : * generate a path from the is_sorted case.
4204 : */
2204 rhodiumtoad 4205 GIC 304 : if (new_rollups == NIL)
2204 rhodiumtoad 4206 LBC 0 : return;
2204 rhodiumtoad 4207 ECB :
4208 : /*
4209 : * If there were empty grouping sets they should have been in the
4210 : * first rollup.
4211 : */
2204 rhodiumtoad 4212 GIC 304 : Assert(!unhashed_rollup || !empty_sets);
4213 :
4214 304 : if (unhashed_rollup)
4215 : {
2204 rhodiumtoad 4216 GBC 12 : new_rollups = lappend(new_rollups, unhashed_rollup);
2204 rhodiumtoad 4217 GIC 12 : strat = AGG_MIXED;
2204 rhodiumtoad 4218 EUB : }
2204 rhodiumtoad 4219 GIC 292 : else if (empty_sets)
4220 : {
2204 rhodiumtoad 4221 GBC 200 : RollupData *rollup = makeNode(RollupData);
2204 rhodiumtoad 4222 EUB :
2204 rhodiumtoad 4223 GBC 200 : rollup->groupClause = NIL;
4224 200 : rollup->gsets_data = empty_sets_data;
2204 rhodiumtoad 4225 GIC 200 : rollup->gsets = empty_sets;
4226 200 : rollup->numGroups = list_length(empty_sets);
2204 rhodiumtoad 4227 GBC 200 : rollup->hashable = false;
2204 rhodiumtoad 4228 GIC 200 : rollup->is_hashed = false;
4229 200 : new_rollups = lappend(new_rollups, rollup);
4230 200 : strat = AGG_MIXED;
4231 : }
4232 :
4233 304 : add_path(grouped_rel, (Path *)
4234 304 : create_groupingsets_path(root,
4235 : grouped_rel,
4236 : path,
4237 304 : (List *) parse->havingQual,
4238 : strat,
4239 : new_rollups,
4240 : agg_costs));
2204 rhodiumtoad 4241 CBC 304 : return;
2204 rhodiumtoad 4242 ECB : }
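: /*
:  * Editor's illustration (assumed plan shape, based on the branch
:  * above): given
:  *
:  *     SELECT a, b, count(*) FROM t GROUP BY GROUPING SETS ((a), (b), ());
:  *
:  * with unsorted input, each nonempty set becomes its own hashed rollup
:  * and the empty set goes into a separate unhashed rollup, yielding an
:  * AGG_MIXED path that EXPLAIN shows roughly as
:  *
:  *     MixedAggregate
:  *       Hash Key: a
:  *       Hash Key: b
:  *       Group Key: ()
:  *       ->  Seq Scan on t
:  */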
4243 :
4244 : /*
4245 : * If we have sorted input but nothing we can do with it, bail.
4246 : */
235 tgl 4247 GNC 397 : if (gd->rollups == NIL)
2204 rhodiumtoad 4248 UIC 0 : return;
4249 :
4250 : /*
4251 : * Given sorted input, we try to make two paths: one sorted and one mixed
4252 : * sort/hash. (We need to try both because hashagg might be disabled, or
4253 : * some columns might not be sortable.)
4254 : *
4255 : * can_hash is passed in as false if some obstacle elsewhere (such as
4256 : * ordered aggs) means that we shouldn't consider hashing at all.
4257 : */
2204 rhodiumtoad 4258 GIC 397 : if (can_hash && gd->any_hashable)
4259 : {
4260 358 : List *rollups = NIL;
4261 358 : List *hash_sets = list_copy(gd->unsortable_sets);
623 tgl 4262 CBC 358 : double availspace = hash_mem_limit;
4263 : ListCell *lc;
4264 :
4265 : /*
4266 : * Account first for space needed for groups we can't sort at all.
4267 : */
866 heikki.linnakangas 4268 GIC 358 : availspace -= estimate_hashagg_tablesize(root,
866 heikki.linnakangas 4269 ECB : path,
1508 tgl 4270 : agg_costs,
4271 : gd->dNumHashGroups);
2204 rhodiumtoad 4272 :
2204 rhodiumtoad 4273 GIC 358 : if (availspace > 0 && list_length(gd->rollups) > 1)
4274 : {
4275 : double scale;
4276 198 : int num_rollups = list_length(gd->rollups);
4277 : int k_capacity;
4278 198 : int *k_weights = palloc(num_rollups * sizeof(int));
4279 198 : Bitmapset *hash_items = NULL;
2204 rhodiumtoad 4280 ECB : int i;
4281 :
4282 : /*
4283 : * We treat this as a knapsack problem: the knapsack capacity
4284 : * represents hash_mem, the item weights are the estimated memory
4285 : * usage of the hashtables needed to implement a single rollup,
4286 : * and we really ought to use the cost saving as the item value;
4287 : * however, currently the costs assigned to sort nodes don't
4288 : * reflect the comparison costs well, and so we treat all items as
4289 : * of equal value (each rollup we hash instead saves us one sort).
4290 : *
4291 : * To use the discrete knapsack, we need to scale the values to a
4292 : * reasonably small bounded range. We choose to allow a 5% error
4293 : * margin; we have no more than 4096 rollups in the worst possible
4294 : * case, which with a 5% error margin will require a bit over 42MB
4295 : * of workspace. (Anyone wanting to plan queries that complex had
4296 : * better have the memory for it. In more reasonable cases, with
4297 : * no more than a couple of dozen rollups, the memory usage will
4298 : * be negligible.)
4299 : *
4300 : * k_capacity is naturally bounded, but we clamp the values for
4301 : * scale and weight (below) to avoid overflows or underflows (or
4302 : * uselessly trying to use a scale factor less than 1 byte).
4303 : */
2204 rhodiumtoad 4304 GIC 198 : scale = Max(availspace / (20.0 * num_rollups), 1.0);
4305 198 : k_capacity = (int) floor(availspace / scale);
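: /*
:  * Worked example (editor's illustration): with availspace = 4MB
:  * (4194304 bytes) and num_rollups = 4, scale = Max(4194304 / 80.0,
:  * 1.0) = 52428.8 and k_capacity = floor(4194304 / 52428.8) = 80,
:  * i.e. 20 knapsack units per rollup, which is where the 5% error
:  * margin mentioned above comes from.
:  */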
4306 :
4307 : /*
4308 : * We leave the first rollup out of consideration since it's the
4309 : * one that matches the input sort order. We assign indexes "i"
2204 rhodiumtoad 4310 ECB : * to only those entries considered for hashing; the second loop,
4311 : * below, must use the same condition.
4312 : */
2204 rhodiumtoad 4313 GIC 198 : i = 0;
923 tgl 4314 504 : for_each_from(lc, gd->rollups, 1)
4315 : {
2042 4316 306 : RollupData *rollup = lfirst_node(RollupData, lc);
4317 :
2204 rhodiumtoad 4318 306 : if (rollup->hashable)
2204 rhodiumtoad 4319 ECB : {
866 heikki.linnakangas 4320 GIC 306 : double sz = estimate_hashagg_tablesize(root,
4321 : path,
2204 rhodiumtoad 4322 ECB : agg_costs,
4323 : rollup->numGroups);
4324 :
4325 : /*
4326 : * If sz is enormous, but hash_mem (and hence scale) is
4327 : * small, avoid integer overflow here.
4328 : */
2204 rhodiumtoad 4329 GIC 306 : k_weights[i] = (int) Min(floor(sz / scale),
4330 : k_capacity + 1.0);
2204 rhodiumtoad 4331 CBC 306 : ++i;
4332 : }
4333 : }
2204 rhodiumtoad 4334 ECB :
4335 : /*
4336 : * Apply knapsack algorithm; compute the set of items which
4337 : * maximizes the value stored (in this case the number of sorts
4338 : * saved) while keeping the total size (approximately) within
4339 : * capacity.
4340 : */
2204 rhodiumtoad 4341 GIC 198 : if (i > 0)
2204 rhodiumtoad 4342 CBC 198 : hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4343 :
4344 198 : if (!bms_is_empty(hash_items))
2204 rhodiumtoad 4345 ECB : {
2204 rhodiumtoad 4346 GIC 198 : rollups = list_make1(linitial(gd->rollups));
2204 rhodiumtoad 4347 ECB :
2204 rhodiumtoad 4348 GIC 198 : i = 0;
923 tgl 4349 504 : for_each_from(lc, gd->rollups, 1)
4350 : {
2042 tgl 4351 CBC 306 : RollupData *rollup = lfirst_node(RollupData, lc);
4352 :
2204 rhodiumtoad 4353 306 : if (rollup->hashable)
2204 rhodiumtoad 4354 ECB : {
2204 rhodiumtoad 4355 GIC 306 : if (bms_is_member(i, hash_items))
4356 288 : hash_sets = list_concat(hash_sets,
1336 tgl 4357 288 : rollup->gsets_data);
4358 : else
2204 rhodiumtoad 4359 18 : rollups = lappend(rollups, rollup);
2204 rhodiumtoad 4360 CBC 306 : ++i;
4361 : }
4362 : else
2204 rhodiumtoad 4363 UIC 0 : rollups = lappend(rollups, rollup);
4364 : }
4365 : }
2204 rhodiumtoad 4366 ECB : }
4367 :
2204 rhodiumtoad 4368 GIC 358 : if (!rollups && hash_sets)
4369 12 : rollups = list_copy(gd->rollups);
4370 :
2204 rhodiumtoad 4371 CBC 716 : foreach(lc, hash_sets)
2204 rhodiumtoad 4372 ECB : {
2042 tgl 4373 GIC 358 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2204 rhodiumtoad 4374 358 : RollupData *rollup = makeNode(RollupData);
4375 :
4376 358 : Assert(gs->set != NIL);
4377 :
4378 358 : rollup->groupClause = preprocess_groupclause(root, gs->set);
4379 358 : rollup->gsets_data = list_make1(gs);
4380 358 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2204 rhodiumtoad 4381 ECB : rollup->gsets_data,
2118 tgl 4382 : gd->tleref_to_colnum_map);
2204 rhodiumtoad 4383 CBC 358 : rollup->numGroups = gs->numGroups;
2204 rhodiumtoad 4384 GIC 358 : rollup->hashable = true;
4385 358 : rollup->is_hashed = true;
4386 358 : rollups = lcons(rollup, rollups);
4387 : }
2204 rhodiumtoad 4388 ECB :
2204 rhodiumtoad 4389 GBC 358 : if (rollups)
4390 : {
2204 rhodiumtoad 4391 GIC 210 : add_path(grouped_rel, (Path *)
4392 210 : create_groupingsets_path(root,
4393 : grouped_rel,
4394 : path,
4395 210 : (List *) parse->havingQual,
4396 : AGG_MIXED,
4397 : rollups,
4398 : agg_costs));
4399 : }
2204 rhodiumtoad 4400 ECB : }
4401 :
4402 : /*
4403 : * Now try the simple sorted case.
4404 : */
2204 rhodiumtoad 4405 GIC 397 : if (!gd->unsortable_sets)
4406 382 : add_path(grouped_rel, (Path *)
4407 382 : create_groupingsets_path(root,
4408 : grouped_rel,
2204 rhodiumtoad 4409 ECB : path,
2204 rhodiumtoad 4410 CBC 382 : (List *) parse->havingQual,
4411 : AGG_SORTED,
4412 : gd->rollups,
4413 : agg_costs));
4414 : }
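: /*
:  * Editor's sketch (not part of planner.c) of the DiscreteKnapsack()
:  * call used above, shown standalone; the capacity and weights are
:  * made-up values in the scaled units computed earlier.  Passing NULL
:  * for the item values treats every item as equally valuable, i.e.
:  * each hashed rollup saves exactly one sort.
:  */
: #include "postgres.h"
: #include "lib/knapsack.h"
:
: static Bitmapset *
: example_pick_rollups_to_hash(void)
: {
:     int         k_capacity = 80;                /* scaled hash_mem budget */
:     int         k_weights[3] = {30, 50, 45};    /* scaled hashtable sizes */
:
:     /* returns the 0-based indexes of the items that fit the capacity */
:     return DiscreteKnapsack(k_capacity, 3, k_weights, NULL);
: }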
4415 :
4416 : /*
4417 : * create_window_paths
4418 : *
4419 : * Build a new upperrel containing Paths for window-function evaluation.
4420 : *
4421 : * input_rel: contains the source-data Paths
2587 tgl 4422 ECB : * input_target: result of make_window_input_target
4423 : * output_target: what the topmost WindowAggPath should return
2589 4424 : * wflists: result of find_window_functions
4425 : * activeWindows: result of select_active_windows
4426 : *
2587 4427 : * Note: all Paths in input_rel are expected to return input_target.
2589 4428 : */
4429 : static RelOptInfo *
2589 tgl 4430 CBC 1017 : create_window_paths(PlannerInfo *root,
2589 tgl 4431 ECB : RelOptInfo *input_rel,
4432 : PathTarget *input_target,
2587 4433 : PathTarget *output_target,
4434 : bool output_target_parallel_safe,
2589 4435 : WindowFuncLists *wflists,
4436 : List *activeWindows)
4437 : {
4438 : RelOptInfo *window_rel;
4439 : ListCell *lc;
4440 :
4441 : /* For now, do all work in the (WINDOW, NULL) upperrel */
2589 tgl 4442 GIC 1017 : window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4443 :
4444 : /*
4445 : * If the input relation is not parallel-safe, then the window relation
4446 : * can't be parallel-safe, either. Otherwise, we need to examine the
4447 : * target list and active windows for non-parallel-safe constructs.
4448 : */
1858 rhaas 4449 1017 : if (input_rel->consider_parallel && output_target_parallel_safe &&
2424 tgl 4450 UIC 0 : is_parallel_safe(root, (Node *) activeWindows))
2473 rhaas 4451 0 : window_rel->consider_parallel = true;
4452 :
4453 : /*
4454 : * If the input rel belongs to a single FDW, so does the window rel.
4455 : */
2473 tgl 4456 GIC 1017 : window_rel->serverid = input_rel->serverid;
2459 tgl 4457 CBC 1017 : window_rel->userid = input_rel->userid;
4458 1017 : window_rel->useridiscurrent = input_rel->useridiscurrent;
2473 tgl 4459 GIC 1017 : window_rel->fdwroutine = input_rel->fdwroutine;
2473 tgl 4460 ECB :
2589 4461 : /*
4462 : * Consider computing window functions starting from the existing
4463 : * cheapest-total path (which will likely require a sort) as well as any
4464 : * existing paths that satisfy or partially satisfy root->window_pathkeys.
4465 : */
2589 tgl 4466 GIC 2139 : foreach(lc, input_rel->pathlist)
4467 : {
4468 1122 : Path *path = (Path *) lfirst(lc);
4469 : int presorted_keys;
4470 :
4471 1227 : if (path == input_rel->cheapest_total_path ||
936 drowley 4472 105 : pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4473 48 : &presorted_keys) ||
4474 48 : presorted_keys > 0)
2589 tgl 4475 CBC 1083 : create_one_window_path(root,
2589 tgl 4476 ECB : window_rel,
4477 : path,
4478 : input_target,
4479 : output_target,
4480 : wflists,
4481 : activeWindows);
6573 4482 : }
4483 :
2473 4484 : /*
4485 : * If there is an FDW that's responsible for all baserels of the query,
4486 : * let it consider adding ForeignPaths.
4487 : */
2473 tgl 4488 GIC 1017 : if (window_rel->fdwroutine &&
4489 6 : window_rel->fdwroutine->GetForeignUpperPaths)
4490 6 : window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4491 : input_rel, window_rel,
4492 : NULL);
4493 :
4494 : /* Let extensions possibly add some more paths */
2553 4495 1017 : if (create_upper_paths_hook)
2553 tgl 4496 UIC 0 : (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4497 : input_rel, window_rel, NULL);
4498 :
2589 tgl 4499 ECB : /* Now choose the best path(s) */
2589 tgl 4500 CBC 1017 : set_cheapest(window_rel);
4501 :
4502 1017 : return window_rel;
4503 : }
2589 tgl 4504 ECB :
4505 : /*
4506 : * Stack window-function implementation steps atop the given Path, and
4507 : * add the result to window_rel.
4508 : *
4509 : * window_rel: upperrel to contain result
2587 4510 : * path: input Path to use (must return input_target)
4511 : * input_target: result of make_window_input_target
4512 : * output_target: what the topmost WindowAggPath should return
2589 4513 : * wflists: result of find_window_functions
4514 : * activeWindows: result of select_active_windows
4515 : */
4516 : static void
2589 tgl 4517 GIC 1083 : create_one_window_path(PlannerInfo *root,
2589 tgl 4518 ECB : RelOptInfo *window_rel,
4519 : Path *path,
2587 4520 : PathTarget *input_target,
4521 : PathTarget *output_target,
2589 4522 : WindowFuncLists *wflists,
4523 : List *activeWindows)
4524 : {
2587 4525 : PathTarget *window_target;
2589 4526 : ListCell *l;
366 drowley 4527 CBC 1083 : List *topqual = NIL;
6573 tgl 4528 ECB :
4529 : /*
4530 : * Since each window clause could require a different sort order, we stack
4531 : * up a WindowAgg node for each clause, with sort steps between them as
4532 : * needed. (We assume that select_active_windows chose a good order for
4533 : * executing the clauses in.)
4534 : *
4535 : * input_target should contain all Vars and Aggs needed for the result.
2587 4536 : * (In some cases we wouldn't need to propagate all of these all the way
2587 tgl 4537 EUB : * to the top, since they might only be needed as inputs to WindowFuncs.
4538 : * It's probably not worth trying to optimize that though.) It must also
4539 : * contain all window partitioning and sorting expressions, to ensure
4540 : * they're computed only once at the bottom of the stack (that's critical
4541 : * for volatile functions). As we climb up the stack, we'll add outputs
4542 : * for the WindowFuncs computed at each level.
2589 tgl 4543 ECB : */
2587 tgl 4544 GIC 1083 : window_target = input_target;
2589 tgl 4545 ECB :
2589 tgl 4546 GIC 2238 : foreach(l, activeWindows)
6573 tgl 4547 ECB : {
2042 tgl 4548 CBC 1155 : WindowClause *wc = lfirst_node(WindowClause, l);
4549 : List *window_pathkeys;
936 drowley 4550 ECB : int presorted_keys;
4551 : bool is_sorted;
366 4552 : bool topwindow;
4553 :
2589 tgl 4554 CBC 1155 : window_pathkeys = make_pathkeys_for_window(root,
2589 tgl 4555 ECB : wc,
1474 4556 : root->processed_tlist);
2589 4557 :
936 drowley 4558 CBC 1155 : is_sorted = pathkeys_count_contained_in(window_pathkeys,
936 drowley 4559 ECB : path->pathkeys,
4560 : &presorted_keys);
4561 :
4562 : /* Sort if necessary */
936 drowley 4563 GIC 1155 : if (!is_sorted)
2589 tgl 4564 ECB : {
936 drowley 4565 : /*
4566 : * If there are no presorted keys, or incremental sort is disabled,
4567 : * just perform a complete sort.
4568 : */
936 drowley 4569 GIC 906 : if (presorted_keys == 0 || !enable_incremental_sort)
4570 879 : path = (Path *) create_sort_path(root, window_rel,
4571 : path,
936 drowley 4572 ECB : window_pathkeys,
4573 : -1.0);
4574 : else
4575 : {
4576 : /*
4577 : * Since we have presorted keys and incremental sort is
4578 : * enabled, just use incremental sort.
936 drowley 4579 EUB : */
936 drowley 4580 GIC 27 : path = (Path *) create_incremental_sort_path(root,
4581 : window_rel,
4582 : path,
4583 : window_pathkeys,
4584 : presorted_keys,
4585 : -1.0);
4586 : }
4587 : }
4588 :
1364 tgl 4589 CBC 1155 : if (lnext(activeWindows, l))
4590 : {
2587 tgl 4591 ECB : /*
4592 : * Add the current WindowFuncs to the output target for this
4593 : * intermediate WindowAggPath. We must copy window_target to
4594 : * avoid changing the previous path's target.
4595 : *
4596 : * Note: a WindowFunc adds nothing to the target's eval costs; but
4597 : * we do need to account for the increase in tlist width.
4598 : */
4599 : ListCell *lc2;
4600 :
2587 tgl 4601 GIC 72 : window_target = copy_pathtarget(window_target);
4602 162 : foreach(lc2, wflists->windowFuncs[wc->winref])
4603 : {
2190 tgl 4604 CBC 90 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4605 :
2587 tgl 4606 GIC 90 : add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
2587 tgl 4607 CBC 90 : window_target->width += get_typavgwidth(wfunc->wintype, -1);
4608 : }
2589 tgl 4609 ECB : }
4610 : else
4611 : {
4612 : /* Install the goal target in the topmost WindowAgg */
2587 tgl 4613 GIC 1083 : window_target = output_target;
4614 : }
4615 :
4616 : /* mark the final item in the list as the top-level window */
366 drowley 4617 1155 : topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4618 :
4619 : /*
4620 : * Accumulate all of the runConditions from each intermediate
4621 : * WindowClause. The top-level WindowAgg must pass these as a qual so
4622 : * that it filters out unwanted tuples correctly.
4623 : */
4624 1155 : if (!topwindow)
4625 72 : topqual = list_concat(topqual, wc->runCondition);
4626 :
4627 : path = (Path *)
2587 tgl 4628 1155 : create_windowagg_path(root, window_rel, path, window_target,
2589 4629 1155 : wflists->windowFuncs[wc->winref],
4630 : wc, topwindow ? topqual : NIL, topwindow);
4631 : }
4632 :
4633 1083 : add_path(window_rel, path);
6573 4634 1083 : }
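: /*
:  * Editor's illustration of the stacking above: for
:  *
:  *     SELECT sum(x) OVER (ORDER BY a), sum(x) OVER (ORDER BY b) FROM t;
:  *
:  * two WindowAgg nodes are stacked with a Sort between them (assuming
:  * no useful pre-existing ordering), roughly
:  *
:  *     WindowAgg (ORDER BY b)
:  *       ->  Sort (b)
:  *         ->  WindowAgg (ORDER BY a)
:  *           ->  Sort (a)
:  *             ->  scan of t
:  *
:  * with all sort columns computed once at the bottom of the stack.
:  */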
6573 tgl 4635 ECB :
5360 4636 : /*
4637 : * create_distinct_paths
4638 : *
4639 : * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4640 : *
4641 : * input_rel: contains the source-data Paths
4642 : *
4643 : * Note: input paths should already compute the desired pathtarget, since
2589 4644 : * Sort/Unique won't project anything.
5360 4645 : */
4646 : static RelOptInfo *
595 drowley 4647 CBC 970 : create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel)
4648 : {
2589 tgl 4649 ECB : RelOptInfo *distinct_rel;
4650 :
4651 : /* For now, do all work in the (DISTINCT, NULL) upperrel */
2589 tgl 4652 GIC 970 : distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4653 :
4654 : /*
4655 : * We don't compute anything at this level, so distinct_rel will be
4656 : * parallel-safe if the input rel is parallel-safe. In particular, if
4657 : * there is a DISTINCT ON (...) clause, any path for the input_rel will
4658 : * output those expressions, and will not be parallel-safe unless those
4659 : * expressions are parallel-safe.
2473 rhaas 4660 ECB : */
2473 rhaas 4661 GIC 970 : distinct_rel->consider_parallel = input_rel->consider_parallel;
2473 rhaas 4662 ECB :
4663 : /*
4664 : * If the input rel belongs to a single FDW, so does the distinct_rel.
4665 : */
2473 tgl 4666 GIC 970 : distinct_rel->serverid = input_rel->serverid;
2459 4667 970 : distinct_rel->userid = input_rel->userid;
4668 970 : distinct_rel->useridiscurrent = input_rel->useridiscurrent;
2473 4669 970 : distinct_rel->fdwroutine = input_rel->fdwroutine;
4670 :
4671 : /* build distinct paths based on input_rel's pathlist */
595 drowley 4672 CBC 970 : create_final_distinct_paths(root, input_rel, distinct_rel);
595 drowley 4673 ECB :
4674 : /* now build distinct paths based on input_rel's partial_pathlist */
595 drowley 4675 CBC 970 : create_partial_distinct_paths(root, input_rel, distinct_rel);
4676 :
595 drowley 4677 ECB : /* Give a helpful error if we failed to create any paths */
595 drowley 4678 GIC 970 : if (distinct_rel->pathlist == NIL)
595 drowley 4679 LBC 0 : ereport(ERROR,
595 drowley 4680 ECB : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4681 : errmsg("could not implement DISTINCT"),
4682 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4683 :
4684 : /*
4685 : * If there is an FDW that's responsible for all baserels of the query,
4686 : * let it consider adding ForeignPaths.
4687 : */
595 drowley 4688 CBC 970 : if (distinct_rel->fdwroutine &&
595 drowley 4689 GIC 8 : distinct_rel->fdwroutine->GetForeignUpperPaths)
595 drowley 4690 CBC 8 : distinct_rel->fdwroutine->GetForeignUpperPaths(root,
595 drowley 4691 ECB : UPPERREL_DISTINCT,
4692 : input_rel,
4693 : distinct_rel,
595 drowley 4694 EUB : NULL);
4695 :
4696 : /* Let extensions possibly add some more paths */
595 drowley 4697 GIC 970 : if (create_upper_paths_hook)
595 drowley 4698 UIC 0 : (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
595 drowley 4699 ECB : distinct_rel, NULL);
4700 :
4701 : /* Now choose the best path(s) */
595 drowley 4702 CBC 970 : set_cheapest(distinct_rel);
4703 :
4704 970 : return distinct_rel;
595 drowley 4705 ECB : }
4706 :
4707 : /*
4708 : * create_partial_distinct_paths
4709 : *
4710 : * Process 'input_rel' partial paths and add unique/aggregate paths to the
4711 : * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4712 : * paths on top, and add a final unique/aggregate path to remove any
4713 : * duplicates produced by combining rows from parallel workers.
4714 : */
4715 : static void
595 drowley 4716 CBC 970 : create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
595 drowley 4717 ECB : RelOptInfo *final_distinct_rel)
4718 : {
4719 : RelOptInfo *partial_distinct_rel;
4720 : Query *parse;
4721 : List *distinctExprs;
4722 : double numDistinctRows;
4723 : Path *cheapest_partial_path;
4724 : ListCell *lc;
4725 :
4726 : /* nothing to do when there are no partial paths in the input rel */
595 drowley 4727 GIC 970 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4728 925 : return;
4729 :
4730 45 : parse = root->parse;
4731 :
4732 : /* can't do parallel DISTINCT ON */
4733 45 : if (parse->hasDistinctOn)
595 drowley 4734 UIC 0 : return;
4735 :
595 drowley 4736 CBC 45 : partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
595 drowley 4737 ECB : NULL);
595 drowley 4738 CBC 45 : partial_distinct_rel->reltarget = root->upper_targets[UPPERREL_PARTIAL_DISTINCT];
595 drowley 4739 GIC 45 : partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4740 :
595 drowley 4741 ECB : /*
4742 : * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4743 : */
595 drowley 4744 GIC 45 : partial_distinct_rel->serverid = input_rel->serverid;
4745 45 : partial_distinct_rel->userid = input_rel->userid;
4746 45 : partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4747 45 : partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4748 :
4749 45 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
4750 :
81 tgl 4751 GNC 45 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4752 : parse->targetList);
4753 :
4754 : /* estimate how many distinct rows we'll get from each worker */
595 drowley 4755 GIC 45 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4756 : cheapest_partial_path->rows,
4757 : NULL, NULL);
4758 :
4759 : /*
4760 : * Try sorting the cheapest path and incrementally sorting any paths with
4761 : * presorted keys, and put unique paths atop of those.
4762 : */
81 tgl 4763 GNC 45 : if (grouping_is_sortable(root->processed_distinctClause))
595 drowley 4764 ECB : {
595 drowley 4765 GIC 93 : foreach(lc, input_rel->partial_pathlist)
4766 : {
88 drowley 4767 GNC 48 : Path *input_path = (Path *) lfirst(lc);
4768 : Path *sorted_path;
4769 : bool is_sorted;
4770 : int presorted_keys;
4771 :
4772 48 : is_sorted = pathkeys_count_contained_in(root->distinct_pathkeys,
4773 : input_path->pathkeys,
4774 : &presorted_keys);
4775 :
4776 48 : if (is_sorted)
88 drowley 4777 UNC 0 : sorted_path = input_path;
4778 : else
4779 : {
4780 : /*
4781 : * Try at least sorting the cheapest path and also try
4782 : * incrementally sorting any path which is partially sorted
4783 : * already (no need to deal with paths which have presorted
4784 : * keys when incremental sort is disabled unless it's the
4785 : * cheapest partial path).
4786 : */
88 drowley 4787 GNC 48 : if (input_path != cheapest_partial_path &&
4788 3 : (presorted_keys == 0 || !enable_incremental_sort))
88 drowley 4789 UNC 0 : continue;
4790 :
4791 : /*
4792 : * We've no need to consider both a sort and incremental sort.
4793 : * We'll just do a sort if there are no presorted keys and an
4794 : * incremental sort when there are presorted keys.
4795 : */
88 drowley 4796 GNC 48 : if (presorted_keys == 0 || !enable_incremental_sort)
4797 45 : sorted_path = (Path *) create_sort_path(root,
4798 : partial_distinct_rel,
4799 : input_path,
4800 : root->distinct_pathkeys,
4801 : -1.0);
4802 : else
4803 3 : sorted_path = (Path *) create_incremental_sort_path(root,
4804 : partial_distinct_rel,
4805 : input_path,
4806 : root->distinct_pathkeys,
4807 : presorted_keys,
4808 : -1.0);
4809 : }
4810 :
4811 48 : add_partial_path(partial_distinct_rel, (Path *)
4812 48 : create_upper_unique_path(root, partial_distinct_rel,
4813 : sorted_path,
4814 48 : list_length(root->distinct_pathkeys),
4815 : numDistinctRows));
4816 : }
4817 : }
4818 :
4819 : /*
4820 : * Now try hash aggregate paths, if enabled and hashing is possible. Since
595 drowley 4821 ECB : * we're not on the hook to ensure we do our best to create at least one
595 drowley 4822 EUB : * path here, we treat enable_hashagg as a hard off-switch rather than the
4823 : * slightly softer variant in create_final_distinct_paths.
4824 : */
81 tgl 4825 GNC 45 : if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4826 : {
595 drowley 4827 GIC 36 : add_partial_path(partial_distinct_rel, (Path *)
595 drowley 4828 CBC 36 : create_agg_path(root,
595 drowley 4829 ECB : partial_distinct_rel,
4830 : cheapest_partial_path,
4831 : cheapest_partial_path->pathtarget,
4832 : AGG_HASHED,
4833 : AGGSPLIT_SIMPLE,
4834 : root->processed_distinctClause,
4835 : NIL,
4836 : NULL,
4837 : numDistinctRows));
4838 : }
4839 :
4840 : /*
4841 : * If there is an FDW that's responsible for all baserels of the query,
4842 : * let it consider adding ForeignPaths.
4843 : */
595 drowley 4844 CBC 45 : if (partial_distinct_rel->fdwroutine &&
595 drowley 4845 LBC 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4846 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
595 drowley 4847 ECB : UPPERREL_PARTIAL_DISTINCT,
4848 : input_rel,
4849 : partial_distinct_rel,
4850 : NULL);
4851 :
4852 : /* Let extensions possibly add some more partial paths */
595 drowley 4853 GIC 45 : if (create_upper_paths_hook)
595 drowley 4854 UIC 0 : (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4855 : input_rel, partial_distinct_rel, NULL);
4856 :
595 drowley 4857 GIC 45 : if (partial_distinct_rel->partial_pathlist != NIL)
4858 : {
4859 45 : generate_gather_paths(root, partial_distinct_rel, true);
595 drowley 4860 CBC 45 : set_cheapest(partial_distinct_rel);
595 drowley 4861 ECB :
4862 : /*
4863 : * Finally, create paths to distinctify the final result. This step
4864 : * is needed to remove any duplicates due to combining rows from
4865 : * parallel workers.
4866 : */
595 drowley 4867 CBC 45 : create_final_distinct_paths(root, partial_distinct_rel,
595 drowley 4868 EUB : final_distinct_rel);
4869 : }
4870 : }
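: /*
:  * Editor's illustration (assumed plan shapes): for a plain
:  * SELECT DISTINCT, the partial-distinct machinery above can produce
:  * either
:  *
:  *     Unique
:  *       ->  Gather Merge
:  *         ->  Unique
:  *           ->  Sort
:  *             ->  Parallel Seq Scan on t
:  *
:  * or the hashed equivalent
:  *
:  *     HashAggregate
:  *       ->  Gather
:  *         ->  HashAggregate
:  *           ->  Parallel Seq Scan on t
:  *
:  * where the lower node de-duplicates within each worker and the upper
:  * one removes duplicates that remain after combining the workers' rows.
:  */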
4871 :
595 drowley 4872 ECB : /*
4873 : * create_final_distinct_paths
4874 : * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
4875 : *
4876 : * input_rel: contains the source-data paths
4877 : * distinct_rel: destination relation for storing created paths
4878 : */
4879 : static RelOptInfo *
595 drowley 4880 GIC 1015 : create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4881 : RelOptInfo *distinct_rel)
4882 : {
4883 1015 : Query *parse = root->parse;
4884 1015 : Path *cheapest_input_path = input_rel->cheapest_total_path;
4885 : double numDistinctRows;
4886 : bool allow_hash;
595 drowley 4887 ECB :
4888 : /* Estimate number of distinct rows there will be */
2589 tgl 4889 GIC 1015 : if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4890 996 : root->hasHavingQual)
4891 : {
4892 : /*
4893 : * If there was grouping or aggregation, use the number of input rows
4894 : * as the estimated number of DISTINCT rows (ie, assume the input is
4895 : * already mostly unique).
4896 : */
2589 tgl 4897 CBC 19 : numDistinctRows = cheapest_input_path->rows;
4898 : }
4899 : else
4900 : {
4901 : /*
4902 : * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4903 : */
4904 : List *distinctExprs;
4905 :
81 tgl 4906 GNC 996 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4907 : parse->targetList);
2589 tgl 4908 GIC 996 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4909 : cheapest_input_path->rows,
4910 : NULL, NULL);
4911 : }
4912 :
4913 : /*
2589 tgl 4914 ECB : * Consider sort-based implementations of DISTINCT, if possible.
4915 : */
81 tgl 4916 GNC 1015 : if (grouping_is_sortable(root->processed_distinctClause))
4917 : {
2589 tgl 4918 ECB : /*
4919 : * Firstly, if we have any adequately-presorted paths, just stick a
4920 : * Unique node on those. We also consider doing an explicit sort of
4921 : * the cheapest input path and Unique'ing that. If any paths have
4922 : * presorted keys then we'll create an incremental sort atop of those
4923 : * before adding a unique node on the top.
4924 : *
4925 : * When we have DISTINCT ON, we must sort by the more rigorous of
4926 : * DISTINCT and ORDER BY, else it won't have the desired behavior.
4927 : * Also, if we do have to do an explicit sort, we might as well use
4928 : * the more rigorous ordering to avoid a second sort later. (Note
4929 : * that the parser will have ensured that one clause is a prefix of
4930 : * the other.)
4931 : */
4932 : List *needed_pathkeys;
4933 : ListCell *lc;
88 drowley 4934 GNC 1012 : double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
4935 :
2589 tgl 4936 GIC 1088 : if (parse->hasDistinctOn &&
2589 tgl 4937 CBC 76 : list_length(root->distinct_pathkeys) <
2589 tgl 4938 GIC 76 : list_length(root->sort_pathkeys))
4939 21 : needed_pathkeys = root->sort_pathkeys;
4940 : else
4941 991 : needed_pathkeys = root->distinct_pathkeys;
4942 :
2589 tgl 4943 CBC 2423 : foreach(lc, input_rel->pathlist)
2589 tgl 4944 ECB : {
88 drowley 4945 GNC 1411 : Path *input_path = (Path *) lfirst(lc);
4946 : Path *sorted_path;
4947 : bool is_sorted;
4948 : int presorted_keys;
4949 :
4950 1411 : is_sorted = pathkeys_count_contained_in(needed_pathkeys,
4951 : input_path->pathkeys,
4952 : &presorted_keys);
4953 :
4954 1411 : if (is_sorted)
4955 273 : sorted_path = input_path;
4956 : else
4957 : {
4958 : /*
4959 : * Try at least sorting the cheapest path and also try
4960 : * incrementally sorting any path which is partially sorted
4961 : * already (no need to deal with paths which have presorted
4962 : * keys when incremental sort is disabled unless it's the
4963 : * cheapest input path).
4964 : */
4965 1138 : if (input_path != cheapest_input_path &&
4966 196 : (presorted_keys == 0 || !enable_incremental_sort))
4967 53 : continue;
4968 :
4969 : /*
4970 : * We've no need to consider both a sort and incremental sort.
4971 : * We'll just do a sort if there are no presorted keys and an
4972 : * incremental sort when there are presorted keys.
4973 : */
4974 1085 : if (presorted_keys == 0 || !enable_incremental_sort)
4975 933 : sorted_path = (Path *) create_sort_path(root,
4976 : distinct_rel,
4977 : input_path,
4978 : needed_pathkeys,
4979 : limittuples);
4980 : else
4981 152 : sorted_path = (Path *) create_incremental_sort_path(root,
4982 : distinct_rel,
4983 : input_path,
4984 : needed_pathkeys,
4985 : presorted_keys,
4986 : limittuples);
4987 : }
4988 :
4989 : /*
4990 : * distinct_pathkeys may have become empty if all of the pathkeys
4991 : * were determined to be redundant. If all of the pathkeys are
4992 : * redundant then each DISTINCT target must only allow a single
4993 : * value, and therefore all resulting tuples must be identical (or at
4994 : * least indistinguishable by an equality check). We can uniquify
4995 : * these tuples simply by taking the first tuple. All we do
4996 : * here is add a path to do "LIMIT 1" atop of 'sorted_path'. When
4997 : * doing a DISTINCT ON we may still have a non-NIL sort_pathkeys
4998 : * list, so we must still only do this with paths which are
4999 : * correctly sorted by sort_pathkeys.
5000 : */
5001 1358 : if (root->distinct_pathkeys == NIL)
5002 : {
5003 : Node *limitCount;
5004 :
5005 40 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5006 : sizeof(int64),
5007 : Int64GetDatum(1), false,
5008 : FLOAT8PASSBYVAL);
5009 :
5010 : /*
5011 : * If the query already has a LIMIT clause, then we could end
5012 : * up with a duplicate LimitPath in the final plan. That does
5013 : * not seem worth troubling over too much.
5014 : */
5015 40 : add_path(distinct_rel, (Path *)
5016 40 : create_limit_path(root, distinct_rel, sorted_path,
5017 : NULL, limitCount,
5018 : LIMIT_OPTION_COUNT, 0, 1));
5019 : }
5020 : else
5021 : {
88 drowley 5022 GIC 1318 : add_path(distinct_rel, (Path *)
5023 1318 : create_upper_unique_path(root, distinct_rel,
5024 : sorted_path,
5025 1318 : list_length(root->distinct_pathkeys),
5026 : numDistinctRows));
88 drowley 5027 ECB : }
5028 : }
2589 tgl 5029 : }
5030 :
5031 : /*
5032 : * Consider hash-based implementations of DISTINCT, if possible.
5033 : *
5034 : * If we were not able to make any other types of path, we *must* hash or
986 pg 5035 : * die trying. If we do have other choices, there are two things that
5036 : * should prevent selection of hashing: if the query uses DISTINCT ON
5037 : * (because it won't really have the expected behavior if we hash), or if
5038 : * enable_hashagg is off.
5360 tgl 5039 : *
5040 : * Note: grouping_is_hashable() is much more expensive to check than the
5041 : * other gating conditions, so we want to do it last.
5042 : */
2589 tgl 5043 GIC 1015 : if (distinct_rel->pathlist == NIL)
5044 3 : allow_hash = true; /* we have no alternatives */
5045 1012 : else if (parse->hasDistinctOn || !enable_hashagg)
2589 tgl 5046 CBC 118 : allow_hash = false; /* policy-based decision not to hash */
2589 tgl 5047 ECB : else
986 pg 5048 GIC 894 : allow_hash = true; /* default */
5049 :
81 tgl 5050 GNC 1015 : if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5360 tgl 5051 ECB : {
5052 : /* Generate hashed aggregate path --- no sort needed */
2589 tgl 5053 GIC 897 : add_path(distinct_rel, (Path *)
5054 897 : create_agg_path(root,
2589 tgl 5055 ECB : distinct_rel,
5056 : cheapest_input_path,
5057 : cheapest_input_path->pathtarget,
5058 : AGG_HASHED,
5059 : AGGSPLIT_SIMPLE,
5060 : root->processed_distinctClause,
5061 : NIL,
5062 : NULL,
5063 : numDistinctRows));
5064 : }
5065 :
2589 tgl 5066 GIC 1015 : return distinct_rel;
5067 : }
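: /*
:  * Editor's example for the distinct_pathkeys == NIL case above: in
:  *
:  *     SELECT DISTINCT x FROM t WHERE x = 1;
:  *
:  * the WHERE clause makes every distinct pathkey redundant, so all
:  * output rows would be identical and the planner adds a "LIMIT 1"
:  * atop the sorted path rather than a Unique node.
:  */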
5068 :
2589 tgl 5069 ECB : /*
5070 : * create_ordered_paths
5071 : *
5072 : * Build a new upperrel containing Paths for ORDER BY evaluation.
5073 : *
5074 : * All paths in the result must satisfy the ORDER BY ordering.
5075 : * The only new paths we need consider are an explicit full sort
5076 : * and incremental sort on the cheapest-total existing path.
5077 : *
5078 : * input_rel: contains the source-data Paths
5079 : * target: the output tlist the result Paths must emit
5080 : * limit_tuples: estimated bound on the number of output tuples,
5081 : * or -1 if no LIMIT or couldn't estimate
5082 : *
1098 tomas.vondra 5083 : * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5084 : * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5085 : */
5086 : static RelOptInfo *
2589 tgl 5087 GIC 24802 : create_ordered_paths(PlannerInfo *root,
2589 tgl 5088 ECB : RelOptInfo *input_rel,
2585 5089 : PathTarget *target,
1858 rhaas 5090 : bool target_parallel_safe,
2589 tgl 5091 : double limit_tuples)
5092 : {
2589 tgl 5093 GIC 24802 : Path *cheapest_input_path = input_rel->cheapest_total_path;
2589 tgl 5094 ECB : RelOptInfo *ordered_rel;
5095 : ListCell *lc;
5096 :
5097 : /* For now, do all work in the (ORDERED, NULL) upperrel */
2589 tgl 5098 GIC 24802 : ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5099 :
2473 rhaas 5100 ECB : /*
2473 rhaas 5101 EUB : * If the input relation is not parallel-safe, then the ordered relation
5102 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5103 : * target list is parallel-safe.
5104 : */
1858 rhaas 5105 GIC 24802 : if (input_rel->consider_parallel && target_parallel_safe)
2473 5106 15917 : ordered_rel->consider_parallel = true;
5107 :
5108 : /*
5109 : * If the input rel belongs to a single FDW, so does the ordered_rel.
2473 tgl 5110 ECB : */
2473 tgl 5111 CBC 24802 : ordered_rel->serverid = input_rel->serverid;
2459 5112 24802 : ordered_rel->userid = input_rel->userid;
2459 tgl 5113 GIC 24802 : ordered_rel->useridiscurrent = input_rel->useridiscurrent;
2473 5114 24802 : ordered_rel->fdwroutine = input_rel->fdwroutine;
5115 :
2589 5116 60924 : foreach(lc, input_rel->pathlist)
5117 : {
1098 tomas.vondra 5118 36122 : Path *input_path = (Path *) lfirst(lc);
5119 : Path *sorted_path;
2589 tgl 5120 EUB : bool is_sorted;
5121 : int presorted_keys;
5122 :
1098 tomas.vondra 5123 GIC 36122 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
1060 tgl 5124 ECB : input_path->pathkeys, &presorted_keys);
5125 :
1098 tomas.vondra 5126 CBC 36122 : if (is_sorted)
114 drowley 5127 GNC 12319 : sorted_path = input_path;
5128 : else
5129 : {
5130 : /*
5131 : * Try at least sorting the cheapest path and also try
5132 : * incrementally sorting any path which is partially sorted
5133 : * already (no need to deal with paths which have presorted keys
5134 : * when incremental sort is disabled unless it's the cheapest
5135 : * input path).
5136 : */
5137 23803 : if (input_path != cheapest_input_path &&
5138 2694 : (presorted_keys == 0 || !enable_incremental_sort))
5139 989 : continue;
5140 :
5141 : /*
5142 : * We've no need to consider both a sort and incremental sort.
5143 : * We'll just do a sort if there are no presorted keys and an
5144 : * incremental sort when there are presorted keys.
5145 : */
5146 22814 : if (presorted_keys == 0 || !enable_incremental_sort)
1098 tomas.vondra 5147 GIC 20884 : sorted_path = (Path *) create_sort_path(root,
1098 tomas.vondra 5148 ECB : ordered_rel,
5149 : input_path,
5150 : root->sort_pathkeys,
5151 : limit_tuples);
5152 : else
114 drowley 5153 GNC 1930 : sorted_path = (Path *) create_incremental_sort_path(root,
5154 : ordered_rel,
5155 : input_path,
5156 : root->sort_pathkeys,
5157 : presorted_keys,
5158 : limit_tuples);
114 drowley 5159 ECB : }
5160 :
5161 : /* Add projection step if needed */
114 drowley 5162 GNC 35133 : if (sorted_path->pathtarget != target)
5163 7102 : sorted_path = apply_projection_to_path(root, ordered_rel,
5164 : sorted_path, target);
5165 :
5166 35133 : add_path(ordered_rel, sorted_path);
5167 : }
5168 :
5169 : /*
5170 : * generate_gather_paths() will have already generated a simple Gather
2222 rhaas 5171 ECB : * path for the best parallel path, if any, and the loop above will have
5172 : * considered sorting it. Similarly, generate_gather_paths() will also
5173 : * have generated order-preserving Gather Merge plans which can be used
5174 : * without sorting if they happen to match the sort_pathkeys, and the loop
5175 : * above will have handled those as well. However, there's one more
2222 rhaas 5176 EUB : * possibility: it may make sense to sort the cheapest partial path
5177 : * according to the required output order and then use Gather Merge.
5178 : */
2222 rhaas 5179 GIC 24802 : if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5180 15850 : input_rel->partial_pathlist != NIL)
5181 : {
5182 : Path *cheapest_partial_path;
5183 :
5184 1007 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
5185 :
2222 rhaas 5186 ECB : /*
5187 : * If cheapest partial path doesn't need a sort, this is redundant
2222 rhaas 5188 EUB : * with what's already been tried.
5189 : */
2222 rhaas 5190 GIC 1007 : if (!pathkeys_contained_in(root->sort_pathkeys,
5191 : cheapest_partial_path->pathkeys))
5192 : {
5193 : Path *path;
5194 : double total_groups;
2222 rhaas 5195 ECB :
2222 rhaas 5196 CBC 1001 : path = (Path *) create_sort_path(root,
5197 : ordered_rel,
5198 : cheapest_partial_path,
5199 : root->sort_pathkeys,
5200 : limit_tuples);
5201 :
5202 1001 : total_groups = cheapest_partial_path->rows *
2222 rhaas 5203 GIC 1001 : cheapest_partial_path->parallel_workers;
5204 : path = (Path *)
5205 1001 : create_gather_merge_path(root, ordered_rel,
5206 : path,
5207 : path->pathtarget,
5208 : root->sort_pathkeys, NULL,
5209 : &total_groups);
2222 rhaas 5210 ECB :
5211 : /* Add projection step if needed */
2222 rhaas 5212 GIC 1001 : if (path->pathtarget != target)
2222 rhaas 5213 CBC 198 : path = apply_projection_to_path(root, ordered_rel,
5214 : path, target);
5215 :
2222 rhaas 5216 GIC 1001 : add_path(ordered_rel, path);
5217 : }
5218 :
5219 : /*
5220 : * Consider incremental sort with a gather merge on partial paths.
5221 : *
5222 : * We can also skip the entire loop when we only have a single-item
5223 : * sort_pathkeys because then we can't possibly have a presorted
1097 tomas.vondra 5224 ECB : * prefix of the list without having the list be fully sorted.
5225 : */
1008 peter 5226 CBC 1007 : if (enable_incremental_sort && list_length(root->sort_pathkeys) > 1)
1097 tomas.vondra 5227 ECB : {
1097 tomas.vondra 5228 GIC 736 : foreach(lc, input_rel->partial_pathlist)
5229 : {
5230 377 : Path *input_path = (Path *) lfirst(lc);
5231 : Path *sorted_path;
5232 : bool is_sorted;
5233 : int presorted_keys;
5234 : double total_groups;
5235 :
5236 : /*
5237 : * We don't care if this is the cheapest partial path - we
5238 : * can't simply skip it, because it may be partially sorted in
5239 : * which case we want to consider adding incremental sort
5240 : * (instead of full sort, which is what happens above).
1097 tomas.vondra 5241 ECB : */
1097 tomas.vondra 5242 EUB :
1097 tomas.vondra 5243 GBC 377 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5244 : input_path->pathkeys,
5245 : &presorted_keys);
5246 :
5247 : /* No point in adding incremental sort on fully sorted paths. */
1097 tomas.vondra 5248 GIC 377 : if (is_sorted)
5249 353 : continue;
1097 tomas.vondra 5250 ECB :
1097 tomas.vondra 5251 GBC 377 : if (presorted_keys == 0)
1097 tomas.vondra 5252 GIC 353 : continue;
5253 :
1097 tomas.vondra 5254 ECB : /* Since we have presorted keys, consider incremental sort. */
1097 tomas.vondra 5255 GIC 24 : sorted_path = (Path *) create_incremental_sort_path(root,
1097 tomas.vondra 5256 ECB : ordered_rel,
5257 : input_path,
5258 : root->sort_pathkeys,
5259 : presorted_keys,
5260 : limit_tuples);
1097 tomas.vondra 5261 GIC 24 : total_groups = input_path->rows *
5262 24 : input_path->parallel_workers;
5263 : sorted_path = (Path *)
1097 tomas.vondra 5264 CBC 24 : create_gather_merge_path(root, ordered_rel,
5265 : sorted_path,
5266 : sorted_path->pathtarget,
5267 : root->sort_pathkeys, NULL,
5268 : &total_groups);
5269 :
5270 : /* Add projection step if needed */
1097 tomas.vondra 5271 GIC 24 : if (sorted_path->pathtarget != target)
5272 18 : sorted_path = apply_projection_to_path(root, ordered_rel,
5273 : sorted_path, target);
5274 :
5275 24 : add_path(ordered_rel, sorted_path);
5276 : }
1097 tomas.vondra 5277 ECB : }
5278 : }
5279 :
2473 tgl 5280 : /*
5281 : * If there is an FDW that's responsible for all baserels of the query,
5282 : * let it consider adding ForeignPaths.
5283 : */
2473 tgl 5284 GIC 24802 : if (ordered_rel->fdwroutine &&
5285 172 : ordered_rel->fdwroutine->GetForeignUpperPaths)
2473 tgl 5286 CBC 165 : ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
1833 rhaas 5287 ECB : input_rel, ordered_rel,
5288 : NULL);
5289 :
5290 : /* Let extensions possibly add some more paths */
2553 tgl 5291 GIC 24802 : if (create_upper_paths_hook)
2553 tgl 5292 UIC 0 : (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5293 : input_rel, ordered_rel, NULL);
2553 tgl 5294 ECB :
5295 : /*
5296 : * No need to bother with set_cheapest here; grouping_planner does not
5297 : * need us to do it.
5298 : */
2589 tgl 5299 GIC 24802 : Assert(ordered_rel->pathlist != NIL);
5300 :
5301 24802 : return ordered_rel;
5302 : }
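: /*
:  * Editor's illustration of the parallel cases above (assumed plan
:  * shapes): sorting the cheapest partial path and gathering gives
:  *
:  *     Gather Merge
:  *       ->  Sort (ORDER BY key)
:  *         ->  Parallel Seq Scan on t
:  *
:  * and when a partial path is already sorted on a prefix of the keys,
:  * an Incremental Sort replaces the full Sort.
:  */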
5360 tgl 5303 ECB :
5304 :
4806 5305 : /*
5306 : * make_group_input_target
5307 : * Generate appropriate PathTarget for initial input to grouping nodes.
5308 : *
5309 : * If there is grouping or aggregation, the scan/join subplan cannot emit
5310 : * the query's final targetlist; for example, it certainly can't emit any
5311 : * aggregate function calls. This routine generates the correct target
5312 : * for the scan/join subplan.
8742 5313 : *
5314 : * The query target list passed from the parser already contains entries
5315 : * for all ORDER BY and GROUP BY expressions, but it will not have entries
5316 : * for variables used only in HAVING clauses; so we need to add those
5317 : * variables to the subplan target list. Also, we flatten all expressions
5318 : * except GROUP BY items into their component variables; other expressions
5319 : * will be computed by the upper plan nodes rather than by the subplan.
5320 : * For example, given a query like
5321 : * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5322 : * we want to pass this targetlist to the subplan:
5323 : * a+b,c,d
5324 : * where the a+b target will be used by the Sort/Group steps, and the
5325 : * other targets will be used for computing the final results.
5326 : *
5327 : * 'final_target' is the query's final target list (in PathTarget form)
5328 : *
5329 : * The result is the PathTarget to be computed by the Paths returned from
5330 : * query_planner().
5331 : */
5332 : static PathTarget *
2585 tgl 5333 CBC 16240 : make_group_input_target(PlannerInfo *root, PathTarget *final_target)
8742 tgl 5334 ECB : {
6517 tgl 5335 CBC 16240 : Query *parse = root->parse;
2585 tgl 5336 ECB : PathTarget *input_target;
5337 : List *non_group_cols;
4285 5338 : List *non_group_vars;
5339 : int i;
2585 5340 : ListCell *lc;
5341 :
8720 bruce 5342 : /*
5343 : * We must build a target containing all grouping columns, plus any other
5344 : * Vars mentioned in the query's targetlist and HAVING qual.
5345 : */
2585 tgl 5346 GIC 16240 : input_target = create_empty_pathtarget();
4285 tgl 5347 CBC 16240 : non_group_cols = NIL;
5348 :
2585 tgl 5349 GIC 16240 : i = 0;
5350 39336 : foreach(lc, final_target->exprs)
8742 tgl 5351 ECB : {
2585 tgl 5352 CBC 23096 : Expr *expr = (Expr *) lfirst(lc);
2491 tgl 5353 GIC 23096 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
5354 :
81 tgl 5355 GNC 26900 : if (sgref && root->processed_groupClause &&
5356 3804 : get_sortgroupref_clause_noerr(sgref,
5357 : root->processed_groupClause) != NULL)
5358 : {
5359 : /*
5360 : * It's a grouping column, so add it to the input target as-is.
5361 : */
2585 tgl 5362 GIC 3053 : add_column_to_pathtarget(input_target, expr, sgref);
2588 tgl 5363 ECB : }
5364 : else
5365 : {
5366 : /*
5367 : * Non-grouping column, so just remember the expression for later
5368 : * call to pull_var_clause.
5369 : */
2585 tgl 5370 GIC 20043 : non_group_cols = lappend(non_group_cols, expr);
5371 : }
2585 tgl 5372 ECB :
2585 tgl 5373 CBC 23096 : i++;
5374 : }
5375 :
5376 : /*
5377 : * If there's a HAVING clause, we'll need the Vars it uses, too.
5378 : */
4285 5379 16240 : if (parse->havingQual)
4285 tgl 5380 GIC 254 : non_group_cols = lappend(non_group_cols, parse->havingQual);
5381 :
5382 : /*
5383 : * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5384 : * add them to the input target if not already present. (A Var used
5385 : * directly as a GROUP BY item will be present already.) Note this
5386 : * includes Vars used in resjunk items, so we are covering the needs of
5387 : * ORDER BY and window specifications. Vars used within Aggrefs and
5388 : * WindowFuncs will be pulled out here, too.
5389 : */
5390 16240 : non_group_vars = pull_var_clause((Node *) non_group_cols,
5391 : PVC_RECURSE_AGGREGATES |
5392 : PVC_RECURSE_WINDOWFUNCS |
5393 : PVC_INCLUDE_PLACEHOLDERS);
2585 5394 16240 : add_new_columns_to_pathtarget(input_target, non_group_vars);
5395 :
5396 : /* clean up cruft */
4285 5397 16240 : list_free(non_group_vars);
5398 16240 : list_free(non_group_cols);
8742 tgl 5399 ECB :
5400 : /* XXX this causes some redundant cost calculation ... */
2585 tgl 5401 GIC 16240 : return set_pathtarget_cost_width(root, input_target);
5402 : }
8742 tgl 5403 ECB :
5404 : /*
5405 : * make_partial_grouping_target
5406 : * Generate appropriate PathTarget for output of partial aggregate
5407 : * (or partial grouping, if there are no aggregates) nodes.
5408 : *
5409 : * A partial aggregation node needs to emit all the same aggregates that
5410 : * a regular aggregation node would, plus any aggregates used in HAVING;
5411 : * except that the Aggref nodes should be marked as partial aggregates.
5412 : *
1343 michael 5413 : * In addition, we'd better emit any Vars and PlaceHolderVars that are
2478 tgl 5414 : * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5415 : * these would be Vars that are grouped by or used in grouping expressions.)
5416 : *
5417 : * grouping_target is the tlist to be emitted by the topmost aggregation step.
5418 : * havingQual represents the HAVING clause.
5419 : */
2575 rhaas 5420 : static PathTarget *
1851 rhaas 5421 CBC 1040 : make_partial_grouping_target(PlannerInfo *root,
5422 : PathTarget *grouping_target,
1851 rhaas 5423 ECB : Node *havingQual)
5424 : {
5425 : PathTarget *partial_target;
5426 : List *non_group_cols;
5427 : List *non_group_exprs;
5428 : int i;
5429 : ListCell *lc;
5430 :
2478 tgl 5431 GIC 1040 : partial_target = create_empty_pathtarget();
2575 rhaas 5432 1040 : non_group_cols = NIL;
5433 :
5434 1040 : i = 0;
2478 tgl 5435 3741 : foreach(lc, grouping_target->exprs)
5436 : {
2575 rhaas 5437 2701 : Expr *expr = (Expr *) lfirst(lc);
2478 tgl 5438 2701 : Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5439 :
81 tgl 5440 GNC 4558 : if (sgref && root->processed_groupClause &&
5441 1857 : get_sortgroupref_clause_noerr(sgref,
5442 : root->processed_groupClause) != NULL)
2575 rhaas 5443 ECB : {
5444 : /*
5445 : * It's a grouping column, so add it to the partial_target as-is.
2478 tgl 5446 : * (This allows the upper agg step to repeat the grouping calcs.)
5447 : */
2478 tgl 5448 CBC 918 : add_column_to_pathtarget(partial_target, expr, sgref);
5449 : }
5450 : else
2575 rhaas 5451 ECB : {
5452 : /*
5453 : * Non-grouping column, so just remember the expression for later
5454 : * call to pull_var_clause.
5455 : */
2575 rhaas 5456 GIC 1783 : non_group_cols = lappend(non_group_cols, expr);
5457 : }
5458 :
5459 2701 : i++;
5460 : }
5461 :
5462 : /*
5463 : * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
2575 rhaas 5464 ECB : */
1851 rhaas 5465 GIC 1040 : if (havingQual)
5466 412 : non_group_cols = lappend(non_group_cols, havingQual);
5467 :
5468 : /*
5469 : * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5470 : * non-group cols (plus HAVING), and add them to the partial_target if not
5471 : * already present. (An expression used directly as a GROUP BY item will
5472 : * be present already.) Note this includes Vars used in resjunk items, so
5473 : * we are covering the needs of ORDER BY and window specifications.
5474 : */
2575 5475 1040 : non_group_exprs = pull_var_clause((Node *) non_group_cols,
5476 : PVC_INCLUDE_AGGREGATES |
5477 : PVC_RECURSE_WINDOWFUNCS |
5478 : PVC_INCLUDE_PLACEHOLDERS);
5479 :
2478 tgl 5480 1040 : add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5481 :
5482 : /*
5483 : * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5484 : * are at the top level of the target list, so we can just scan the list
2478 tgl 5485 ECB : * rather than recursing through the expression trees.
5486 : */
2478 tgl 5487 GIC 4045 : foreach(lc, partial_target->exprs)
5488 : {
5489 3005 : Aggref *aggref = (Aggref *) lfirst(lc);
5490 :
2478 tgl 5491 CBC 3005 : if (IsA(aggref, Aggref))
5492 : {
5493 : Aggref *newaggref;
5494 :
5495 : /*
2478 tgl 5496 ECB : * We shouldn't need to copy the substructure of the Aggref node,
5497 : * but flat-copy the node itself to avoid damaging other trees.
5498 : */
2478 tgl 5499 GIC 2072 : newaggref = makeNode(Aggref);
5500 2072 : memcpy(newaggref, aggref, sizeof(Aggref));
5501 :
5502 : /* For now, assume serialization is required */
2478 tgl 5503 CBC 2072 : mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
2478 tgl 5504 ECB :
2478 tgl 5505 GIC 2072 : lfirst(lc) = newaggref;
5506 : }
5507 : }
5508 :
2575 rhaas 5509 ECB : /* clean up cruft */
2575 rhaas 5510 CBC 1040 : list_free(non_group_exprs);
5511 1040 : list_free(non_group_cols);
2575 rhaas 5512 ECB :
5513 : /* XXX this causes some redundant cost calculation ... */
2478 tgl 5514 CBC 1040 : return set_pathtarget_cost_width(root, partial_target);
5515 : }
2478 tgl 5516 ECB :
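/*
 * Worked example (a sketch, not generated planner output): for
 *
 *     SELECT a, sum(b) FROM t GROUP BY a HAVING max(c) > 0
 *
 * the partial target built above is (a, sum(b), max(c)), with both Aggrefs
 * marked AGGSPLIT_INITIAL_SERIAL so that each partial Agg node emits
 * serialized transition states for the finalizing Agg node to combine.
 */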
5517 : /*
5518 : * mark_partial_aggref
5519 : * Adjust an Aggref to make it represent a partial-aggregation step.
5520 : *
5521 : * The Aggref node is modified in-place; caller must do any copying required.
5522 : */
5523 : void
2478 tgl 5524 CBC 3462 : mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
2478 tgl 5525 ECB : {
5526 : /* aggtranstype should be computed by this point */
2478 tgl 5527 GIC 3462 : Assert(OidIsValid(agg->aggtranstype));
5528 : /* ... but aggsplit should still be as the parser left it */
5529 3462 : Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5530 :
5531 : /* Mark the Aggref with the intended partial-aggregation mode */
5532 3462 : agg->aggsplit = aggsplit;
5533 :
5534 : /*
2478 tgl 5535 ECB : * Adjust result type if needed. Normally, a partial aggregate returns
5536 : * the aggregate's transition type; but if that's INTERNAL and we're
5537 : * serializing, it returns BYTEA instead.
5538 : */
2478 tgl 5539 GIC 3462 : if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5540 : {
5541 2767 : if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5542 121 : agg->aggtype = BYTEAOID;
5543 : else
2478 tgl 5544 CBC 2646 : agg->aggtype = agg->aggtranstype;
2478 tgl 5545 ECB : }
2575 rhaas 5546 GIC 3462 : }
5547 :
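/*
 * Worked example of the result-type rule above, assuming stock catalog
 * entries: sum(int4) has aggtranstype INT8, so its partial Aggref returns
 * INT8 directly, while avg(int8) has aggtranstype INTERNAL, so under
 * AGGSPLIT_INITIAL_SERIAL (which serializes) its partial Aggref returns
 * BYTEA.  A finalizing Aggref (AGGSPLIT_FINAL_DESERIAL) keeps the declared
 * aggregate result type, since DO_AGGSPLIT_SKIPFINAL is false for it.
 */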
5548 : /*
5549 : * postprocess_setop_tlist
5550 : * Fix up targetlist returned by plan_set_operations().
8186 tgl 5551 ECB : *
5552 : * We need to transpose sort key info from the orig_tlist into new_tlist.
5553 : * NOTE: this would not be good enough if we supported resjunk sort keys
5554 : * for results of set operations --- then, we'd need to project a whole
5555 : * new tlist to evaluate the resjunk columns. For now, just elog if we
5556 : * find any resjunk columns in orig_tlist.
5557 : */
5558 : static List *
8186 tgl 5559 GIC 2588 : postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
8186 tgl 5560 ECB : {
6892 neilc 5561 : ListCell *l;
6892 neilc 5562 GIC 2588 : ListCell *orig_tlist_item = list_head(orig_tlist);
5563 :
8186 tgl 5564 CBC 9365 : foreach(l, new_tlist)
5565 : {
2042 tgl 5566 GIC 6777 : TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5567 : TargetEntry *orig_tle;
5568 :
5569 : /* ignore resjunk columns in setop result */
6577 5570 6777 : if (new_tle->resjunk)
8186 5571 258 : continue;
5572 :
6892 neilc 5573 6519 : Assert(orig_tlist_item != NULL);
2042 tgl 5574 6519 : orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
1364 5575 6519 : orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
6385 bruce 5576 6519 : if (orig_tle->resjunk) /* should not happen */
7198 tgl 5577 LBC 0 : elog(ERROR, "resjunk output columns are not implemented");
6577 tgl 5578 CBC 6519 : Assert(new_tle->resno == orig_tle->resno);
6577 tgl 5579 GIC 6519 : new_tle->ressortgroupref = orig_tle->ressortgroupref;
5580 : }
6892 neilc 5581 2588 : if (orig_tlist_item != NULL)
7198 tgl 5582 LBC 0 : elog(ERROR, "resjunk output columns are not implemented");
8186 tgl 5583 GIC 2588 : return new_tlist;
5584 : }
5585 :
5586 : /*
5587 : * optimize_window_clauses
5588 : * Call each WindowFunc's prosupport function to see if we're able to
5589 : * make any adjustments to any of the WindowClauses so that the executor
5590 : * can execute the window functions in a more optimal way.
5591 : *
5592 : * Currently we only allow adjustments to the WindowClause's frameOptions. We
5593 : * may allow more things to be done here in the future.
5594 : */
5595 : static void
107 drowley 5596 GNC 1017 : optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
5597 : {
5598 1017 : List *windowClause = root->parse->windowClause;
5599 : ListCell *lc;
5600 :
5601 2136 : foreach(lc, windowClause)
5602 : {
5603 1119 : WindowClause *wc = lfirst_node(WindowClause, lc);
5604 : ListCell *lc2;
5605 1119 : int optimizedFrameOptions = 0;
5606 :
5607 1119 : Assert(wc->winref <= wflists->maxWinRef);
5608 :
5609 : /* skip any WindowClauses that have no WindowFuncs */
5610 1119 : if (wflists->windowFuncs[wc->winref] == NIL)
5611 12 : continue;
5612 :
5613 1347 : foreach(lc2, wflists->windowFuncs[wc->winref])
5614 : {
5615 : SupportRequestOptimizeWindowClause req;
5616 : SupportRequestOptimizeWindowClause *res;
5617 1125 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5618 : Oid prosupport;
5619 :
5620 1125 : prosupport = get_func_support(wfunc->winfnoid);
5621 :
5622 : /* Check if there's a support function for 'wfunc' */
5623 1125 : if (!OidIsValid(prosupport))
5624 885 : break; /* can't optimize this WindowClause */
5625 :
5626 338 : req.type = T_SupportRequestOptimizeWindowClause;
5627 338 : req.window_clause = wc;
5628 338 : req.window_func = wfunc;
5629 338 : req.frameOptions = wc->frameOptions;
5630 :
5631 : /* call the support function */
5632 : res = (SupportRequestOptimizeWindowClause *)
5633 338 : DatumGetPointer(OidFunctionCall1(prosupport,
5634 : PointerGetDatum(&req)));
5635 :
5636 : /*
5637 : * Skip to next WindowClause if the support function does not
5638 : * support this request type.
5639 : */
5640 338 : if (res == NULL)
5641 98 : break;
5642 :
5643 : /*
5644 : * Save these frameOptions for the first WindowFunc for this
5645 : * WindowClause.
5646 : */
5647 240 : if (foreach_current_index(lc2) == 0)
5648 228 : optimizedFrameOptions = res->frameOptions;
5649 :
5650 : /*
5651 : * On subsequent WindowFuncs, if the frameOptions are not the same
5652 : * then we're unable to optimize the frameOptions for this
5653 : * WindowClause.
5654 : */
5655 12 : else if (optimizedFrameOptions != res->frameOptions)
107 drowley 5656 UNC 0 : break; /* skip to the next WindowClause, if any */
5657 : }
5658 :
5659 : /* adjust the frameOptions if all WindowFuncs agree that it's ok */
107 drowley 5660 GNC 1107 : if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5661 : {
5662 : ListCell *lc3;
5663 :
5664 : /* apply the new frame options */
5665 222 : wc->frameOptions = optimizedFrameOptions;
5666 :
5667 : /*
5668 : * We now check to see if changing the frameOptions has caused
5669 : * this WindowClause to be a duplicate of some other WindowClause.
5670 : * This can only happen if we have multiple WindowClauses, so
5671 : * don't bother if there's only 1.
5672 : */
5673 222 : if (list_length(windowClause) == 1)
5674 177 : continue;
5675 :
5676 : /*
5677 : * Do the duplicate check and reuse the existing WindowClause if
5678 : * we find a duplicate.
5679 : */
5680 114 : foreach(lc3, windowClause)
5681 : {
5682 87 : WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5683 :
5684 : /* skip over the WindowClause we're currently editing */
5685 87 : if (existing_wc == wc)
5686 27 : continue;
5687 :
5688 : /*
5689 : * Perform the same duplicate check that is done in
5690 : * transformWindowFuncCall.
5691 : */
5692 120 : if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5693 60 : equal(wc->orderClause, existing_wc->orderClause) &&
5694 60 : wc->frameOptions == existing_wc->frameOptions &&
5695 36 : equal(wc->startOffset, existing_wc->startOffset) &&
5696 18 : equal(wc->endOffset, existing_wc->endOffset))
5697 : {
5698 : ListCell *lc4;
5699 :
5700 : /*
5701 : * Now move each WindowFunc in 'wc' into 'existing_wc'.
5702 : * This requires adjusting each WindowFunc's winref and
5703 : * moving the WindowFuncs in 'wc' to the list of
5704 : * WindowFuncs in 'existing_wc'.
5705 : */
5706 39 : foreach(lc4, wflists->windowFuncs[wc->winref])
5707 : {
5708 21 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5709 :
5710 21 : wfunc->winref = existing_wc->winref;
5711 : }
5712 :
5713 : /* move list items */
5714 36 : wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5715 18 : wflists->windowFuncs[wc->winref]);
5716 18 : wflists->windowFuncs[wc->winref] = NIL;
5717 :
5718 : /*
5719 : * transformWindowFuncCall() should have made sure there
5720 : * are no other duplicates, so we needn't bother looking
5721 : * any further.
5722 : */
5723 18 : break;
5724 : }
5725 : }
5726 : }
5727 : }
5728 1017 : }
5729 :
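/*
 * Illustrative sketch: row_number()'s support function reports that the
 * frame does not affect its result, so a clause such as
 *
 *     row_number() OVER (ORDER BY x)
 *
 * can run with the cheaper ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT
 * ROW frame instead of the SQL-default RANGE frame.  (The frameOptions
 * actually chosen are up to each prosupport function; this assumes the
 * stock window_row_number_support behavior.)
 */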
5730 : /*
5731 : * select_active_windows
5215 tgl 5732 ECB : * Create a list of the "active" window clauses (ie, those referenced
5733 : * by non-deleted WindowFuncs) in the order they are to be executed.
5734 : */
5735 : static List *
5215 tgl 5736 GIC 1017 : select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
5737 : {
1668 rhodiumtoad 5738 CBC 1017 : List *windowClause = root->parse->windowClause;
1668 rhodiumtoad 5739 GIC 1017 : List *result = NIL;
5740 : ListCell *lc;
5741 1017 : int nActive = 0;
5742 1017 : WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData)
5743 1017 : * list_length(windowClause));
5215 tgl 5744 ECB :
1668 rhodiumtoad 5745 : /* First, construct an array of the active windows */
1668 rhodiumtoad 5746 GIC 2136 : foreach(lc, windowClause)
5215 tgl 5747 ECB : {
2042 tgl 5748 GIC 1119 : WindowClause *wc = lfirst_node(WindowClause, lc);
5749 :
5750 : /* It's only active if wflists shows some related WindowFuncs */
5215 5751 1119 : Assert(wc->winref <= wflists->maxWinRef);
1668 rhodiumtoad 5752 1119 : if (wflists->windowFuncs[wc->winref] == NIL)
5753 30 : continue;
1668 rhodiumtoad 5754 ECB :
1668 rhodiumtoad 5755 CBC 1089 : actives[nActive].wc = wc; /* original clause */
5756 :
5757 : /*
1668 rhodiumtoad 5758 ECB : * For sorting, we want the list of partition keys followed by the
5759 : * list of sort keys. But pathkeys construction will remove duplicates
5760 : * between the two, so we can as well (even though we can't detect all
5761 : * of the duplicates, since some may come from ECs - that might mean
5762 : * we miss optimization chances here). We must, however, ensure that
5763 : * the order of entries is preserved with respect to the ones we do
5764 : * keep.
5765 : *
5766 : * partitionClause and orderClause had their own duplicates removed in
5767 : * parse analysis, so we're only concerned here with removing
5768 : * orderClause entries that also appear in partitionClause.
5769 : */
1668 rhodiumtoad 5770 CBC 2178 : actives[nActive].uniqueOrder =
1668 rhodiumtoad 5771 GIC 1089 : list_concat_unique(list_copy(wc->partitionClause),
1668 rhodiumtoad 5772 CBC 1089 : wc->orderClause);
1668 rhodiumtoad 5773 GIC 1089 : nActive++;
5774 : }
5775 :
5776 : /*
5777 : * Sort active windows by their partitioning/ordering clauses, ignoring
5778 : * any framing clauses, so that the windows that need the same sorting are
5779 : * adjacent in the list. When we come to generate paths, this will avoid
5780 : * inserting additional Sort nodes.
5781 : *
5782 : * This is how we implement a specific requirement from the SQL standard,
5783 : * which says that when two or more windows are order-equivalent (i.e.
5784 : * have matching partition and order clauses, even if their names or
1668 rhodiumtoad 5785 ECB : * framing clauses differ), then all peer rows must be presented in the
5786 : * same order in all of them. If we allowed multiple sort nodes for such
5787 : * cases, we'd risk having the peer rows end up in different orders in
5788 : * equivalent windows due to sort instability. (See General Rule 4 of
5789 : * <window clause> in SQL2008 - SQL2016.)
5790 : *
5791 : * Additionally, if the entire list of clauses of one window is a prefix
5792 : * of another, put first the window with stronger sorting requirements.
5793 : * This way we will first sort for stronger window, and won't have to sort
5794 : * again for the weaker one.
5795 : */
1668 rhodiumtoad 5796 GIC 1017 : qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
5215 tgl 5797 ECB :
5798 : /* build ordered list of the original WindowClause nodes */
1668 rhodiumtoad 5799 GIC 2106 : for (int i = 0; i < nActive; i++)
5800 1089 : result = lappend(result, actives[i].wc);
5801 :
5802 1017 : pfree(actives);
5215 tgl 5803 ECB :
1668 rhodiumtoad 5804 CBC 1017 : return result;
5805 : }
1668 rhodiumtoad 5806 ECB :
5807 : /*
5808 : * common_prefix_cmp
5809 : * QSort comparison function for WindowClauseSortData
5810 : *
5811 : * Sort the windows by the required sorting clauses. First, compare the sort
5812 : * clauses themselves. Second, if one window's clauses are a prefix of another
5813 : * one's clauses, put the window with more sort clauses first.
5814 : *
5815 : * We purposefully sort by the highest tleSortGroupRef first. Since
5816 : * tleSortGroupRefs are assigned to the query's DISTINCT and ORDER BY
5817 : * clauses first, and since we sort the lowest tleSortGroupRefs last, a
5818 : * WindowClause sharing a tleSortGroupRef with the query's DISTINCT or
5819 : * ORDER BY clause tends to be processed last, making it more likely that
5820 : * the final WindowAgg provides presorted input for the DISTINCT or ORDER
5821 : * BY step, thus reducing the total number of sorts required for the query.
5822 : */
5823 : static int
1668 rhodiumtoad 5824 GIC 78 : common_prefix_cmp(const void *a, const void *b)
1668 rhodiumtoad 5825 ECB : {
1668 rhodiumtoad 5826 GIC 78 : const WindowClauseSortData *wcsa = a;
5827 78 : const WindowClauseSortData *wcsb = b;
5828 : ListCell *item_a;
5829 : ListCell *item_b;
5830 :
5831 135 : forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5832 : {
5833 108 : SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
1668 rhodiumtoad 5834 CBC 108 : SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
1668 rhodiumtoad 5835 ECB :
1668 rhodiumtoad 5836 CBC 108 : if (sca->tleSortGroupRef > scb->tleSortGroupRef)
1668 rhodiumtoad 5837 GIC 51 : return -1;
5838 102 : else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
5839 33 : return 1;
5840 69 : else if (sca->sortop > scb->sortop)
1668 rhodiumtoad 5841 LBC 0 : return -1;
1668 rhodiumtoad 5842 GBC 69 : else if (sca->sortop < scb->sortop)
1668 rhodiumtoad 5843 GIC 12 : return 1;
5844 57 : else if (sca->nulls_first && !scb->nulls_first)
1668 rhodiumtoad 5845 UIC 0 : return -1;
1668 rhodiumtoad 5846 GIC 57 : else if (!sca->nulls_first && scb->nulls_first)
1668 rhodiumtoad 5847 UIC 0 : return 1;
5848 : /* no need to compare eqop, since it is fully determined by sortop */
5215 tgl 5849 ECB : }
5850 :
1668 rhodiumtoad 5851 CBC 27 : if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
1668 rhodiumtoad 5852 GIC 3 : return -1;
5853 24 : else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
5854 6 : return 1;
5855 :
5856 18 : return 0;
5857 : }
5858 :
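/*
 * Worked example (a sketch) of the comparator: given
 *
 *     w1: PARTITION BY a ORDER BY b    -> uniqueOrder = (a, b)
 *     w2: PARTITION BY a               -> uniqueOrder = (a)
 *
 * the tleSortGroupRefs match over the common prefix, so the longer clause
 * list wins and w1 sorts first; a single sort on (a, b) then satisfies
 * both windows, since w2's requirement is a prefix of w1's.
 */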
5859 : /*
5860 : * make_window_input_target
5861 : * Generate appropriate PathTarget for initial input to WindowAgg nodes.
5862 : *
5863 : * When the query has window functions, this function computes the desired
5864 : * target to be computed by the node just below the first WindowAgg.
5865 : * This tlist must contain all values needed to evaluate the window functions,
5866 : * compute the final target list, and perform any required final sort step.
5867 : * If multiple WindowAggs are needed, each intermediate one adds its window
5868 : * function results onto this base tlist; only the topmost WindowAgg computes
5869 : * the actual desired target list.
5870 : *
5871 : * This function is much like make_group_input_target, though not quite enough
5872 : * like it to share code. As in that function, we flatten most expressions
5873 : * into their component variables. But we do not want to flatten window
5874 : * PARTITION BY/ORDER BY clauses, since that might result in multiple
5875 : * evaluations of them, which would be bad (possibly even resulting in
5876 : * inconsistent answers, if they contain volatile functions).
5877 : * Also, we must not flatten GROUP BY clauses that were left unflattened by
5878 : * make_group_input_target, because we may no longer have access to the
5879 : * individual Vars in them.
5880 : *
5881 : * Another key difference from make_group_input_target is that we don't
5882 : * flatten Aggref expressions, since those are to be computed below the
2587 tgl 5883 ECB : * window functions and just referenced like Vars above that.
5884 : *
2585 5885 : * 'final_target' is the query's final target list (in PathTarget form)
5886 : * 'activeWindows' is the list of active windows previously identified by
5887 : * select_active_windows.
5888 : *
5889 : * The result is the PathTarget to be computed by the plan node immediately
5890 : * below the first WindowAgg node.
5891 : */
5892 : static PathTarget *
2587 tgl 5893 GIC 1017 : make_window_input_target(PlannerInfo *root,
5894 : PathTarget *final_target,
5895 : List *activeWindows)
5123 tgl 5896 ECB : {
5897 : PathTarget *input_target;
3860 5898 : Bitmapset *sgrefs;
5899 : List *flattenable_cols;
5900 : List *flattenable_vars;
2585 5901 : int i;
5123 5902 : ListCell *lc;
5903 :
81 tgl 5904 GNC 1017 : Assert(root->parse->hasWindowFuncs);
3860 tgl 5905 ECB :
5906 : /*
5907 : * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
5908 : * into a bitmapset for convenient reference below.
5909 : */
3860 tgl 5910 GIC 1017 : sgrefs = NULL;
5123 tgl 5911 CBC 2106 : foreach(lc, activeWindows)
5912 : {
2042 tgl 5913 GIC 1089 : WindowClause *wc = lfirst_node(WindowClause, lc);
5914 : ListCell *lc2;
5915 :
5123 5916 1449 : foreach(lc2, wc->partitionClause)
5917 : {
2042 5918 360 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
5123 tgl 5919 ECB :
5123 tgl 5920 GIC 360 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
5921 : }
5123 tgl 5922 CBC 2044 : foreach(lc2, wc->orderClause)
5923 : {
2042 tgl 5924 GIC 955 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
5925 :
5123 5926 955 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
5927 : }
5123 tgl 5928 ECB : }
5929 :
5930 : /* Add in sortgroupref numbers of GROUP BY clauses, too */
81 tgl 5931 GNC 1098 : foreach(lc, root->processed_groupClause)
5932 : {
2042 tgl 5933 GIC 81 : SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
5934 :
3860 5935 81 : sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
5936 : }
5937 :
5938 : /*
2585 tgl 5939 ECB : * Construct a target containing all the non-flattenable targetlist items,
5940 : * and save aside the others for a moment.
5941 : */
2585 tgl 5942 GIC 1017 : input_target = create_empty_pathtarget();
3860 tgl 5943 CBC 1017 : flattenable_cols = NIL;
5944 :
2585 tgl 5945 GIC 1017 : i = 0;
2585 tgl 5946 CBC 4430 : foreach(lc, final_target->exprs)
5123 tgl 5947 ECB : {
2585 tgl 5948 GIC 3413 : Expr *expr = (Expr *) lfirst(lc);
2491 5949 3413 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
5123 tgl 5950 ECB :
5951 : /*
5952 : * Don't want to deconstruct window clauses or GROUP BY items. (Note
5953 : * that such items can't contain window functions, so it's okay to
5954 : * compute them below the WindowAgg nodes.)
5955 : */
2585 tgl 5956 GIC 3413 : if (sgref != 0 && bms_is_member(sgref, sgrefs))
5957 : {
5958 : /*
5959 : * Don't want to deconstruct this value, so add it to the input
5960 : * target as-is.
5961 : */
5962 1248 : add_column_to_pathtarget(input_target, expr, sgref);
5963 : }
5964 : else
5965 : {
5966 : /*
5967 : * Column is to be flattened, so just remember the expression for
5968 : * later call to pull_var_clause.
5969 : */
2585 tgl 5970 CBC 2165 : flattenable_cols = lappend(flattenable_cols, expr);
5971 : }
5972 :
2585 tgl 5973 GIC 3413 : i++;
5974 : }
5975 :
5976 : /*
5977 : * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
5978 : * add them to the input target if not already present. (Some might be
5979 : * there already because they're used directly as window/group clauses.)
3860 tgl 5980 ECB : *
2585 5981 : * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
5982 : * Aggrefs are placed in the Agg node's tlist and not left to be computed
2586 5983 : * at higher levels. On the other hand, we should recurse into
5984 : * WindowFuncs to make sure their input expressions are available.
5985 : */
3860 tgl 5986 CBC 1017 : flattenable_vars = pull_var_clause((Node *) flattenable_cols,
2586 tgl 5987 ECB : PVC_INCLUDE_AGGREGATES |
5988 : PVC_RECURSE_WINDOWFUNCS |
3860 5989 : PVC_INCLUDE_PLACEHOLDERS);
2585 tgl 5990 CBC 1017 : add_new_columns_to_pathtarget(input_target, flattenable_vars);
5991 :
5992 : /* clean up cruft */
3860 tgl 5993 GIC 1017 : list_free(flattenable_vars);
5994 1017 : list_free(flattenable_cols);
5995 :
5996 : /* XXX this causes some redundant cost calculation ... */
2585 tgl 5997 CBC 1017 : return set_pathtarget_cost_width(root, input_target);
5998 : }
5999 :
6000 : /*
6001 : * make_pathkeys_for_window
6002 : * Create a pathkeys list describing the required input ordering
6003 : * for the given WindowClause.
6004 : *
5215 tgl 6005 ECB : * The required ordering is first the PARTITION keys, then the ORDER keys.
6006 : * In the future we might try to implement windowing using hashing, in which
6007 : * case the ordering could be relaxed, but for now we always sort.
6008 : */
6009 : static List *
5215 tgl 6010 GIC 2172 : make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
6011 : List *tlist)
6012 : {
6013 : List *window_pathkeys;
5215 tgl 6014 ECB : List *window_sortclauses;
6015 :
6016 : /* Throw error if can't sort */
5215 tgl 6017 GIC 2172 : if (!grouping_is_sortable(wc->partitionClause))
5215 tgl 6018 UIC 0 : ereport(ERROR,
6019 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6020 : errmsg("could not implement window PARTITION BY"),
6021 : errdetail("Window partitioning columns must be of sortable datatypes.")));
5215 tgl 6022 GIC 2172 : if (!grouping_is_sortable(wc->orderClause))
5215 tgl 6023 UIC 0 : ereport(ERROR,
5215 tgl 6024 ECB : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6025 : errmsg("could not implement window ORDER BY"),
6026 : errdetail("Window ordering columns must be of sortable datatypes.")));
6027 :
6028 : /* Okay, make the combined pathkeys */
1336 tgl 6029 CBC 2172 : window_sortclauses = list_concat_copy(wc->partitionClause, wc->orderClause);
5215 tgl 6030 GIC 2172 : window_pathkeys = make_pathkeys_for_sortclauses(root,
6031 : window_sortclauses,
6032 : tlist);
6033 2172 : list_free(window_sortclauses);
6034 2172 : return window_pathkeys;
6035 : }
5215 tgl 6036 ECB :
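/*
 * Worked example (a sketch): for OVER (PARTITION BY a ORDER BY b DESC)
 * the concatenated sort clause list is (a, b DESC), so the resulting
 * pathkeys demand a sort by a, then b descending; rows of each partition
 * are then contiguous and ordered as the window frame expects.
 */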
6037 : /*
2585 6038 : * make_sort_input_target
6039 : * Generate appropriate PathTarget for initial input to Sort step.
6040 : *
6041 : * If the query has ORDER BY, this function chooses the target to be computed
6042 : * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6043 : * project) steps. This might or might not be identical to the query's final
6044 : * output target.
6045 : *
6046 : * The main argument for keeping the sort-input tlist the same as the final
6047 : * is that we avoid a separate projection node (which will be needed if
6048 : * they're different, because Sort can't project). However, there are also
6049 : * advantages to postponing tlist evaluation till after the Sort: it ensures
6050 : * a consistent order of evaluation for any volatile functions in the tlist,
6051 : * and if there's also a LIMIT, we can stop the query without ever computing
6052 : * tlist functions for later rows, which is beneficial for both volatile and
6053 : * expensive functions.
6054 : *
6055 : * Our current policy is to postpone volatile expressions till after the sort
6056 : * unconditionally (assuming that that's possible, ie they are in plain tlist
6057 : * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6058 : * postpone set-returning expressions, because running them beforehand would
2571 6059 : * bloat the sort dataset, and because it might cause unexpected output order
6060 : * if the sort isn't stable. However there's a constraint on that: all SRFs
6061 : * in the tlist should be evaluated at the same plan step, so that they can
6062 : * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6063 : * mustn't postpone any SRFs. (Note that in principle that policy should
6064 : * probably get applied to the group/window input targetlists too, but we
6065 : * have not done that historically.) Lastly, expensive expressions are
6066 : * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6067 : * partial evaluation of the query is possible (if neither is true, we expect
6068 : * to have to evaluate the expressions for every row anyway), or if there are
6069 : * any volatile or set-returning expressions (since once we've put in a
6070 : * projection at all, it won't cost any more to postpone more stuff).
6071 : *
6072 : * Another issue that could potentially be considered here is that
2585 6073 : * evaluating tlist expressions could result in data that's either wider
6074 : * or narrower than the input Vars, thus changing the volume of data that
6075 : * has to go through the Sort. However, we usually have only a very bad
6076 : * idea of the output width of any expression more complex than a Var,
6077 : * so for now it seems too risky to try to optimize on that basis.
6078 : *
6079 : * Note that if we do produce a modified sort-input target, and then the
6080 : * query ends up not using an explicit Sort, no particular harm is done:
6081 : * we'll initially use the modified target for the preceding path nodes,
6082 : * but then change them to the final target with apply_projection_to_path.
6083 : * Moreover, in such a case the guarantees about evaluation order of
6084 : * volatile functions still hold, since the rows are sorted already.
6085 : *
6086 : * This function has some things in common with make_group_input_target and
6087 : * make_window_input_target, though the detailed rules for what to do are
6088 : * different. We never flatten/postpone any grouping or ordering columns;
6089 : * those are needed before the sort. If we do flatten a particular
6090 : * expression, we leave Aggref and WindowFunc nodes alone, since those were
6091 : * computed earlier.
6092 : *
6093 : * 'final_target' is the query's final target list (in PathTarget form)
6094 : * 'have_postponed_srfs' is an output argument, see below
6095 : *
6096 : * The result is the PathTarget to be computed by the plan node immediately
6097 : * below the Sort step (and the Distinct step, if any). This will be
6098 : * exactly final_target if we decide a projection step wouldn't be helpful.
6099 : *
6100 : * In addition, *have_postponed_srfs is set to true if we choose to postpone
6101 : * any set-returning functions to after the Sort.
6102 : */
6103 : static PathTarget *
2585 tgl 6104 GIC 23324 : make_sort_input_target(PlannerInfo *root,
6105 : PathTarget *final_target,
6106 : bool *have_postponed_srfs)
6107 : {
2585 tgl 6108 CBC 23324 : Query *parse = root->parse;
6109 : PathTarget *input_target;
6110 : int ncols;
2571 tgl 6111 ECB : bool *col_is_srf;
6112 : bool *postpone_col;
2585 6113 : bool have_srf;
6114 : bool have_volatile;
6115 : bool have_expensive;
6116 : bool have_srf_sortcols;
6117 : bool postpone_srfs;
6118 : List *postponable_cols;
6119 : List *postponable_vars;
6120 : int i;
6121 : ListCell *lc;
6122 :
6123 : /* Shouldn't get here unless query has ORDER BY */
2585 tgl 6124 CBC 23324 : Assert(parse->sortClause);
2585 tgl 6125 ECB :
2118 tgl 6126 GBC 23324 : *have_postponed_srfs = false; /* default result */
2585 tgl 6127 ECB :
6128 : /* Inspect tlist and collect per-column information */
2585 tgl 6129 GIC 23324 : ncols = list_length(final_target->exprs);
2571 tgl 6130 CBC 23324 : col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
2585 tgl 6131 GBC 23324 : postpone_col = (bool *) palloc0(ncols * sizeof(bool));
2571 tgl 6132 CBC 23324 : have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6133 :
2585 tgl 6134 GIC 23324 : i = 0;
6135 137645 : foreach(lc, final_target->exprs)
6136 : {
6137 114321 : Expr *expr = (Expr *) lfirst(lc);
6138 :
6139 : /*
6140 : * If the column has a sortgroupref, assume it has to be evaluated
6141 : * before sorting. Generally such columns would be ORDER BY, GROUP
6142 : * BY, etc targets. One exception is columns that were removed from
6143 : * GROUP BY by remove_useless_groupby_columns() ... but those would
6144 : * only be Vars anyway. There don't seem to be any cases where it
2585 tgl 6145 ECB : * would be worth the trouble to double-check.
6146 : */
2491 tgl 6147 CBC 114321 : if (get_pathtarget_sortgroupref(final_target, i) == 0)
6148 : {
6149 : /*
2571 tgl 6150 ECB : * Check for SRF or volatile functions. Check the SRF case first
6151 : * because we must know whether we have any postponed SRFs.
2585 6152 : */
2399 tgl 6153 GIC 80125 : if (parse->hasTargetSRFs &&
2399 tgl 6154 CBC 108 : expression_returns_set((Node *) expr))
6155 : {
2571 tgl 6156 ECB : /* We'll decide below whether these are postponable */
2571 tgl 6157 GIC 48 : col_is_srf[i] = true;
2585 6158 48 : have_srf = true;
2585 tgl 6159 ECB : }
2585 tgl 6160 CBC 79969 : else if (contain_volatile_functions((Node *) expr))
6161 : {
2571 tgl 6162 ECB : /* Unconditionally postpone */
2585 tgl 6163 GIC 83 : postpone_col[i] = true;
6164 83 : have_volatile = true;
6165 : }
2585 tgl 6166 ECB : else
6167 : {
6168 : /*
6169 : * Else check the cost. XXX it's annoying to have to do this
6170 : * when set_pathtarget_cost_width() just did it. Refactor to
6171 : * allow sharing the work?
6172 : */
6173 : QualCost cost;
6174 :
2585 tgl 6175 CBC 79886 : cost_qual_eval_node(&cost, (Node *) expr, root);
2585 tgl 6176 ECB :
6177 : /*
6178 : * We arbitrarily define "expensive" as "more than 10X
6179 : * cpu_operator_cost". Note this will take in any PL function
6180 : * with default cost.
6181 : */
2585 tgl 6182 CBC 79886 : if (cost.per_tuple > 10 * cpu_operator_cost)
6183 : {
2585 tgl 6184 GIC 6101 : postpone_col[i] = true;
6185 6101 : have_expensive = true;
6186 : }
6187 : }
6188 : }
2571 tgl 6189 ECB : else
6190 : {
6191 : /* For sortgroupref cols, just check if any contain SRFs */
2571 tgl 6192 GIC 34304 : if (!have_srf_sortcols &&
2399 6193 34459 : parse->hasTargetSRFs &&
2571 6194 155 : expression_returns_set((Node *) expr))
6195 62 : have_srf_sortcols = true;
2571 tgl 6196 ECB : }
2585 6197 :
2585 tgl 6198 GIC 114321 : i++;
6199 : }
6200 :
6201 : /*
6202 : * We can postpone SRFs if we have some but none are in sortgroupref cols.
6203 : */
2571 tgl 6204 CBC 23324 : postpone_srfs = (have_srf && !have_srf_sortcols);
2571 tgl 6205 EUB :
6206 : /*
6207 : * If we don't need a post-sort projection, just return final_target.
6208 : */
2571 tgl 6209 CBC 23324 : if (!(postpone_srfs || have_volatile ||
2585 tgl 6210 GIC 23219 : (have_expensive &&
6211 3705 : (parse->limitCount || root->tuple_fraction > 0))))
6212 23201 : return final_target;
6213 :
2585 tgl 6214 ECB : /*
6215 : * Report whether the post-sort projection will contain set-returning
6216 : * functions. This is important because it affects whether the Sort can
6217 : * rely on the query's LIMIT (if any) to bound the number of rows it needs
6218 : * to return.
6219 : */
2571 tgl 6220 GIC 123 : *have_postponed_srfs = postpone_srfs;
6221 :
2585 tgl 6222 ECB : /*
6223 : * Construct the sort-input target, taking all non-postponable columns and
6224 : * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6225 : * the postponable ones.
6226 : */
2585 tgl 6227 GIC 123 : input_target = create_empty_pathtarget();
6228 123 : postponable_cols = NIL;
2585 tgl 6229 ECB :
2585 tgl 6230 GIC 123 : i = 0;
2585 tgl 6231 CBC 1013 : foreach(lc, final_target->exprs)
6232 : {
2585 tgl 6233 GIC 890 : Expr *expr = (Expr *) lfirst(lc);
2585 tgl 6234 ECB :
2571 tgl 6235 CBC 890 : if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
2585 tgl 6236 GIC 155 : postponable_cols = lappend(postponable_cols, expr);
6237 : else
6238 735 : add_column_to_pathtarget(input_target, expr,
2118 6239 735 : get_pathtarget_sortgroupref(final_target, i));
6240 :
2585 tgl 6241 CBC 890 : i++;
2585 tgl 6242 ECB : }
6243 :
6244 : /*
6245 : * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6246 : * postponable columns, and add them to the sort-input target if not
6247 : * already present. (Some might be there already.) We mustn't
6248 : * deconstruct Aggrefs or WindowFuncs here, since the projection node
6249 : * would be unable to recompute them.
6250 : */
2585 tgl 6251 GIC 123 : postponable_vars = pull_var_clause((Node *) postponable_cols,
6252 : PVC_INCLUDE_AGGREGATES |
6253 : PVC_INCLUDE_WINDOWFUNCS |
6254 : PVC_INCLUDE_PLACEHOLDERS);
2585 tgl 6255 CBC 123 : add_new_columns_to_pathtarget(input_target, postponable_vars);
6256 :
2585 tgl 6257 ECB : /* clean up cruft */
2585 tgl 6258 GIC 123 : list_free(postponable_vars);
2585 tgl 6259 CBC 123 : list_free(postponable_cols);
6260 :
6261 : /* XXX this represents even more redundant cost calculation ... */
2585 tgl 6262 GIC 123 : return set_pathtarget_cost_width(root, input_target);
2585 tgl 6263 ECB : }
6264 :
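/*
 * Worked arithmetic for the "expensive" test above: with the default
 * cpu_operator_cost of 0.0025, postponement requires a per-tuple cost
 * above 10 * 0.0025 = 0.025.  Any PL function at its default procost of
 * 100 (100 * cpu_operator_cost = 0.25 per call) qualifies, so in
 *
 *     SELECT x, plfunc(y) FROM t ORDER BY x LIMIT 10
 *
 * (plfunc being some user-defined PL function) plfunc() is evaluated for
 * only the ten returned rows, after the sort.
 */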
2589 6265 : /*
6266 : * get_cheapest_fractional_path
6267 : * Find the cheapest path for retrieving a specified fraction of all
6268 : * the tuples expected to be returned by the given relation.
6269 : *
6270 : * We interpret tuple_fraction the same way as grouping_planner.
6271 : *
6272 : * We assume set_cheapest() has been run on the given rel.
6273 : */
6274 : Path *
2589 tgl 6275 GIC 220231 : get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6276 : {
2589 tgl 6277 CBC 220231 : Path *best_path = rel->cheapest_total_path;
6278 : ListCell *l;
6279 :
6280 : /* If all tuples will be retrieved, just return the cheapest-total path */
2589 tgl 6281 GIC 220231 : if (tuple_fraction <= 0.0)
6282 217576 : return best_path;
6283 :
6284 : /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
2570 tgl 6285 CBC 2655 : if (tuple_fraction >= 1.0 && best_path->rows > 0)
2589 tgl 6286 GIC 970 : tuple_fraction /= best_path->rows;
2589 tgl 6287 ECB :
2589 tgl 6288 CBC 6390 : foreach(l, rel->pathlist)
6289 : {
6290 3735 : Path *path = (Path *) lfirst(l);
5215 tgl 6291 ECB :
2589 tgl 6292 CBC 4815 : if (path == rel->cheapest_total_path ||
2118 tgl 6293 GIC 1080 : compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
2589 6294 3594 : continue;
5215 tgl 6295 ECB :
2589 tgl 6296 GIC 141 : best_path = path;
5215 tgl 6297 ECB : }
6298 :
2589 tgl 6299 GIC 2655 : return best_path;
2589 tgl 6300 ECB : }
5203 6301 :
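/*
 * A usage sketch (hypothetical caller; assumes set_cheapest() has already
 * run on 'rel'): pick the best path for a query expected to stop after
 * roughly 100 rows, e.g. under a LIMIT.
 */
static Path *
pick_limit_path_sketch(RelOptInfo *rel)
{
	/* values >= 1.0 are absolute row counts, converted to a fraction */
	return get_cheapest_fractional_path(rel, 100.0);
}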
2272 andres 6302 : /*
6303 : * adjust_paths_for_srfs
6304 : * Fix up the Paths of the given upperrel to handle tSRFs properly.
6305 : *
6306 : * The executor can only handle set-returning functions that appear at the
6307 : * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6308 : * that are not at top level, we need to split up the evaluation into multiple
6309 : * plan levels in which each level satisfies this constraint. This function
6310 : * modifies each Path of an upperrel that (might) compute any SRFs in its
6311 : * output tlist to insert appropriate projection steps.
6312 : *
6313 : * The given targets and targets_contain_srfs lists are from
6314 : * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6315 : * target in targets.
6316 : */
6317 : static void
2272 andres 6318 GIC 3522 : adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
2272 andres 6319 ECB : List *targets, List *targets_contain_srfs)
6320 : {
6321 : ListCell *lc;
6322 :
2272 andres 6323 GIC 3522 : Assert(list_length(targets) == list_length(targets_contain_srfs));
6324 3522 : Assert(!linitial_int(targets_contain_srfs));
6325 :
6326 : /* If no SRFs appear at this plan level, nothing to do */
6327 3522 : if (list_length(targets) == 1)
6328 276 : return;
6329 :
6330 : /*
6331 : * Stack SRF-evaluation nodes atop each path for the rel.
6332 : *
6333 : * In principle we should re-run set_cheapest() here to identify the
6334 : * cheapest path, but it seems unlikely that adding the same tlist eval
6335 : * costs to all the paths would change that, so we don't bother. Instead,
6336 : * just assume that the cheapest-startup and cheapest-total paths remain
6337 : * so. (There should be no parameterized paths anymore, so we needn't
6338 : * worry about updating cheapest_parameterized_paths.)
6339 : */
6340 6511 : foreach(lc, rel->pathlist)
6341 : {
6342 3265 : Path *subpath = (Path *) lfirst(lc);
6343 3265 : Path *newpath = subpath;
6344 : ListCell *lc1,
2272 andres 6345 ECB : *lc2;
6346 :
2272 andres 6347 GIC 3265 : Assert(subpath->param_info == NULL);
2272 andres 6348 CBC 10330 : forboth(lc1, targets, lc2, targets_contain_srfs)
2272 andres 6349 ECB : {
2042 tgl 6350 GIC 7065 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
2272 andres 6351 CBC 7065 : bool contains_srfs = (bool) lfirst_int(lc2);
6352 :
2272 andres 6353 ECB : /* If this level doesn't contain SRFs, do regular projection */
2272 andres 6354 GIC 7065 : if (contains_srfs)
6355 3295 : newpath = (Path *) create_set_projection_path(root,
6356 : rel,
6357 : newpath,
6358 : thistarget);
6359 : else
6360 3770 : newpath = (Path *) apply_projection_to_path(root,
6361 : rel,
6362 : newpath,
6363 : thistarget);
6364 : }
6365 3265 : lfirst(lc) = newpath;
6366 3265 : if (subpath == rel->cheapest_startup_path)
6367 148 : rel->cheapest_startup_path = newpath;
6368 3265 : if (subpath == rel->cheapest_total_path)
6369 148 : rel->cheapest_total_path = newpath;
6370 : }
6371 :
6372 : /* Likewise for partial paths, if any */
1899 rhaas 6373 CBC 3249 : foreach(lc, rel->partial_pathlist)
6374 : {
6375 3 : Path *subpath = (Path *) lfirst(lc);
6376 3 : Path *newpath = subpath;
6377 : ListCell *lc1,
6378 : *lc2;
6379 :
6380 3 : Assert(subpath->param_info == NULL);
1899 rhaas 6381 GIC 12 : forboth(lc1, targets, lc2, targets_contain_srfs)
1899 rhaas 6382 ECB : {
1899 rhaas 6383 CBC 9 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
1899 rhaas 6384 GIC 9 : bool contains_srfs = (bool) lfirst_int(lc2);
1899 rhaas 6385 ECB :
6386 : /* If this level doesn't contain SRFs, do regular projection */
1899 rhaas 6387 CBC 9 : if (contains_srfs)
6388 3 : newpath = (Path *) create_set_projection_path(root,
1899 rhaas 6389 ECB : rel,
1899 rhaas 6390 EUB : newpath,
1899 rhaas 6391 ECB : thistarget);
6392 : else
6393 : {
1899 rhaas 6394 EUB : /* avoid apply_projection_to_path, in case of multiple refs */
1899 rhaas 6395 CBC 6 : newpath = (Path *) create_projection_path(root,
1899 rhaas 6396 EUB : rel,
6397 : newpath,
6398 : thistarget);
6399 : }
1899 rhaas 6400 ECB : }
1899 rhaas 6401 CBC 3 : lfirst(lc) = newpath;
1899 rhaas 6402 ECB : }
6403 : }
6404 :
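/*
 * Worked example (a sketch): a targetlist computing generate_series(1, n)
 * for some column n is split by split_pathtarget_at_srfs() into two
 * levels: the lower level computes the SRF's input, and the upper level
 * is the SRF itself, evaluated by the ProjectSet node stacked on each
 * path above.
 */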
6405 : /*
6406 : * expression_planner
6407 : * Perform planner's transformations on a standalone expression.
6408 : *
6409 : * Various utility commands need to evaluate expressions that are not part
6410 : * of a plannable query. They can do so using the executor's regular
6411 : * expression-execution machinery, but first the expression has to be fed
6412 : * through here to transform it from parser output to something executable.
6413 : *
6414 : * Currently, we disallow sublinks in standalone expressions, so there's no
6415 : * real "planning" involved here. (That might not always be true though.)
6416 : * What we must do is run eval_const_expressions to ensure that any function
6417 : * calls are converted to positional notation and function default arguments
6418 : * get inserted. The fact that constant subexpressions get simplified is a
6419 : * side-effect that is useful when the expression will get evaluated more than
6420 : * once. Also, we must fix operator function IDs.
6421 : *
6422 : * This does not return any information about dependencies of the expression.
6423 : * Hence callers should use the results only for the duration of the current
6424 : * query. Callers that would like to cache the results for longer should use
6425 : * expression_planner_with_deps, probably via the plancache.
6426 : *
6427 : * Note: this must not make any damaging changes to the passed-in expression
6428 : * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6429 : * we first do an expression_tree_mutator-based walk, what is returned will
6430 : * be a new node tree.) The result is constructed in the current memory
6431 : * context; beware that this can leak a lot of additional stuff there, too.
6432 : */
6433 : Expr *
1899 rhaas 6434 GIC 153163 : expression_planner(Expr *expr)
6435 : {
6436 : Node *result;
6437 :
6438 : /*
6439 : * Convert named-argument function calls, insert default arguments and
6440 : * simplify constant subexprs
6441 : */
1899 rhaas 6442 CBC 153163 : result = eval_const_expressions(NULL, (Node *) expr);
6443 :
6444 : /* Fill in opfuncid values if missing */
1899 rhaas 6445 GIC 153160 : fix_opfuncids(result);
6446 :
6447 153160 : return (Expr *) result;
6448 : }
6449 :
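/*
 * A usage sketch (hypothetical caller): utility code typically hands the
 * result to the executor's standalone-expression machinery.  Note that
 * ExecPrepareExpr() already calls expression_planner() internally, so the
 * explicit call is needed only when using ExecInitExpr() directly.
 */
static ExprState *
prepare_standalone_expr_sketch(Expr *parser_output)
{
	Expr	   *planned = expression_planner(parser_output);

	/* NULL parent is allowed for expressions outside any plan tree */
	return ExecInitExpr(planned, NULL);
}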
6450 : /*
6451 : * expression_planner_with_deps
6452 : * Perform planner's transformations on a standalone expression,
1578 tgl 6453 ECB : * returning expression dependency information along with the result.
6454 : *
6455 : * This is identical to expression_planner() except that it also returns
6456 : * information about possible dependencies of the expression, ie identities of
6457 : * objects whose definitions affect the result. As in a PlannedStmt, these
6458 : * are expressed as a list of relation Oids and a list of PlanInvalItems.
6459 : */
6460 : Expr *
1578 tgl 6461 GIC 177 : expression_planner_with_deps(Expr *expr,
1578 tgl 6462 ECB : List **relationOids,
6463 : List **invalItems)
6464 : {
6465 : Node *result;
6466 : PlannerGlobal glob;
6467 : PlannerInfo root;
6468 :
6469 : /* Make up dummy planner state so we can use setrefs machinery */
1578 tgl 6470 GIC 3363 : MemSet(&glob, 0, sizeof(glob));
1578 tgl 6471 CBC 177 : glob.type = T_PlannerGlobal;
1578 tgl 6472 GIC 177 : glob.relationOids = NIL;
1578 tgl 6473 CBC 177 : glob.invalItems = NIL;
6474 :
6475 15576 : MemSet(&root, 0, sizeof(root));
1578 tgl 6476 GIC 177 : root.type = T_PlannerInfo;
6477 177 : root.glob = &glob;
6478 :
6479 : /*
1578 tgl 6480 ECB : * Convert named-argument function calls, insert default arguments and
6481 : * simplify constant subexprs. Collect identities of inlined functions
6482 : * and elided domains, too.
6483 : */
1578 tgl 6484 CBC 177 : result = eval_const_expressions(&root, (Node *) expr);
6485 :
6486 : /* Fill in opfuncid values if missing */
1578 tgl 6487 GIC 177 : fix_opfuncids(result);
6488 :
6489 : /*
6490 : * Now walk the finished expression to find anything else we ought to
1578 tgl 6491 ECB : * record as an expression dependency.
6492 : */
1578 tgl 6493 GIC 177 : (void) extract_query_dependencies_walker(result, &root);
1578 tgl 6494 ECB :
1578 tgl 6495 CBC 177 : *relationOids = glob.relationOids;
1578 tgl 6496 GIC 177 : *invalItems = glob.invalItems;
1578 tgl 6497 ECB :
1578 tgl 6498 CBC 177 : return (Expr *) result;
6499 : }
6500 :
6501 :
6502 : /*
6503 : * plan_cluster_use_sort
6504 : * Use the planner to decide how CLUSTER should implement sorting
1899 rhaas 6505 ECB : *
6506 : * tableOid is the OID of a table to be clustered on its index indexOid
6507 : * (which is already known to be a btree index). Decide whether it's
6508 : * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6509 : * Return true to use sorting, false to use an indexscan.
6510 : *
6511 : * Note: caller had better already hold some type of lock on the table.
6512 : */
6513 : bool
1899 rhaas 6514 GIC 99 : plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6515 : {
6516 : PlannerInfo *root;
6517 : Query *query;
6518 : PlannerGlobal *glob;
1899 rhaas 6519 ECB : RangeTblEntry *rte;
6520 : RelOptInfo *rel;
6521 : IndexOptInfo *indexInfo;
6522 : QualCost indexExprCost;
6523 : Cost comparisonCost;
6524 : Path *seqScanPath;
6525 : Path seqScanAndSortPath;
6526 : IndexPath *indexScanPath;
6527 : ListCell *lc;
6528 :
6529 : /* We can short-circuit the cost comparison if indexscans are disabled */
1899 rhaas 6530 GIC 99 : if (!enable_indexscan)
6531 15 : return true; /* use sort */
6532 :
6533 : /* Set up mostly-dummy planner state */
6534 84 : query = makeNode(Query);
1899 rhaas 6535 CBC 84 : query->commandType = CMD_SELECT;
6536 :
1899 rhaas 6537 GIC 84 : glob = makeNode(PlannerGlobal);
6538 :
1899 rhaas 6539 CBC 84 : root = makeNode(PlannerInfo);
1899 rhaas 6540 GIC 84 : root->parse = query;
6541 84 : root->glob = glob;
1899 rhaas 6542 CBC 84 : root->query_level = 1;
6543 84 : root->planner_cxt = CurrentMemoryContext;
1899 rhaas 6544 GIC 84 : root->wt_param_id = -1;
69 tgl 6545 GNC 84 : root->join_domains = list_make1(makeNode(JoinDomain));
6546 :
1899 rhaas 6547 ECB : /* Build a minimal RTE for the rel */
1899 rhaas 6548 GIC 84 : rte = makeNode(RangeTblEntry);
6549 84 : rte->rtekind = RTE_RELATION;
6550 84 : rte->relid = tableOid;
6551 84 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
1652 tgl 6552 84 : rte->rellockmode = AccessShareLock;
1899 rhaas 6553 84 : rte->lateral = false;
6554 84 : rte->inh = false;
6555 84 : rte->inFromCl = true;
6556 84 : query->rtable = list_make1(rte);
124 alvherre 6557 GNC 84 : addRTEPermissionInfo(&query->rteperminfos, rte);
6558 :
6559 : /* Set up RTE/RelOptInfo arrays */
1899 rhaas 6560 GIC 84 : setup_simple_rel_arrays(root);
1899 rhaas 6561 ECB :
6562 : /* Build RelOptInfo */
1899 rhaas 6563 GIC 84 : rel = build_simple_rel(root, 1, NULL);
6564 :
6565 : /* Locate IndexOptInfo for the target index */
6566 84 : indexInfo = NULL;
6567 103 : foreach(lc, rel->indexlist)
1899 rhaas 6568 ECB : {
1899 rhaas 6569 GBC 103 : indexInfo = lfirst_node(IndexOptInfo, lc);
1899 rhaas 6570 GIC 103 : if (indexInfo->indexoid == indexOid)
6571 84 : break;
6572 : }
1899 rhaas 6573 ECB :
1899 rhaas 6574 EUB : /*
6575 : * It's possible that get_relation_info did not generate an IndexOptInfo
6576 : * for the desired index; this could happen if it's not yet reached its
6577 : * indcheckxmin usability horizon, or if it's a system index and we're
6578 : * ignoring system indexes. In such cases we should tell CLUSTER to not
6579 : * trust the index contents but use seqscan-and-sort.
1899 rhaas 6580 ECB : */
1899 rhaas 6581 CBC 84 : if (lc == NULL) /* not in the list? */
1899 rhaas 6582 UIC 0 : return true; /* use sort */
6583 :
1899 rhaas 6584 ECB : /*
6585 : * Rather than doing all the pushups that would be needed to use
6586 : * set_baserel_size_estimates, just do a quick hack for rows and width.
6587 : */
1899 rhaas 6588 GIC 84 : rel->rows = rel->tuples;
6589 84 : rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6590 :
6591 84 : root->total_table_pages = rel->pages;
6592 :
6593 : /*
6594 : * Determine eval cost of the index expressions, if any. We need to
6595 : * charge twice that amount for each tuple comparison that happens during
6596 : * the sort, since tuplesort.c will have to re-evaluate the index
6597 : * expressions each time. (XXX that's pretty inefficient...)
6598 : */
6599 84 : cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6600 84 : comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6601 :
6602 : /* Estimate the cost of seq scan + sort */
6603 84 : seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6604 84 : cost_sort(&seqScanAndSortPath, root, NIL,
6605 84 : seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6606 : comparisonCost, maintenance_work_mem, -1.0);
6607 :
6608 : /* Estimate the cost of index scan */
6609 84 : indexScanPath = create_index_path(root, indexInfo,
6610 : NIL, NIL, NIL, NIL,
6611 : ForwardScanDirection, false,
6612 : NULL, 1.0, false);
6613 :
6614 84 : return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6615 : }
6616 :
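/*
 * Worked example of the comparison-cost charge above: when clustering on
 * an expression index such as (lower(name)), tuplesort re-evaluates
 * lower() for both sides of every comparison, so if the expression costs
 * C per tuple the sort is charged an extra 2C per comparison.
 */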
6617 : /*
6618 : * plan_create_index_workers
6619 : * Use the planner to decide how many parallel worker processes
6620 : * CREATE INDEX should request for use
6621 : *
6622 : * tableOid is the table on which the index is to be built. indexOid is the
6623 : * OID of an index to be created or reindexed (which must be a btree index).
6624 : *
6625 : * Return value is the number of parallel worker processes to request. It
6626 : * may be unsafe to proceed if this is 0. Note that this does not include the
6627 : * leader participating as a worker (value is always a number of parallel
6628 : * worker processes).
6629 : *
6630 : * Note: caller had better already hold some type of lock on the table and
6631 : * index.
6632 : */
6633 : int
1892 6634 15271 : plan_create_index_workers(Oid tableOid, Oid indexOid)
6635 : {
6636 : PlannerInfo *root;
6637 : Query *query;
6638 : PlannerGlobal *glob;
6639 : RangeTblEntry *rte;
6640 : Relation heap;
6641 : Relation index;
6642 : RelOptInfo *rel;
6643 : int parallel_workers;
6644 : BlockNumber heap_blocks;
6645 : double reltuples;
6646 : double allvisfrac;
6647 :
6648 : /*
6649 : * We don't allow performing parallel operations in a standalone backend or
6650 : * when parallelism is disabled.
6651 : */
860 tgl 6652 15271 : if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
1892 rhaas 6653 1272 : return 0;
6654 :
1892 rhaas 6655 ECB : /* Set up largely-dummy planner state */
1892 rhaas 6656 GIC 13999 : query = makeNode(Query);
6657 13999 : query->commandType = CMD_SELECT;
6658 :
1892 rhaas 6659 CBC 13999 : glob = makeNode(PlannerGlobal);
6660 :
1892 rhaas 6661 GIC 13999 : root = makeNode(PlannerInfo);
6662 13999 : root->parse = query;
6663 13999 : root->glob = glob;
6664 13999 : root->query_level = 1;
6665 13999 : root->planner_cxt = CurrentMemoryContext;
6666 13999 : root->wt_param_id = -1;
69 tgl 6667 GNC 13999 : root->join_domains = list_make1(makeNode(JoinDomain));
6668 :
6669 : /*
6670 : * Build a minimal RTE.
6671 : *
6672 : * Mark the RTE with inh = true. This is a kludge to prevent
6673 : * get_relation_info() from fetching index info; that is necessary
6674 : * because get_relation_info() does not expect that any IndexOptInfo is
6675 : * currently undergoing REINDEX.
1892 rhaas 6676 ECB : */
1892 rhaas 6677 GIC 13999 : rte = makeNode(RangeTblEntry);
1892 rhaas 6678 CBC 13999 : rte->rtekind = RTE_RELATION;
1892 rhaas 6679 GIC 13999 : rte->relid = tableOid;
6680 13999 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
1652 tgl 6681 CBC 13999 : rte->rellockmode = AccessShareLock;
1892 rhaas 6682 13999 : rte->lateral = false;
6683 13999 : rte->inh = true;
6684 13999 : rte->inFromCl = true;
1892 rhaas 6685 GIC 13999 : query->rtable = list_make1(rte);
124 alvherre 6686 GNC 13999 : addRTEPermissionInfo(&query->rteperminfos, rte);
1892 rhaas 6687 ECB :
6688 : /* Set up RTE/RelOptInfo arrays */
1892 rhaas 6689 GIC 13999 : setup_simple_rel_arrays(root);
1892 rhaas 6690 ECB :
6691 : /* Build RelOptInfo */
1892 rhaas 6692 GIC 13999 : rel = build_simple_rel(root, 1, NULL);
6693 :
6694 : /* Rels are assumed already locked by the caller */
1539 andres 6695 13999 : heap = table_open(tableOid, NoLock);
1892 rhaas 6696 13999 : index = index_open(indexOid, NoLock);
6697 :
6698 : /*
6699 : * Determine if it's safe to proceed.
1892 rhaas 6700 ECB : *
6701 : * Currently, parallel workers can't access the leader's temporary tables.
6702 : * Furthermore, any index predicate or index expressions must be parallel
6703 : * safe.
6704 : */
1892 rhaas 6705 GIC 13999 : if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
1892 rhaas 6706 CBC 13110 : !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
6707 13065 : !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
6708 : {
1892 rhaas 6709 GIC 934 : parallel_workers = 0;
1892 rhaas 6710 CBC 934 : goto done;
1892 rhaas 6711 ECB : }
6712 :
6713 : /*
6714 : * If parallel_workers storage parameter is set for the table, accept that
6715 : * as the number of parallel worker processes to launch (though still cap
6716 : * at max_parallel_maintenance_workers). Note that we deliberately do not
6717 : * consider any other factor when parallel_workers is set. (e.g., memory
6718 : * use by workers.)
6719 : */
1892 rhaas 6720 GIC 13065 : if (rel->rel_parallel_workers != -1)
6721 : {
6722 7 : parallel_workers = Min(rel->rel_parallel_workers,
6723 : max_parallel_maintenance_workers);
6724 7 : goto done;
6725 : }
6726 :
6727 : /*
1892 rhaas 6728 ECB : * Estimate heap relation size ourselves, since rel->pages cannot be
6729 : * trusted (heap RTE was marked as inheritance parent)
6730 : */
1892 rhaas 6731 GIC 13058 : estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
6732 :
6733 : /*
6734 : * Determine number of workers to scan the heap relation using generic
1892 rhaas 6735 ECB : * model
6736 : */
1892 rhaas 6737 CBC 13058 : parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
1892 rhaas 6738 ECB : max_parallel_maintenance_workers);
6739 :
6740 : /*
6741 : * Cap workers based on available maintenance_work_mem as needed.
6742 : *
6743 : * Note that each tuplesort participant receives an even share of the
6744 : * total maintenance_work_mem budget. Aim to leave participants
6745 : * (including the leader as a participant) with no less than 32MB of
6746 : * memory. This leaves cases where maintenance_work_mem is set to 64MB
6747 : * immediately past the threshold of being capable of launching a single
6748 : * parallel worker to sort.
6749 : */
1892 rhaas 6750 GIC 13122 : while (parallel_workers > 0 &&
1892 rhaas 6751 CBC 135 : maintenance_work_mem / (parallel_workers + 1) < 32768L)
1892 rhaas 6752 GIC 64 : parallel_workers--;
6753 :
6754 13058 : done:
6755 13999 : index_close(index, NoLock);
1539 andres 6756 13999 : table_close(heap, NoLock);
1892 rhaas 6757 ECB :
1892 rhaas 6758 GIC 13999 : return parallel_workers;
6759 : }
6760 :
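/*
 * Worked arithmetic for the memory cap above (maintenance_work_mem is
 * measured in kilobytes): at 64MB = 65536kB, two workers would leave
 * 65536 / 3 = 21845kB per participant, below the 32768kB floor, so the
 * loop drops to one worker; 65536 / 2 = 32768kB just meets the floor, so
 * a single worker survives.
 */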
6761 : /*
1899 rhaas 6762 ECB : * add_paths_to_grouping_rel
4567 tgl 6763 : *
1899 rhaas 6764 : * Add non-partial paths to grouping relation.
4567 tgl 6765 : */
6766 : static void
1899 rhaas 6767 GIC 16633 : add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
6768 : RelOptInfo *grouped_rel,
6769 : RelOptInfo *partially_grouped_rel,
6770 : const AggClauseCosts *agg_costs,
6771 : grouping_sets_data *gd, double dNumGroups,
6772 : GroupPathExtraData *extra)
4567 tgl 6773 ECB : {
1899 rhaas 6774 GIC 16633 : Query *parse = root->parse;
6775 16633 : Path *cheapest_path = input_rel->cheapest_total_path;
6776 : ListCell *lc;
1844 6777 16633 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6778 16633 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6779 16633 : List *havingQual = (List *) extra->havingQual;
1844 rhaas 6780 CBC 16633 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
4567 tgl 6781 ECB :
1899 rhaas 6782 GIC 16633 : if (can_sort)
1899 rhaas 6783 ECB : {
6784 : /*
6785 : * Use any available suitably-sorted path as input, and also consider
6786 : * sorting the cheapest-total path and incremental sort on any paths
6787 : * with presorted keys.
6788 : */
1899 rhaas 6789 CBC 34401 : foreach(lc, input_rel->pathlist)
1899 rhaas 6790 ECB : {
1899 rhaas 6791 GIC 17771 : Path *path = (Path *) lfirst(lc);
188 tgl 6792 ECB : bool is_sorted;
6793 : int presorted_keys;
1097 tomas.vondra 6794 :
188 tgl 6795 GIC 17771 : is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
6796 : path->pathkeys,
6797 : &presorted_keys);
6798 :
114 drowley 6799 GNC 17771 : if (!is_sorted)
6800 : {
6801 : /*
6802 : * Try at least sorting the cheapest path and also try
6803 : * incrementally sorting any path which is partially sorted
6804 : * already (no need to deal with paths which have presorted
6805 : * keys when incremental sort is disabled unless it's the
6806 : * cheapest input path).
6807 : */
6808 2824 : if (path != cheapest_path &&
6809 193 : (presorted_keys == 0 || !enable_incremental_sort))
6810 133 : continue;
6811 :
6812 : /*
6813 : * We've no need to consider both a sort and incremental sort.
6814 : * We'll just do a sort if there are no presorted keys and an
6815 : * incremental sort when there are presorted keys.
6816 : */
6817 2691 : if (presorted_keys == 0 || !enable_incremental_sort)
188 tgl 6818 GIC 2607 : path = (Path *) create_sort_path(root,
188 tgl 6819 ECB : grouped_rel,
6820 : path,
6821 : root->group_pathkeys,
6822 : -1.0);
6823 : else
114 drowley 6824 GNC 84 : path = (Path *) create_incremental_sort_path(root,
6825 : grouped_rel,
6826 : path,
6827 : root->group_pathkeys,
6828 : presorted_keys,
6829 : -1.0);
6830 : }
6831 :
6832 : /* Now decide what to stick atop it */
188 tgl 6833 GIC 17638 : if (parse->groupingSets)
6834 : {
6835 397 : consider_groupingsets_paths(root, grouped_rel,
6836 : path, true, can_hash,
6837 : gd, agg_costs, dNumGroups);
6838 : }
6839 17241 : else if (parse->hasAggs)
6840 : {
6841 : /*
188 tgl 6842 ECB : * We have aggregation, possibly with plain GROUP BY. Make an
6843 : * AggPath.
6844 : */
188 tgl 6845 CBC 16936 : add_path(grouped_rel, (Path *)
188 tgl 6846 GIC 16936 : create_agg_path(root,
6847 : grouped_rel,
6848 : path,
188 tgl 6849 CBC 16936 : grouped_rel->reltarget,
6850 16936 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6851 : AGGSPLIT_SIMPLE,
6852 : root->processed_groupClause,
188 tgl 6853 ECB : havingQual,
6854 : agg_costs,
6855 : dNumGroups));
6856 : }
188 tgl 6857 CBC 305 : else if (parse->groupClause)
6858 : {
6859 : /*
6860 : * We have GROUP BY without aggregation or grouping sets. Make
6861 : * a GroupPath.
188 tgl 6862 ECB : */
188 tgl 6863 GIC 305 : add_path(grouped_rel, (Path *)
6864 305 : create_group_path(root,
6865 : grouped_rel,
6866 : path,
6867 : root->processed_groupClause,
188 tgl 6868 ECB : havingQual,
6869 : dNumGroups));
6870 : }
6871 : else
6872 : {
6873 : /* Other cases should have been handled above */
188 tgl 6874 UIC 0 : Assert(false);
188 tgl 6875 ECB : }
6876 : }
4567 6877 :
1899 rhaas 6878 : /*
6879 : * Instead of operating directly on the input relation, we can
6880 : * consider finalizing a partially aggregated path.
6881 : */
1846 rhaas 6882 CBC 16630 : if (partially_grouped_rel != NULL)
1899 rhaas 6883 ECB : {
1846 rhaas 6884 GIC 1860 : foreach(lc, partially_grouped_rel->pathlist)
1868 rhaas 6885 ECB : {
1846 rhaas 6886 CBC 1117 : Path *path = (Path *) lfirst(lc);
6887 : bool is_sorted;
188 tgl 6888 ECB : int presorted_keys;
374 tomas.vondra 6889 :
188 tgl 6890 GIC 1117 : is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
6891 : path->pathkeys,
6892 : &presorted_keys);
6893 :
6894 1117 : if (!is_sorted)
6895 : {
6896 : /*
6897 : * Try at least sorting the cheapest path and also try
6898 : * incrementally sorting any path which is partially
6899 : * sorted already (no need to deal with paths which have
6900 : * presorted keys when incremental sort is disabled unless
6901 : * it's the cheapest input path).
6902 : */
114 drowley 6903 GNC 284 : if (path != partially_grouped_rel->cheapest_total_path &&
6904 48 : (presorted_keys == 0 || !enable_incremental_sort))
188 tgl 6905 GIC 48 : continue;
6906 :
6907 : /*
6908 : * We've no need to consider both a sort and incremental
 6909                 :                  * sort.  We'll just do a sort if there are no presorted
6910 : * keys and an incremental sort when there are presorted
6911 : * keys.
6912 : */
114 drowley 6913 GNC 236 : if (presorted_keys == 0 || !enable_incremental_sort)
6914 236 : path = (Path *) create_sort_path(root,
6915 : grouped_rel,
6916 : path,
6917 : root->group_pathkeys,
6918 : -1.0);
6919 : else
114 drowley 6920 UNC 0 : path = (Path *) create_incremental_sort_path(root,
6921 : grouped_rel,
6922 : path,
6923 : root->group_pathkeys,
6924 : presorted_keys,
6925 : -1.0);
6926 : }
6927 :
188 tgl 6928 GIC 1069 : if (parse->hasAggs)
6929 949 : add_path(grouped_rel, (Path *)
6930 949 : create_agg_path(root,
6931 : grouped_rel,
6932 : path,
6933 949 : grouped_rel->reltarget,
6934 949 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6935 : AGGSPLIT_FINAL_DESERIAL,
6936 : root->processed_groupClause,
6937 : havingQual,
6938 : agg_final_costs,
6939 : dNumGroups));
6940 : else
6941 120 : add_path(grouped_rel, (Path *)
6942 120 : create_group_path(root,
6943 : grouped_rel,
6944 : path,
6945 : root->processed_groupClause,
6946 : havingQual,
6947 : dNumGroups));
6948 :
6949 : }
6950 : }
6951 : }
6952 :
1899 rhaas 6953 16633 : if (can_hash)
6954 : {
1899 rhaas 6955 CBC 2193 : if (parse->groupingSets)
6956 : {
6957 : /*
1899 rhaas 6958 ECB : * Try for a hash-only groupingsets path over unsorted input.
6959 : */
1899 rhaas 6960 GIC 325 : consider_groupingsets_paths(root, grouped_rel,
6961 : cheapest_path, false, true,
6962 : gd, agg_costs, dNumGroups);
6963 : }
1899 rhaas 6964 ECB : else
6965 : {
6966 : /*
986 pg 6967 : * Generate a HashAgg Path. We just need an Agg over the
6968 : * cheapest-total input path, since input order won't matter.
1899 rhaas 6969 : */
986 pg 6970 GIC 1868 : add_path(grouped_rel, (Path *)
6971 1868 : create_agg_path(root, grouped_rel,
6972 : cheapest_path,
6973 1868 : grouped_rel->reltarget,
6974 : AGG_HASHED,
6975 : AGGSPLIT_SIMPLE,
6976 : root->processed_groupClause,
6977 : havingQual,
6978 : agg_costs,
6979 : dNumGroups));
6980 : }
6981 :
6982 : /*
 6983                 :          * Generate a Finalize HashAgg Path atop the cheapest partially
 6984                 :          * grouped path, if there is one.
1899 rhaas 6985 ECB : */
1846 rhaas 6986 GIC 2193 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
6987 : {
1868 6988 375 : Path *path = partially_grouped_rel->cheapest_total_path;
6989 :
986 pg 6990 375 : add_path(grouped_rel, (Path *)
6991 375 : create_agg_path(root,
6992 : grouped_rel,
6993 : path,
6994 375 : grouped_rel->reltarget,
6995 : AGG_HASHED,
6996 : AGGSPLIT_FINAL_DESERIAL,
6997 : root->processed_groupClause,
6998 : havingQual,
6999 : agg_final_costs,
7000 : dNumGroups));
1899 rhaas 7001 ECB : }
7002 : }
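                      :
                      :     /*
                      :      * Editorial note (illustrative, not part of the upstream source):
                      :      * the finalize-over-partial paths built above correspond to plan
                      :      * shapes such as
                      :      *
                      :      *     Finalize HashAggregate
                      :      *       ->  Gather
                      :      *             ->  Partial HashAggregate
                      :      *                   ->  Parallel Seq Scan on t
                      :      *
                      :      * where the partially grouped rel supplied the path below the
                      :      * Finalize step.
                      :      */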
7003 :
7004 : /*
1844 7005 : * When partitionwise aggregate is used, we might have fully aggregated
7006 : * paths in the partial pathlist, because add_paths_to_append_rel() will
7007 : * consider a path for grouped_rel consisting of a Parallel Append of
7008 : * non-partial paths from each child.
7009 : */
1844 rhaas 7010 CBC 16633 : if (grouped_rel->partial_pathlist != NIL)
7011 81 : gather_grouping_paths(root, grouped_rel);
4567 tgl 7012 16633 : }
2210 rhaas 7013 ECB :
7014 : /*
1846 7015 : * create_partial_grouping_paths
2210 7016 : *
7017 : * Create a new upper relation representing the result of partial aggregation
7018 : * and populate it with appropriate paths. Note that we don't finalize the
1846 7019 : * lists of paths here, so the caller can add additional partial or non-partial
7020 : * paths and must afterward call gather_grouping_paths and set_cheapest on
7021 : * the returned upper relation.
7022 : *
7023 : * All paths for this new upper relation -- both partial and non-partial --
7024 : * have been partially aggregated but require a subsequent FinalizeAggregate
7025 : * step.
1844 7026 : *
7027 : * NB: This function is allowed to return NULL if it determines that there is
7028 : * no real need to create a new RelOptInfo.
7029 : */
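                      :
                      : /*
                      :  * Editorial sketch (illustrative, not part of the upstream source): for
                      :  * "SELECT a, avg(b) FROM t GROUP BY a", the paths built here run the
                      :  * aggregate in AGGSPLIT_INITIAL_SERIAL mode and emit per-group transition
                      :  * states (conceptually a {count, sum} pair for avg) rather than final
                      :  * values; a later AGGSPLIT_FINAL_DESERIAL Agg node combines those states
                      :  * and computes the actual avg(b).
                      :  */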
7030 : static RelOptInfo *
1846 rhaas 7031 CBC 15091 : create_partial_grouping_paths(PlannerInfo *root,
7032 : RelOptInfo *grouped_rel,
7033 : RelOptInfo *input_rel,
1846 rhaas 7034 ECB : grouping_sets_data *gd,
7035 : GroupPathExtraData *extra,
7036 : bool force_rel_creation)
2210 7037 : {
1899 rhaas 7038 CBC 15091 : Query *parse = root->parse;
7039 : RelOptInfo *partially_grouped_rel;
1844 7040 15091 : AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7041 15091 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7042 15091 : Path *cheapest_partial_path = NULL;
1844 rhaas 7043 GIC 15091 : Path *cheapest_total_path = NULL;
1899 7044 15091 : double dNumPartialGroups = 0;
1844 7045 15091 : double dNumPartialPartialGroups = 0;
7046 : ListCell *lc;
7047 15091 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7048 15091 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7049 :
7050 : /*
7051 : * Consider whether we should generate partially aggregated non-partial
1844 rhaas 7052 ECB : * paths. We can only do this if we have a non-partial path, and only if
1844 rhaas 7053 EUB : * the parent of the input rel is performing partial partitionwise
7054 : * aggregation. (Note that extra->patype is the type of partitionwise
7055 : * aggregation being used at the parent level, not this level.)
7056 : */
1844 rhaas 7057 GIC 15091 : if (input_rel->pathlist != NIL &&
7058 15091 : extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
1844 rhaas 7059 CBC 297 : cheapest_total_path = input_rel->cheapest_total_path;
1844 rhaas 7060 ECB :
7061 : /*
7062 : * If parallelism is possible for grouped_rel, then we should consider
7063 : * generating partially-grouped partial paths. However, if the input rel
7064 : * has no partial paths, then we can't.
7065 : */
1844 rhaas 7066 GIC 15091 : if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7067 868 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
7068 :
7069 : /*
1844 rhaas 7070 ECB : * If we can't partially aggregate partial paths, and we can't partially
7071 : * aggregate non-partial paths, then don't bother creating the new
7072 : * RelOptInfo at all, unless the caller specified force_rel_creation.
7073 : */
1844 rhaas 7074 CBC 15091 : if (cheapest_total_path == NULL &&
7075 14094 : cheapest_partial_path == NULL &&
7076 14094 : !force_rel_creation)
1844 rhaas 7077 GIC 14051 : return NULL;
7078 :
7079 : /*
1846 rhaas 7080 ECB : * Build a new upper relation to represent the result of partially
7081 : * aggregating the rows from the input relation.
7082 : */
1846 rhaas 7083 GIC 1040 : partially_grouped_rel = fetch_upper_rel(root,
7084 : UPPERREL_PARTIAL_GROUP_AGG,
1846 rhaas 7085 ECB : grouped_rel->relids);
1846 rhaas 7086 GIC 1040 : partially_grouped_rel->consider_parallel =
7087 1040 : grouped_rel->consider_parallel;
1844 7088 1040 : partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
1846 7089 1040 : partially_grouped_rel->serverid = grouped_rel->serverid;
7090 1040 : partially_grouped_rel->userid = grouped_rel->userid;
7091 1040 : partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7092 1040 : partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7093 :
7094 : /*
7095 : * Build target list for partial aggregate paths. These paths cannot just
7096 : * emit the same tlist as regular aggregate paths, because (1) we must
7097 : * include Vars and Aggrefs needed in HAVING, which might not appear in
7098 : * the result tlist, and (2) the Aggrefs must be set in partial mode.
7099 : */
7100 1040 : partially_grouped_rel->reltarget =
7101 1040 : make_partial_grouping_target(root, grouped_rel->reltarget,
7102 : extra->havingQual);
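                      :
                      :     /*
                      :      * Editorial example (illustrative, not part of the upstream source):
                      :      * for "SELECT a, sum(b) FROM t GROUP BY a HAVING max(c) > 0", the
                      :      * final target is (a, sum(b)), but the partial target must also carry
                      :      * max(c), with both Aggrefs switched to partial mode, so that the
                      :      * finalization step can still evaluate the HAVING clause.
                      :      */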
7103 :
1844 7104 1040 : if (!extra->partial_costs_set)
1846 rhaas 7105 ECB : {
7106 : /*
7107 : * Collect statistics about aggregates for estimating costs of
7108 : * performing aggregation in parallel.
7109 : */
1844 rhaas 7110 GIC 3594 : MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7111 3594 : MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7112 599 : if (parse->hasAggs)
7113 : {
7114 : /* partial phase */
866 heikki.linnakangas 7115 537 : get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7116 : agg_partial_costs);
7117 :
7118 : /* final phase */
7119 537 : get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7120 : agg_final_costs);
7121 : }
7122 :
1844 rhaas 7123 CBC 599 : extra->partial_costs_set = true;
1846 rhaas 7124 ECB : }
7125 :
7126 : /* Estimate number of partial groups. */
1844 rhaas 7127 CBC 1040 : if (cheapest_total_path != NULL)
1844 rhaas 7128 ECB : dNumPartialGroups =
1844 rhaas 7129 GIC 297 : get_number_of_groups(root,
1844 rhaas 7130 ECB : cheapest_total_path->rows,
7131 : gd,
7132 : extra->targetList);
1844 rhaas 7133 CBC 1040 : if (cheapest_partial_path != NULL)
1844 rhaas 7134 ECB : dNumPartialPartialGroups =
1844 rhaas 7135 CBC 868 : get_number_of_groups(root,
1844 rhaas 7136 ECB : cheapest_partial_path->rows,
7137 : gd,
7138 : extra->targetList);
7139 :
1844 rhaas 7140 GIC 1040 : if (can_sort && cheapest_total_path != NULL)
7141 : {
7142 : /* This should have been checked previously */
1899 7143 297 : Assert(parse->hasAggs || parse->groupClause);
7144 :
7145 : /*
7146 : * Use any available suitably-sorted path as input, and also consider
7147 : * sorting the cheapest partial path.
1899 rhaas 7148 ECB : */
1844 rhaas 7149 CBC 597 : foreach(lc, input_rel->pathlist)
1844 rhaas 7150 ECB : {
1844 rhaas 7151 CBC 300 : Path *path = (Path *) lfirst(lc);
188 tgl 7152 ECB : bool is_sorted;
7153 : int presorted_keys;
374 tomas.vondra 7154 :
114 drowley 7155 GNC 300 : is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
7156 : path->pathkeys,
7157 : &presorted_keys);
7158 300 : if (!is_sorted)
188 tgl 7159 ECB : {
7160 : /*
7161 : * Try at least sorting the cheapest path and also try
7162 : * incrementally sorting any path which is partially sorted
7163 : * already (no need to deal with paths which have presorted
7164 : * keys when incremental sort is disabled unless it's the
7165 : * cheapest input path).
7166 : */
114 drowley 7167 GNC 297 : if (path != cheapest_total_path &&
114 drowley 7168 UNC 0 : (presorted_keys == 0 || !enable_incremental_sort))
7169 0 : continue;
7170 :
7171 : /*
7172 : * We've no need to consider both a sort and incremental sort.
7173 : * We'll just do a sort if there are no presorted keys and an
7174 : * incremental sort when there are presorted keys.
7175 : */
114 drowley 7176 GNC 297 : if (presorted_keys == 0 || !enable_incremental_sort)
188 tgl 7177 CBC 297 : path = (Path *) create_sort_path(root,
7178 : partially_grouped_rel,
7179 : path,
188 tgl 7180 ECB : root->group_pathkeys,
7181 : -1.0);
7182 : else
114 drowley 7183 UNC 0 : path = (Path *) create_incremental_sort_path(root,
7184 : partially_grouped_rel,
7185 : path,
7186 : root->group_pathkeys,
7187 : presorted_keys,
7188 : -1.0);
7189 : }
7190 :
114 drowley 7191 GNC 300 : if (parse->hasAggs)
7192 264 : add_path(partially_grouped_rel, (Path *)
7193 264 : create_agg_path(root,
7194 : partially_grouped_rel,
7195 : path,
7196 264 : partially_grouped_rel->reltarget,
7197 264 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7198 : AGGSPLIT_INITIAL_SERIAL,
7199 : root->processed_groupClause,
7200 : NIL,
7201 : agg_partial_costs,
7202 : dNumPartialGroups));
7203 : else
7204 36 : add_path(partially_grouped_rel, (Path *)
7205 36 : create_group_path(root,
7206 : partially_grouped_rel,
7207 : path,
7208 : root->processed_groupClause,
7209 : NIL,
7210 : dNumPartialGroups));
7211 : }
1844 rhaas 7212 ECB : }
7213 :
1844 rhaas 7214 GIC 1040 : if (can_sort && cheapest_partial_path != NULL)
1844 rhaas 7215 ECB : {
7216 : /* Similar to above logic, but for partial paths. */
1899 rhaas 7217 CBC 1742 : foreach(lc, input_rel->partial_pathlist)
2210 rhaas 7218 ECB : {
1899 rhaas 7219 GIC 874 : Path *path = (Path *) lfirst(lc);
7220 : bool is_sorted;
7221 : int presorted_keys;
7222 :
188 tgl 7223 874 : is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
7224 : path->pathkeys,
7225 : &presorted_keys);
374 tomas.vondra 7226 ECB :
114 drowley 7227 GNC 874 : if (!is_sorted)
1899 rhaas 7228 ECB : {
7229 : /*
7230 : * Try at least sorting the cheapest path and also try
7231 : * incrementally sorting any path which is partially sorted
7232 : * already (no need to deal with paths which have presorted
7233 : * keys when incremental sort is disabled unless it's the
7234 : * cheapest input path).
7235 : */
114 drowley 7236 GNC 506 : if (path != cheapest_partial_path &&
7237 6 : (presorted_keys == 0 || !enable_incremental_sort))
7238 3 : continue;
7239 :
7240 : /*
7241 : * We've no need to consider both a sort and incremental sort.
7242 : * We'll just do a sort if there are no presorted keys and an
7243 : * incremental sort when there are presorted keys.
7244 : */
7245 503 : if (presorted_keys == 0 || !enable_incremental_sort)
188 tgl 7246 GIC 500 : path = (Path *) create_sort_path(root,
188 tgl 7247 ECB : partially_grouped_rel,
7248 : path,
7249 : root->group_pathkeys,
7250 : -1.0);
7251 : else
114 drowley 7252 GNC 3 : path = (Path *) create_incremental_sort_path(root,
7253 : partially_grouped_rel,
7254 : path,
7255 : root->group_pathkeys,
7256 : presorted_keys,
7257 : -1.0);
7258 : }
7259 :
188 tgl 7260 CBC 871 : if (parse->hasAggs)
188 tgl 7261 GIC 797 : add_partial_path(partially_grouped_rel, (Path *)
7262 797 : create_agg_path(root,
7263 : partially_grouped_rel,
7264 : path,
7265 797 : partially_grouped_rel->reltarget,
7266 797 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
188 tgl 7267 ECB : AGGSPLIT_INITIAL_SERIAL,
7268 : root->processed_groupClause,
7269 : NIL,
7270 : agg_partial_costs,
7271 : dNumPartialPartialGroups));
7272 : else
188 tgl 7273 CBC 74 : add_partial_path(partially_grouped_rel, (Path *)
7274 74 : create_group_path(root,
7275 : partially_grouped_rel,
7276 : path,
7277 : root->processed_groupClause,
7278 : NIL,
7279 : dNumPartialPartialGroups));
7280 : }
7281 : }
7282 :
7283 : /*
986 pg 7284 EUB : * Add a partially-grouped HashAgg Path where possible
7285 : */
1844 rhaas 7286 GIC 1040 : if (can_hash && cheapest_total_path != NULL)
7287 : {
7288 : /* Checked above */
1899 7289 297 : Assert(parse->hasAggs || parse->groupClause);
7290 :
986 pg 7291 297 : add_path(partially_grouped_rel, (Path *)
986 pg 7292 CBC 297 : create_agg_path(root,
7293 : partially_grouped_rel,
986 pg 7294 ECB : cheapest_total_path,
986 pg 7295 GIC 297 : partially_grouped_rel->reltarget,
986 pg 7296 ECB : AGG_HASHED,
7297 : AGGSPLIT_INITIAL_SERIAL,
7298 : root->processed_groupClause,
7299 : NIL,
7300 : agg_partial_costs,
7301 : dNumPartialGroups));
7302 : }
7303 :
7304 : /*
7305 : * Now add a partially-grouped HashAgg partial Path where possible
7306 : */
1844 rhaas 7307 GIC 1040 : if (can_hash && cheapest_partial_path != NULL)
7308 : {
986 pg 7309 500 : add_partial_path(partially_grouped_rel, (Path *)
7310 500 : create_agg_path(root,
7311 : partially_grouped_rel,
7312 : cheapest_partial_path,
986 pg 7313 CBC 500 : partially_grouped_rel->reltarget,
986 pg 7314 ECB : AGG_HASHED,
7315 : AGGSPLIT_INITIAL_SERIAL,
7316 : root->processed_groupClause,
7317 : NIL,
7318 : agg_partial_costs,
7319 : dNumPartialPartialGroups));
7320 : }
7321 :
7322 : /*
1868 rhaas 7323 : * If there is an FDW that's responsible for all baserels of the query,
7324 : * let it consider adding partially grouped ForeignPaths.
7325 : */
1868 rhaas 7326 GIC 1040 : if (partially_grouped_rel->fdwroutine &&
7327 3 : partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7328 : {
7329 3 : FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
1868 rhaas 7330 EUB :
1868 rhaas 7331 GIC 3 : fdwroutine->GetForeignUpperPaths(root,
7332 : UPPERREL_PARTIAL_GROUP_AGG,
7333 : input_rel, partially_grouped_rel,
7334 : extra);
7335 : }
7336 :
1846 7337 1040 : return partially_grouped_rel;
1846 rhaas 7338 ECB : }
1868 7339 :
1846 7340 : /*
7341 : * Generate Gather and Gather Merge paths for a grouping relation or partial
7342 : * grouping relation.
7343 : *
775 alvherre 7344 : * generate_useful_gather_paths does most of the work, but we also consider a
7345 : * special case: we could try sorting the data by the group_pathkeys and then
7346 : * applying Gather Merge.
7347 : *
7348 : * NB: This function shouldn't be used for anything other than a grouped or
 7349                 :  * partially grouped relation, not only because it explicitly references
 7350                 :  * group_pathkeys but also because we pass "true" as the third argument
 7351                 :  * to generate_useful_gather_paths().
1846 rhaas 7352 : */
7353 : static void
1846 rhaas 7354 GIC 781 : gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
7355 : {
7356 : ListCell *lc;
7357 : Path *cheapest_partial_path;
7358 :
7359 : /* Try Gather for unordered paths and Gather Merge for ordered ones. */
1097 tomas.vondra 7360 781 : generate_useful_gather_paths(root, rel, true);
7361 :
7362 : /* Try cheapest partial path + explicit Sort + Gather Merge. */
1846 rhaas 7363 CBC 781 : cheapest_partial_path = linitial(rel->partial_pathlist);
1868 rhaas 7364 GIC 781 : if (!pathkeys_contained_in(root->group_pathkeys,
1868 rhaas 7365 ECB : cheapest_partial_path->pathkeys))
7366 : {
7367 : Path *path;
7368 : double total_groups;
7369 :
1868 rhaas 7370 CBC 379 : total_groups =
1868 rhaas 7371 GIC 379 : cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
1846 7372 379 : path = (Path *) create_sort_path(root, rel, cheapest_partial_path,
7373 : root->group_pathkeys,
7374 : -1.0);
7375 : path = (Path *)
1868 7376 379 : create_gather_merge_path(root,
7377 : rel,
7378 : path,
1846 7379 379 : rel->reltarget,
1868 rhaas 7380 ECB : root->group_pathkeys,
7381 : NULL,
7382 : &total_groups);
7383 :
1846 rhaas 7384 GIC 379 : add_path(rel, path);
7385 : }
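                      :
                      :     /*
                      :      * Editorial note (illustrative, not part of the upstream source):
                      :      * the path built above corresponds to a plan shape like
                      :      *
                      :      *     Gather Merge
                      :      *       ->  Sort (by group_pathkeys)
                      :      *             ->  <cheapest partial path>
                      :      *
                      :      * each worker sorts its share of the rows, and Gather Merge merges
                      :      * the per-worker streams while preserving that order, so grouping
                      :      * that needs sorted input can be stacked on top.
                      :      */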
7386 :
7387 : /*
7388 : * Consider incremental sort on all partial paths, if enabled.
7389 : *
7390 : * We can also skip the entire loop when we only have a single-item
7391 : * group_pathkeys because then we can't possibly have a presorted prefix
7392 : * of the list without having the list be fully sorted.
7393 : */
1008 peter 7394 781 : if (!enable_incremental_sort || list_length(root->group_pathkeys) == 1)
1097 tomas.vondra 7395 383 : return;
1097 tomas.vondra 7396 ECB :
 7397                 :
1097 tomas.vondra 7398 CBC 823 : foreach(lc, rel->partial_pathlist)
7399 : {
7400 425 : Path *path = (Path *) lfirst(lc);
1097 tomas.vondra 7401 ECB : bool is_sorted;
7402 : int presorted_keys;
7403 : double total_groups;
7404 :
1097 tomas.vondra 7405 GIC 425 : is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
7406 : path->pathkeys,
7407 : &presorted_keys);
7408 :
7409 425 : if (is_sorted)
7410 425 : continue;
7411 :
7412 27 : if (presorted_keys == 0)
7413 27 : continue;
 7414                 :
                      :         total_groups = path->rows * path->parallel_workers;
                      :
1097 tomas.vondra 7415 UIC 0 : path = (Path *) create_incremental_sort_path(root,
7416 : rel,
7417 : path,
7418 : root->group_pathkeys,
7419 : presorted_keys,
1097 tomas.vondra 7420 ECB : -1.0);
7421 :
7422 : path = (Path *)
1097 tomas.vondra 7423 UIC 0 : create_gather_merge_path(root,
7424 : rel,
7425 : path,
7426 0 : rel->reltarget,
7427 : root->group_pathkeys,
7428 : NULL,
7429 : &total_groups);
7430 :
7431 0 : add_path(rel, path);
7432 : }
7433 : }
7434 :
7435 : /*
7436 : * can_partial_agg
7437 : *
 7438                 :  * Determines whether partial grouping and/or aggregation is possible.
7439 : * Returns true when possible, false otherwise.
7440 : */
1899 rhaas 7441 ECB : static bool
866 heikki.linnakangas 7442 GIC 16231 : can_partial_agg(PlannerInfo *root)
7443 : {
1899 rhaas 7444 16231 : Query *parse = root->parse;
7445 :
1846 7446 16231 : if (!parse->hasAggs && parse->groupClause == NIL)
7447 : {
1899 rhaas 7448 ECB : /*
7449 : * We don't know how to do parallel aggregation unless we have either
7450 : * some aggregates or a grouping clause.
7451 : */
1899 rhaas 7452 LBC 0 : return false;
1899 rhaas 7453 ECB : }
1899 rhaas 7454 CBC 16231 : else if (parse->groupingSets)
1899 rhaas 7455 ECB : {
7456 : /* We don't know how to do grouping sets in parallel. */
1899 rhaas 7457 CBC 364 : return false;
1899 rhaas 7458 ECB : }
866 heikki.linnakangas 7459 GIC 15867 : else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7460 : {
7461 : /* Insufficient support for partial mode. */
1899 rhaas 7462 1439 : return false;
7463 : }
7464 :
7465 : /* Everything looks good. */
7466 14428 : return true;
2011 rhaas 7467 ECB : }
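                      :
                      : /*
                      :  * Editorial examples (illustrative, not part of the upstream source;
                      :  * the DISTINCT case assumes such aggregates are recorded in
                      :  * root->hasNonPartialAggs):
                      :  *
                      :  *   SELECT sum(x) FROM t GROUP BY y;                  -- true
                      :  *   SELECT y FROM t GROUP BY GROUPING SETS ((y), ()); -- false
                      :  *   SELECT count(DISTINCT x) FROM t;                  -- false
                      :  */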
1844 7468 :
7469 : /*
7470 : * apply_scanjoin_target_to_paths
7471 : *
7472 : * Adjust the final scan/join relation, and recursively all of its children,
7473 : * to generate the final scan/join target. It would be more correct to model
7474 : * this as a separate planning step with a new RelOptInfo at the toplevel and
7475 : * for each child relation, but doing it this way is noticeably cheaper.
1837 7476 : * Maybe that problem can be solved at some point, but for now we do this.
7477 : *
7478 : * If tlist_same_exprs is true, then the scan/join target to be applied has
7479 : * the same expressions as the existing reltarget, so we need only insert the
7480 : * appropriate sortgroupref information. By avoiding the creation of
7481 : * projection paths we save effort both immediately and at plan creation time.
7482 : */
7483 : static void
1844 rhaas 7484 CBC 232971 : apply_scanjoin_target_to_paths(PlannerInfo *root,
1844 rhaas 7485 ECB : RelOptInfo *rel,
1837 7486 : List *scanjoin_targets,
7487 : List *scanjoin_targets_contain_srfs,
7488 : bool scanjoin_target_parallel_safe,
7489 : bool tlist_same_exprs)
7490 : {
1494 tgl 7491 GIC 232971 : bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7492 : PathTarget *scanjoin_target;
1494 tgl 7493 ECB : ListCell *lc;
7494 :
7495 : /* This recurses, so be paranoid. */
1837 rhaas 7496 CBC 232971 : check_stack_depth();
1844 rhaas 7497 ECB :
1494 tgl 7498 : /*
7499 : * If the rel is partitioned, we want to drop its existing paths and
7500 : * generate new ones. This function would still be correct if we kept the
7501 : * existing paths: we'd modify them to generate the correct target above
7502 : * the partitioning Append, and then they'd compete on cost with paths
7503 : * generating the target below the Append. However, in our current cost
7504 : * model the latter way is always the same or cheaper cost, so modifying
 7505                 :  * model the latter way always costs the same or less, so modifying
7506 : * is the same, varying roundoff errors might sometimes allow an existing
7507 : * path to be picked, resulting in undesirable cross-platform plan
7508 : * variations. So we drop old paths and thereby force the work to be done
7509 : * below the Append, except in the case of a non-parallel-safe target.
7510 : *
775 alvherre 7511 : * Some care is needed, because we have to allow
7512 : * generate_useful_gather_paths to see the old partial paths in the next
7513 : * stanza. Hence, zap the main pathlist here, then allow
7514 : * generate_useful_gather_paths to add path(s) to the main list, and
7515 : * finally zap the partial pathlist.
7516 : */
1494 tgl 7517 GIC 232971 : if (rel_is_partitioned)
7518 5555 : rel->pathlist = NIL;
7519 :
1844 rhaas 7520 ECB : /*
1836 7521 : * If the scan/join target is not parallel-safe, partial paths cannot
7522 : * generate it.
7523 : */
1837 rhaas 7524 GIC 232971 : if (!scanjoin_target_parallel_safe)
1837 rhaas 7525 ECB : {
7526 : /*
7527 : * Since we can't generate the final scan/join target in parallel
7528 : * workers, this is our last opportunity to use any partial paths that
1494 tgl 7529 : * exist; so build Gather path(s) that use them and emit whatever the
7530 : * current reltarget is. We don't do this in the case where the
7531 : * target is parallel-safe, since we will be able to generate superior
7532 : * paths by doing it after the final scan/join target has been
7533 : * applied.
7534 : */
1097 tomas.vondra 7535 GIC 46734 : generate_useful_gather_paths(root, rel, false);
7536 :
1836 rhaas 7537 ECB : /* Can't use parallel query above this level. */
1837 rhaas 7538 GIC 46734 : rel->partial_pathlist = NIL;
1837 rhaas 7539 CBC 46734 : rel->consider_parallel = false;
7540 : }
7541 :
7542 : /* Finish dropping old paths for a partitioned rel, per comment above */
1494 tgl 7543 232971 : if (rel_is_partitioned)
1837 rhaas 7544 GIC 5555 : rel->partial_pathlist = NIL;
1837 rhaas 7545 ECB :
7546 : /* Extract SRF-free scan/join target. */
1837 rhaas 7547 GIC 232971 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7548 :
7549 : /*
1494 tgl 7550 ECB : * Apply the SRF-free scan/join target to each existing path.
7551 : *
7552 : * If the tlist exprs are the same, we can just inject the sortgroupref
7553 : * information into the existing pathtargets. Otherwise, replace each
7554 : * path with a projection path that generates the SRF-free scan/join
7555 : * target. This can't change the ordering of paths within rel->pathlist,
7556 : * so we just modify the list in place.
7557 : */
1844 rhaas 7558 GIC 473254 : foreach(lc, rel->pathlist)
1844 rhaas 7559 ECB : {
1844 rhaas 7560 GIC 240283 : Path *subpath = (Path *) lfirst(lc);
1844 rhaas 7561 ECB :
7562 : /* Shouldn't have any parameterized paths anymore */
1844 rhaas 7563 GIC 240283 : Assert(subpath->param_info == NULL);
7564 :
1837 rhaas 7565 CBC 240283 : if (tlist_same_exprs)
1837 rhaas 7566 GIC 77488 : subpath->pathtarget->sortgrouprefs =
7567 77488 : scanjoin_target->sortgrouprefs;
1844 rhaas 7568 ECB : else
7569 : {
7570 : Path *newpath;
7571 :
1844 rhaas 7572 GIC 162795 : newpath = (Path *) create_projection_path(root, rel, subpath,
7573 : scanjoin_target);
7574 162795 : lfirst(lc) = newpath;
7575 : }
7576 : }
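                      :
                      :     /*
                      :      * Editorial example (illustrative, not part of the upstream source):
                      :      * if the existing reltarget and the scan/join target both compute
                      :      * (a, b) but the query has GROUP BY a, only the sortgroupref labels
                      :      * differ, so sharing scanjoin_target's sortgrouprefs array suffices
                      :      * and no ProjectionPath is needed.
                      :      */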
1844 rhaas 7577 ECB :
1494 tgl 7578 EUB : /* Likewise adjust the targets for any partial paths. */
1837 rhaas 7579 GBC 241857 : foreach(lc, rel->partial_pathlist)
7580 : {
1837 rhaas 7581 GIC 8886 : Path *subpath = (Path *) lfirst(lc);
7582 :
7583 : /* Shouldn't have any parameterized paths anymore */
7584 8886 : Assert(subpath->param_info == NULL);
7585 :
1837 rhaas 7586 CBC 8886 : if (tlist_same_exprs)
7587 7308 : subpath->pathtarget->sortgrouprefs =
1837 rhaas 7588 GIC 7308 : scanjoin_target->sortgrouprefs;
7589 : else
7590 : {
7591 : Path *newpath;
7592 :
1494 tgl 7593 GBC 1578 : newpath = (Path *) create_projection_path(root, rel, subpath,
7594 : scanjoin_target);
1844 rhaas 7595 GIC 1578 : lfirst(lc) = newpath;
7596 : }
7597 : }
7598 :
7599 : /*
7600 : * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
1494 tgl 7601 ECB : * atop each existing path. (Note that this function doesn't look at the
7602 : * cheapest-path fields, which is a good thing because they're bogus right
7603 : * now.)
7604 : */
1837 rhaas 7605 GIC 232971 : if (root->parse->hasTargetSRFs)
1837 rhaas 7606 CBC 3246 : adjust_paths_for_srfs(root, rel,
1837 rhaas 7607 ECB : scanjoin_targets,
7608 : scanjoin_targets_contain_srfs);
7609 :
7610 : /*
7611 : * Update the rel's target to be the final (with SRFs) scan/join target.
7612 : * This now matches the actual output of all the paths, and we might get
7613 : * confused in createplan.c if they don't agree. We must do this now so
1494 tgl 7614 : * that any append paths made in the next part will use the correct
7615 : * pathtarget (cf. create_append_path).
7616 : *
7617 : * Note that this is also necessary if GetForeignUpperPaths() gets called
7618 : * on the final scan/join relation or on any of its children, since the
7619 : * FDW might look at the rel's target to create ForeignPaths.
7620 : */
1494 tgl 7621 GIC 232971 : rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7622 :
7623 : /*
1494 tgl 7624 ECB : * If the relation is partitioned, recursively apply the scan/join target
7625 : * to all partitions, and generate brand-new Append paths in which the
7626 : * scan/join target is computed below the Append rather than above it.
7627 : * Since Append is not projection-capable, that might save a separate
7628 : * Result node, and it also is important for partitionwise aggregate.
7629 : */
1494 tgl 7630 GIC 232971 : if (rel_is_partitioned)
7631 : {
1837 rhaas 7632 5555 : List *live_children = NIL;
614 drowley 7633 ECB : int i;
7634 :
7635 : /* Adjust each partition. */
614 drowley 7636 GIC 5555 : i = -1;
614 drowley 7637 CBC 15669 : while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7638 : {
614 drowley 7639 GIC 10114 : RelOptInfo *child_rel = rel->part_rels[i];
7640 : AppendRelInfo **appinfos;
7641 : int nappinfos;
1837 rhaas 7642 10114 : List *child_scanjoin_targets = NIL;
7643 :
614 drowley 7644 10114 : Assert(child_rel != NULL);
614 drowley 7645 ECB :
7646 : /* Dummy children can be ignored. */
614 drowley 7647 CBC 10114 : if (IS_DUMMY_REL(child_rel))
1471 tgl 7648 GIC 21 : continue;
7649 :
7650 : /* Translate scan/join targets for this child. */
1837 rhaas 7651 10093 : appinfos = find_appinfos_by_relids(root, child_rel->relids,
7652 : &nappinfos);
7653 20186 : foreach(lc, scanjoin_targets)
1837 rhaas 7654 ECB : {
1837 rhaas 7655 CBC 10093 : PathTarget *target = lfirst_node(PathTarget, lc);
7656 :
1837 rhaas 7657 GIC 10093 : target = copy_pathtarget(target);
7658 10093 : target->exprs = (List *)
7659 10093 : adjust_appendrel_attrs(root,
7660 10093 : (Node *) target->exprs,
1837 rhaas 7661 ECB : nappinfos, appinfos);
1837 rhaas 7662 GIC 10093 : child_scanjoin_targets = lappend(child_scanjoin_targets,
7663 : target);
7664 : }
7665 10093 : pfree(appinfos);
7666 :
7667 : /* Recursion does the real work. */
7668 10093 : apply_scanjoin_target_to_paths(root, child_rel,
1837 rhaas 7669 ECB : child_scanjoin_targets,
7670 : scanjoin_targets_contain_srfs,
7671 : scanjoin_target_parallel_safe,
7672 : tlist_same_exprs);
7673 :
7674 : /* Save non-dummy children for Append paths. */
1837 rhaas 7675 CBC 10093 : if (!IS_DUMMY_REL(child_rel))
1837 rhaas 7676 GIC 10093 : live_children = lappend(live_children, child_rel);
7677 : }
7678 :
7679 : /* Build new paths for this relation by appending child paths. */
1494 tgl 7680 5555 : add_paths_to_append_rel(root, rel, live_children);
7681 : }
1837 rhaas 7682 ECB :
7683 : /*
7684 : * Consider generating Gather or Gather Merge paths. We must only do this
7685 : * if the relation is parallel safe, and we don't do it for child rels to
7686 : * avoid creating multiple Gather nodes within the same plan. We must do
7687 : * this after all paths have been generated and before set_cheapest, since
7688 : * one of the generated paths may turn out to be the cheapest one.
7689 : */
1837 rhaas 7690 GIC 232971 : if (rel->consider_parallel && !IS_OTHER_REL(rel))
1097 tomas.vondra 7691 62689 : generate_useful_gather_paths(root, rel, false);
7692 :
7693 : /*
7694 : * Reassess which paths are the cheapest, now that we've potentially added
1837 rhaas 7695 ECB : * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7696 : * this relation.
7697 : */
1837 rhaas 7698 CBC 232971 : set_cheapest(rel);
1844 rhaas 7699 GIC 232971 : }
1844 rhaas 7700 ECB :
7701 : /*
7702 : * create_partitionwise_grouping_paths
7703 : *
 7704                 :  * If the partition keys of the input relation are part of the GROUP BY clause, all
7705 : * the rows belonging to a given group come from a single partition. This
7706 : * allows aggregation/grouping over a partitioned relation to be broken down
7707 : * into aggregation/grouping on each partition. This should be no worse, and
7708 : * often better, than the normal approach.
7709 : *
7710 : * However, if the GROUP BY clause does not contain all the partition keys,
7711 : * rows from a given group may be spread across multiple partitions. In that
7712 : * case, we perform partial aggregation for each group, append the results,
7713 : * and then finalize aggregation. This is less certain to win than the
7714 : * previous case. It may win if the PartialAggregate stage greatly reduces
7715 : * the number of groups, because fewer rows will pass through the Append node.
7716 : * It may lose if we have lots of small groups.
7717 : */
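                      :
                      : /*
                      :  * Editorial example (illustrative, not part of the upstream source): for
                      :  * a table t partitioned by (a),
                      :  *
                      :  *   SELECT a, count(*) FROM t GROUP BY a;
                      :  *     -- full: aggregate each partition, then Append the results
                      :  *   SELECT b, count(*) FROM t GROUP BY b;
                      :  *     -- partial: PartialAggregate per partition, Append, then
                      :  *     -- FinalizeAggregate on top
                      :  */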
7718 : static void
1844 rhaas 7719 CBC 257 : create_partitionwise_grouping_paths(PlannerInfo *root,
7720 : RelOptInfo *input_rel,
7721 : RelOptInfo *grouped_rel,
1844 rhaas 7722 ECB : RelOptInfo *partially_grouped_rel,
7723 : const AggClauseCosts *agg_costs,
7724 : grouping_sets_data *gd,
7725 : PartitionwiseAggregateType patype,
7726 : GroupPathExtraData *extra)
7727 : {
1844 rhaas 7728 GIC 257 : List *grouped_live_children = NIL;
7729 257 : List *partially_grouped_live_children = NIL;
1837 7730 257 : PathTarget *target = grouped_rel->reltarget;
1752 7731 257 : bool partial_grouping_valid = true;
7732 : int i;
7733 :
1844 7734 257 : Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
1844 rhaas 7735 CBC 257 : Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
1844 rhaas 7736 ECB : partially_grouped_rel != NULL);
7737 :
7738 : /* Add paths for partitionwise aggregation/grouping. */
614 drowley 7739 GIC 257 : i = -1;
614 drowley 7740 CBC 956 : while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7741 : {
614 drowley 7742 GIC 699 : RelOptInfo *child_input_rel = input_rel->part_rels[i];
7743 : PathTarget *child_target;
7744 : AppendRelInfo **appinfos;
7745 : int nappinfos;
1844 rhaas 7746 ECB : GroupPathExtraData child_extra;
7747 : RelOptInfo *child_grouped_rel;
7748 : RelOptInfo *child_partially_grouped_rel;
7749 :
614 drowley 7750 GIC 699 : Assert(child_input_rel != NULL);
7751 :
7752 : /* Dummy children can be ignored. */
7753 699 : if (IS_DUMMY_REL(child_input_rel))
1471 tgl 7754 UIC 0 : continue;
7755 :
614 drowley 7756 GIC 699 : child_target = copy_pathtarget(target);
7757 :
7758 : /*
7759 : * Copy the given "extra" structure as is and then override the
7760 : * members specific to this child.
7761 : */
1844 rhaas 7762 699 : memcpy(&child_extra, extra, sizeof(child_extra));
1844 rhaas 7763 ECB :
1844 rhaas 7764 GIC 699 : appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
7765 : &nappinfos);
7766 :
7767 699 : child_target->exprs = (List *)
7768 699 : adjust_appendrel_attrs(root,
1844 rhaas 7769 CBC 699 : (Node *) target->exprs,
7770 : nappinfos, appinfos);
7771 :
1844 rhaas 7772 ECB : /* Translate havingQual and targetList. */
1844 rhaas 7773 CBC 699 : child_extra.havingQual = (Node *)
7774 : adjust_appendrel_attrs(root,
7775 : extra->havingQual,
7776 : nappinfos, appinfos);
1844 rhaas 7777 GIC 699 : child_extra.targetList = (List *)
7778 699 : adjust_appendrel_attrs(root,
1844 rhaas 7779 CBC 699 : (Node *) extra->targetList,
1844 rhaas 7780 ECB : nappinfos, appinfos);
7781 :
7782 : /*
7783 : * extra->patype was the value computed for our parent rel; patype is
7784 : * the value for this relation. For the child, our value is its
7785 : * parent rel's value.
7786 : */
1844 rhaas 7787 GIC 699 : child_extra.patype = patype;
1844 rhaas 7788 ECB :
7789 : /*
7790 : * Create grouping relation to hold fully aggregated grouping and/or
7791 : * aggregation paths for the child.
7792 : */
1844 rhaas 7793 CBC 699 : child_grouped_rel = make_grouping_rel(root, child_input_rel,
7794 : child_target,
1844 rhaas 7795 GIC 699 : extra->target_parallel_safe,
7796 : child_extra.havingQual);
7797 :
7798 : /* Create grouping paths for this child relation. */
7799 699 : create_ordinary_grouping_paths(root, child_input_rel,
7800 : child_grouped_rel,
7801 : agg_costs, gd, &child_extra,
7802 : &child_partially_grouped_rel);
1844 rhaas 7803 ECB :
1844 rhaas 7804 CBC 699 : if (child_partially_grouped_rel)
7805 : {
7806 : partially_grouped_live_children =
7807 441 : lappend(partially_grouped_live_children,
7808 : child_partially_grouped_rel);
1844 rhaas 7809 ECB : }
7810 : else
1752 rhaas 7811 GIC 258 : partial_grouping_valid = false;
7812 :
1844 7813 699 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
1844 rhaas 7814 ECB : {
1844 rhaas 7815 GIC 402 : set_cheapest(child_grouped_rel);
7816 402 : grouped_live_children = lappend(grouped_live_children,
7817 : child_grouped_rel);
1844 rhaas 7818 ECB : }
7819 :
1844 rhaas 7820 GIC 699 : pfree(appinfos);
1844 rhaas 7821 ECB : }
7822 :
7823 : /*
1844 rhaas 7824 EUB : * Try to create append paths for partially grouped children. For full
7825 : * partitionwise aggregation, we might have paths in the partial_pathlist
7826 : * if parallel aggregation is possible. For partial partitionwise
7827 : * aggregation, we may have paths in both pathlist and partial_pathlist.
7828 : *
7829 : * NB: We must have a partially grouped path for every child in order to
7830 : * generate a partially grouped path for this relation.
7831 : */
1752 rhaas 7832 GBC 257 : if (partially_grouped_rel && partial_grouping_valid)
7833 : {
1752 rhaas 7834 GIC 169 : Assert(partially_grouped_live_children != NIL);
1752 rhaas 7835 EUB :
1844 rhaas 7836 GIC 169 : add_paths_to_append_rel(root, partially_grouped_rel,
7837 : partially_grouped_live_children);
7838 :
7839 : /*
 1844 rhaas      7840 EUB             :          * We need to call set_cheapest, since the finalization step will use the
7841 : * cheapest path from the rel.
7842 : */
1844 rhaas 7843 GIC 169 : if (partially_grouped_rel->pathlist)
7844 169 : set_cheapest(partially_grouped_rel);
7845 : }
7846 :
7847 : /* If possible, create append paths for fully grouped children. */
7848 257 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
7849 : {
1752 7850 142 : Assert(grouped_live_children != NIL);
1752 rhaas 7851 ECB :
1844 rhaas 7852 GIC 142 : add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
1752 rhaas 7853 ECB : }
1844 rhaas 7854 GIC 257 : }
1844 rhaas 7855 ECB :
7856 : /*
7857 : * group_by_has_partkey
7858 : *
 7859                 :  * Returns true if all the partition keys of the given relation are part of
 7860                 :  * the GROUP BY clause, false otherwise.
1844 rhaas 7861 EUB : */
7862 : static bool
1844 rhaas 7863 CBC 254 : group_by_has_partkey(RelOptInfo *input_rel,
7864 : List *targetList,
7865 : List *groupClause)
1844 rhaas 7866 ECB : {
1844 rhaas 7867 GIC 254 : List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
1844 rhaas 7868 CBC 254 : int cnt = 0;
7869 : int partnatts;
7870 :
1844 rhaas 7871 ECB : /* Input relation should be partitioned. */
1844 rhaas 7872 GIC 254 : Assert(input_rel->part_scheme);
7873 :
7874 : /* Rule out early, if there are no partition keys present. */
1844 rhaas 7875 CBC 254 : if (!input_rel->partexprs)
1844 rhaas 7876 UIC 0 : return false;
7877 :
1844 rhaas 7878 GIC 254 : partnatts = input_rel->part_scheme->partnatts;
7879 :
7880 414 : for (cnt = 0; cnt < partnatts; cnt++)
7881 : {
7882 272 : List *partexprs = input_rel->partexprs[cnt];
7883 : ListCell *lc;
7884 272 : bool found = false;
7885 :
7886 381 : foreach(lc, partexprs)
7887 : {
7888 269 : Expr *partexpr = lfirst(lc);
7889 :
7890 269 : if (list_member(groupexprs, partexpr))
7891 : {
7892 160 : found = true;
1844 rhaas 7893 CBC 160 : break;
7894 : }
7895 : }
7896 :
7897 : /*
7898 : * If none of the partition key expressions match with any of the
7899 : * GROUP BY expression, return false.
1844 rhaas 7900 ECB : */
1844 rhaas 7901 GIC 272 : if (!found)
7902 112 : return false;
7903 : }
7904 :
1844 rhaas 7905 CBC 142 : return true;
7906 : }
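                      :
                      : /*
                      :  * Editorial example (illustrative, not part of the upstream source): for
                      :  * a table partitioned by (a, b), GROUP BY a, b, c covers every partition
                      :  * key, so this returns true; GROUP BY a, c misses b, so it returns false
                      :  * and only partial partitionwise aggregation remains possible.
                      :  */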