Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * costsize.c
4 : * Routines to compute (and set) relation sizes and path costs
5 : *
6 : * Path costs are measured in arbitrary units established by these basic
7 : * parameters:
8 : *
9 : * seq_page_cost Cost of a sequential page fetch
10 : * random_page_cost Cost of a non-sequential page fetch
11 : * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 : * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 : * cpu_operator_cost Cost of CPU time to execute an operator or function
14 : * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to master backend
15 : * parallel_setup_cost Cost of setting up shared memory for parallelism
16 : *
17 : * We expect that the kernel will typically do some amount of read-ahead
18 : * optimization; this in conjunction with seek costs means that seq_page_cost
19 : * is normally considerably less than random_page_cost. (However, if the
20 : * database is fully cached in RAM, it is reasonable to set them equal.)
21 : *
22 : * We also use a rough estimate "effective_cache_size" of the number of
23 : * disk pages in Postgres + OS-level disk cache. (We can't simply use
24 : * NBuffers for this purpose because that would ignore the effects of
25 : * the kernel's disk cache.)
26 : *
27 : * Obviously, taking constants for these values is an oversimplification,
28 : * but it's tough enough to get any useful estimates even at this level of
29 : * detail. Note that all of these parameters are user-settable, in case
30 : * the default values are drastically off for a particular platform.
31 : *
32 : * seq_page_cost and random_page_cost can also be overridden for an individual
33 : * tablespace, in case some data is on a fast disk and other data is on a slow
34 : * disk. Per-tablespace overrides never apply to temporary work files such as
35 : * an external sort or a materialize node that overflows work_mem.
36 : *
37 : * We compute two separate costs for each path:
38 : * total_cost: total estimated cost to fetch all tuples
39 : * startup_cost: cost that is expended before first tuple is fetched
40 : * In some scenarios, such as when there is a LIMIT or we are implementing
41 : * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42 : * path's result. A caller can estimate the cost of fetching a partial
43 : * result by interpolating between startup_cost and total_cost. In detail:
44 : * actual_cost = startup_cost +
45 : * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
46 : * Note that a base relation's rows count (and, by extension, plan_rows for
47 : * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
48 : * that this equation works properly. (Note: while path->rows is never zero
49 : * for ordinary relations, it is zero for paths for provably-empty relations,
50 : * so beware of division-by-zero.) The LIMIT is applied as a top-level
51 : * plan node.
52 : *
53 : * For largely historical reasons, most of the routines in this module use
54 : * the passed result Path only to store their results (rows, startup_cost and
55 : * total_cost) into. All the input data they need is passed as separate
56 : * parameters, even though much of it could be extracted from the Path.
57 : * An exception is made for the cost_XXXjoin() routines, which expect all
58 : * the other fields of the passed XXXPath to be filled in, and similarly
59 : * cost_index() assumes the passed IndexPath is valid except for its output
60 : * values.
61 : *
62 : *
63 : * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
64 : * Portions Copyright (c) 1994, Regents of the University of California
65 : *
66 : * IDENTIFICATION
67 : * src/backend/optimizer/path/costsize.c
68 : *
69 : *-------------------------------------------------------------------------
70 : */
71 :
72 : #include "postgres.h"
73 :
74 : #ifdef _MSC_VER
75 : #include <float.h> /* for _isnan */
76 : #endif
77 : #include <math.h>
78 :
79 : #include "access/amapi.h"
80 : #include "access/htup_details.h"
81 : #include "access/tsmapi.h"
82 : #include "executor/executor.h"
83 : #include "executor/nodeHash.h"
84 : #include "miscadmin.h"
85 : #include "nodes/nodeFuncs.h"
86 : #include "optimizer/clauses.h"
87 : #include "optimizer/cost.h"
88 : #include "optimizer/pathnode.h"
89 : #include "optimizer/paths.h"
90 : #include "optimizer/placeholder.h"
91 : #include "optimizer/plancat.h"
92 : #include "optimizer/planmain.h"
93 : #include "optimizer/restrictinfo.h"
94 : #include "parser/parsetree.h"
95 : #include "utils/lsyscache.h"
96 : #include "utils/selfuncs.h"
97 : #include "utils/spccache.h"
98 : #include "utils/tuplesort.h"
99 :
100 :
101 : #define LOG2(x) (log(x) / 0.693147180559945)
102 :
103 :
104 : double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
105 : double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
106 : double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
107 : double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
108 : double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
109 : double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
110 : double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
111 :
112 : int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
113 :
114 : Cost disable_cost = 1.0e10;
115 :
116 : int max_parallel_workers_per_gather = 2;
117 :
118 : bool enable_seqscan = true;
119 : bool enable_indexscan = true;
120 : bool enable_indexonlyscan = true;
121 : bool enable_bitmapscan = true;
122 : bool enable_tidscan = true;
123 : bool enable_sort = true;
124 : bool enable_hashagg = true;
125 : bool enable_nestloop = true;
126 : bool enable_material = true;
127 : bool enable_mergejoin = true;
128 : bool enable_hashjoin = true;
129 : bool enable_gathermerge = true;
130 :
131 : typedef struct
132 : {
133 : PlannerInfo *root;
134 : QualCost total;
135 : } cost_qual_eval_context;
136 :
137 : static List *extract_nonindex_conditions(List *qual_clauses, List *indexquals);
138 : static MergeScanSelCache *cached_scansel(PlannerInfo *root,
139 : RestrictInfo *rinfo,
140 : PathKey *pathkey);
141 : static void cost_rescan(PlannerInfo *root, Path *path,
142 : Cost *rescan_startup_cost, Cost *rescan_total_cost);
143 : static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
144 : static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
145 : ParamPathInfo *param_info,
146 : QualCost *qpqual_cost);
147 : static bool has_indexed_join_quals(NestPath *joinpath);
148 : static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
149 : List *quals);
150 : static double calc_joinrel_size_estimate(PlannerInfo *root,
151 : RelOptInfo *outer_rel,
152 : RelOptInfo *inner_rel,
153 : double outer_rows,
154 : double inner_rows,
155 : SpecialJoinInfo *sjinfo,
156 : List *restrictlist);
157 : static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
158 : Relids outer_relids,
159 : Relids inner_relids,
160 : SpecialJoinInfo *sjinfo,
161 : List **restrictlist);
162 : static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
163 : static double relation_byte_size(double tuples, int width);
164 : static double page_size(double tuples, int width);
165 : static double get_parallel_divisor(Path *path);
166 :
167 :
168 : /*
169 : * clamp_row_est
170 : * Force a row-count estimate to a sane value.
171 : */
172 : double
173 169354 : clamp_row_est(double nrows)
174 : {
175 : /*
176 : * Force estimate to be at least one row, to make explain output look
177 : * better and to avoid possible divide-by-zero when interpolating costs.
178 : * Make it an integer, too.
179 : */
180 169354 : if (nrows <= 1.0)
181 62423 : nrows = 1.0;
182 : else
183 106931 : nrows = rint(nrows);
184 :
185 169354 : return nrows;
186 : }
187 :
188 :
189 : /*
190 : * cost_seqscan
191 : * Determines and returns the cost of scanning a relation sequentially.
192 : *
193 : * 'baserel' is the relation to be scanned
194 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
195 : */
196 : void
197 16197 : cost_seqscan(Path *path, PlannerInfo *root,
198 : RelOptInfo *baserel, ParamPathInfo *param_info)
199 : {
200 16197 : Cost startup_cost = 0;
201 : Cost cpu_run_cost;
202 : Cost disk_run_cost;
203 : double spc_seq_page_cost;
204 : QualCost qpqual_cost;
205 : Cost cpu_per_tuple;
206 :
207 : /* Should only be applied to base relations */
208 16197 : Assert(baserel->relid > 0);
209 16197 : Assert(baserel->rtekind == RTE_RELATION);
210 :
211 : /* Mark the path with the correct row estimate */
212 16197 : if (param_info)
213 38 : path->rows = param_info->ppi_rows;
214 : else
215 16159 : path->rows = baserel->rows;
216 :
217 16197 : if (!enable_seqscan)
218 1085 : startup_cost += disable_cost;
219 :
220 : /* fetch estimated page cost for tablespace containing table */
221 16197 : get_tablespace_page_costs(baserel->reltablespace,
222 : NULL,
223 : &spc_seq_page_cost);
224 :
225 : /*
226 : * disk costs
227 : */
228 16197 : disk_run_cost = spc_seq_page_cost * baserel->pages;
229 :
230 : /* CPU costs */
231 16197 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
232 :
233 16197 : startup_cost += qpqual_cost.startup;
234 16197 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
235 16197 : cpu_run_cost = cpu_per_tuple * baserel->tuples;
236 : /* tlist eval costs are paid per output row, not per tuple scanned */
237 16197 : startup_cost += path->pathtarget->cost.startup;
238 16197 : cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
239 :
240 : /* Adjust costing for parallelism, if used. */
241 16197 : if (path->parallel_workers > 0)
242 : {
243 654 : double parallel_divisor = get_parallel_divisor(path);
244 :
245 : /* The CPU cost is divided among all the workers. */
246 654 : cpu_run_cost /= parallel_divisor;
247 :
248 : /*
249 : * It may be possible to amortize some of the I/O cost, but probably
250 : * not very much, because most operating systems already do aggressive
251 : * prefetching. For now, we assume that the disk run cost can't be
252 : * amortized at all.
253 : */
254 :
255 : /*
256 : * In the case of a parallel plan, the row count needs to represent
257 : * the number of tuples processed per worker.
258 : */
259 654 : path->rows = clamp_row_est(path->rows / parallel_divisor);
260 : }
261 :
262 16197 : path->startup_cost = startup_cost;
263 16197 : path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
264 16197 : }
265 :
266 : /*
267 : * cost_samplescan
268 : * Determines and returns the cost of scanning a relation using sampling.
269 : *
270 : * 'baserel' is the relation to be scanned
271 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
272 : */
273 : void
274 36 : cost_samplescan(Path *path, PlannerInfo *root,
275 : RelOptInfo *baserel, ParamPathInfo *param_info)
276 : {
277 36 : Cost startup_cost = 0;
278 36 : Cost run_cost = 0;
279 : RangeTblEntry *rte;
280 : TableSampleClause *tsc;
281 : TsmRoutine *tsm;
282 : double spc_seq_page_cost,
283 : spc_random_page_cost,
284 : spc_page_cost;
285 : QualCost qpqual_cost;
286 : Cost cpu_per_tuple;
287 :
288 : /* Should only be applied to base relations with tablesample clauses */
289 36 : Assert(baserel->relid > 0);
290 36 : rte = planner_rt_fetch(baserel->relid, root);
291 36 : Assert(rte->rtekind == RTE_RELATION);
292 36 : tsc = rte->tablesample;
293 36 : Assert(tsc != NULL);
294 36 : tsm = GetTsmRoutine(tsc->tsmhandler);
295 :
296 : /* Mark the path with the correct row estimate */
297 36 : if (param_info)
298 3 : path->rows = param_info->ppi_rows;
299 : else
300 33 : path->rows = baserel->rows;
301 :
302 : /* fetch estimated page cost for tablespace containing table */
303 36 : get_tablespace_page_costs(baserel->reltablespace,
304 : &spc_random_page_cost,
305 : &spc_seq_page_cost);
306 :
307 : /* if NextSampleBlock is used, assume random access, else sequential */
308 72 : spc_page_cost = (tsm->NextSampleBlock != NULL) ?
309 36 : spc_random_page_cost : spc_seq_page_cost;
310 :
311 : /*
312 : * disk costs (recall that baserel->pages has already been set to the
313 : * number of pages the sampling method will visit)
314 : */
315 36 : run_cost += spc_page_cost * baserel->pages;
316 :
317 : /*
318 : * CPU costs (recall that baserel->tuples has already been set to the
319 : * number of tuples the sampling method will select). Note that we ignore
320 : * execution cost of the TABLESAMPLE parameter expressions; they will be
321 : * evaluated only once per scan, and in most usages they'll likely be
322 : * simple constants anyway. We also don't charge anything for the
323 : * calculations the sampling method might do internally.
324 : */
325 36 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
326 :
327 36 : startup_cost += qpqual_cost.startup;
328 36 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
329 36 : run_cost += cpu_per_tuple * baserel->tuples;
330 : /* tlist eval costs are paid per output row, not per tuple scanned */
331 36 : startup_cost += path->pathtarget->cost.startup;
332 36 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
333 :
334 36 : path->startup_cost = startup_cost;
335 36 : path->total_cost = startup_cost + run_cost;
336 36 : }
337 :
338 : /*
339 : * cost_gather
340 : * Determines and returns the cost of gather path.
341 : *
342 : * 'rel' is the relation to be operated upon
343 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
344 : * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
345 : * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
346 : * correspond to any particular RelOptInfo.
347 : */
348 : void
349 314 : cost_gather(GatherPath *path, PlannerInfo *root,
350 : RelOptInfo *rel, ParamPathInfo *param_info,
351 : double *rows)
352 : {
353 314 : Cost startup_cost = 0;
354 314 : Cost run_cost = 0;
355 :
356 : /* Mark the path with the correct row estimate */
357 314 : if (rows)
358 56 : path->path.rows = *rows;
359 258 : else if (param_info)
360 0 : path->path.rows = param_info->ppi_rows;
361 : else
362 258 : path->path.rows = rel->rows;
363 :
364 314 : startup_cost = path->subpath->startup_cost;
365 :
366 314 : run_cost = path->subpath->total_cost - path->subpath->startup_cost;
367 :
368 : /* Parallel setup and communication cost. */
369 314 : startup_cost += parallel_setup_cost;
370 314 : run_cost += parallel_tuple_cost * path->path.rows;
371 :
372 314 : path->path.startup_cost = startup_cost;
373 314 : path->path.total_cost = (startup_cost + run_cost);
374 314 : }
375 :
376 : /*
377 : * cost_gather_merge
378 : * Determines and returns the cost of gather merge path.
379 : *
380 : * GatherMerge merges several pre-sorted input streams, using a heap that at
381 : * any given instant holds the next tuple from each stream. If there are N
382 : * streams, we need about N*log2(N) tuple comparisons to construct the heap at
383 : * startup, and then for each output tuple, about log2(N) comparisons to
384 : * replace the top heap entry with the next tuple from the same stream.
385 : */
386 : void
387 46 : cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
388 : RelOptInfo *rel, ParamPathInfo *param_info,
389 : Cost input_startup_cost, Cost input_total_cost,
390 : double *rows)
391 : {
392 46 : Cost startup_cost = 0;
393 46 : Cost run_cost = 0;
394 : Cost comparison_cost;
395 : double N;
396 : double logN;
397 :
398 : /* Mark the path with the correct row estimate */
399 46 : if (rows)
400 43 : path->path.rows = *rows;
401 3 : else if (param_info)
402 0 : path->path.rows = param_info->ppi_rows;
403 : else
404 3 : path->path.rows = rel->rows;
405 :
406 46 : if (!enable_gathermerge)
407 0 : startup_cost += disable_cost;
408 :
409 : /*
410 : * Add one to the number of workers to account for the leader. This might
411 : * be overgenerous since the leader will do less work than other workers
412 : * in typical cases, but we'll go with it for now.
413 : */
414 46 : Assert(path->num_workers > 0);
415 46 : N = (double) path->num_workers + 1;
416 46 : logN = LOG2(N);
417 :
418 : /* Assumed cost per tuple comparison */
419 46 : comparison_cost = 2.0 * cpu_operator_cost;
420 :
421 : /* Heap creation cost */
422 46 : startup_cost += comparison_cost * N * logN;
423 :
424 : /* Per-tuple heap maintenance cost */
425 46 : run_cost += path->path.rows * comparison_cost * logN;
426 :
427 : /* small cost for heap management, like cost_merge_append */
428 46 : run_cost += cpu_operator_cost * path->path.rows;
429 :
430 : /*
431 : * Parallel setup and communication cost. Since Gather Merge, unlike
432 : * Gather, requires us to block until a tuple is available from every
433 : * worker, we bump the IPC cost up a little bit as compared with Gather.
434 : * For lack of a better idea, charge an extra 5%.
435 : */
436 46 : startup_cost += parallel_setup_cost;
437 46 : run_cost += parallel_tuple_cost * path->path.rows * 1.05;
438 :
439 46 : path->path.startup_cost = startup_cost + input_startup_cost;
440 46 : path->path.total_cost = (startup_cost + run_cost + input_total_cost);
441 46 : }
442 :
443 : /*
444 : * cost_index
445 : * Determines and returns the cost of scanning a relation using an index.
446 : *
447 : * 'path' describes the indexscan under consideration, and is complete
448 : * except for the fields to be set by this routine
449 : * 'loop_count' is the number of repetitions of the indexscan to factor into
450 : * estimates of caching behavior
451 : *
452 : * In addition to rows, startup_cost and total_cost, cost_index() sets the
453 : * path's indextotalcost and indexselectivity fields. These values will be
454 : * needed if the IndexPath is used in a BitmapIndexScan.
455 : *
456 : * NOTE: path->indexquals must contain only clauses usable as index
457 : * restrictions. Any additional quals evaluated as qpquals may reduce the
458 : * number of returned tuples, but they won't reduce the number of tuples
459 : * we have to fetch from the table, so they don't reduce the scan cost.
460 : */
461 : void
462 22002 : cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
463 : bool partial_path)
464 : {
465 22002 : IndexOptInfo *index = path->indexinfo;
466 22002 : RelOptInfo *baserel = index->rel;
467 22002 : bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
468 : amcostestimate_function amcostestimate;
469 : List *qpquals;
470 22002 : Cost startup_cost = 0;
471 22002 : Cost run_cost = 0;
472 22002 : Cost cpu_run_cost = 0;
473 : Cost indexStartupCost;
474 : Cost indexTotalCost;
475 : Selectivity indexSelectivity;
476 : double indexCorrelation,
477 : csquared;
478 : double spc_seq_page_cost,
479 : spc_random_page_cost;
480 : Cost min_IO_cost,
481 : max_IO_cost;
482 : QualCost qpqual_cost;
483 : Cost cpu_per_tuple;
484 : double tuples_fetched;
485 : double pages_fetched;
486 : double rand_heap_pages;
487 : double index_pages;
488 :
489 : /* Should only be applied to base relations */
490 22002 : Assert(IsA(baserel, RelOptInfo) &&
491 : IsA(index, IndexOptInfo));
492 22002 : Assert(baserel->relid > 0);
493 22002 : Assert(baserel->rtekind == RTE_RELATION);
494 :
495 : /*
496 : * Mark the path with the correct row estimate, and identify which quals
497 : * will need to be enforced as qpquals. We need not check any quals that
498 : * are implied by the index's predicate, so we can use indrestrictinfo not
499 : * baserestrictinfo as the list of relevant restriction clauses for the
500 : * rel.
501 : */
502 22002 : if (path->path.param_info)
503 : {
504 3578 : path->path.rows = path->path.param_info->ppi_rows;
505 : /* qpquals come from the rel's restriction clauses and ppi_clauses */
506 10734 : qpquals = list_concat(
507 3578 : extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
508 : path->indexquals),
509 3578 : extract_nonindex_conditions(path->path.param_info->ppi_clauses,
510 : path->indexquals));
511 : }
512 : else
513 : {
514 18424 : path->path.rows = baserel->rows;
515 : /* qpquals come from just the rel's restriction clauses */
516 18424 : qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
517 : path->indexquals);
518 : }
519 :
520 22002 : if (!enable_indexscan)
521 261 : startup_cost += disable_cost;
522 : /* we don't need to check enable_indexonlyscan; indxpath.c does that */
523 :
524 : /*
525 : * Call index-access-method-specific code to estimate the processing cost
526 : * for scanning the index, as well as the selectivity of the index (ie,
527 : * the fraction of main-table tuples we will have to retrieve) and its
528 : * correlation to the main-table tuple order. We need a cast here because
529 : * relation.h uses a weak function type to avoid including amapi.h.
530 : */
531 22002 : amcostestimate = (amcostestimate_function) index->amcostestimate;
532 22002 : amcostestimate(root, path, loop_count,
533 : &indexStartupCost, &indexTotalCost,
534 : &indexSelectivity, &indexCorrelation,
535 : &index_pages);
536 :
537 : /*
538 : * Save amcostestimate's results for possible use in bitmap scan planning.
539 : * We don't bother to save indexStartupCost or indexCorrelation, because a
540 : * bitmap scan doesn't care about either.
541 : */
542 22002 : path->indextotalcost = indexTotalCost;
543 22002 : path->indexselectivity = indexSelectivity;
544 :
545 : /* all costs for touching index itself included here */
546 22002 : startup_cost += indexStartupCost;
547 22002 : run_cost += indexTotalCost - indexStartupCost;
548 :
549 : /* estimate number of main-table tuples fetched */
550 22002 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
551 :
552 : /* fetch estimated page costs for tablespace containing table */
553 22002 : get_tablespace_page_costs(baserel->reltablespace,
554 : &spc_random_page_cost,
555 : &spc_seq_page_cost);
556 :
557 : /*----------
558 : * Estimate number of main-table pages fetched, and compute I/O cost.
559 : *
560 : * When the index ordering is uncorrelated with the table ordering,
561 : * we use an approximation proposed by Mackert and Lohman (see
562 : * index_pages_fetched() for details) to compute the number of pages
563 : * fetched, and then charge spc_random_page_cost per page fetched.
564 : *
565 : * When the index ordering is exactly correlated with the table ordering
566 : * (just after a CLUSTER, for example), the number of pages fetched should
567 : * be exactly selectivity * table_size. What's more, all but the first
568 : * will be sequential fetches, not the random fetches that occur in the
569 : * uncorrelated case. So if the number of pages is more than 1, we
570 : * ought to charge
571 : * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
572 : * For partially-correlated indexes, we ought to charge somewhere between
573 : * these two estimates. We currently interpolate linearly between the
574 : * estimates based on the correlation squared (XXX is that appropriate?).
575 : *
576 : * If it's an index-only scan, then we will not need to fetch any heap
577 : * pages for which the visibility map shows all tuples are visible.
578 : * Hence, reduce the estimated number of heap fetches accordingly.
579 : * We use the measured fraction of the entire heap that is all-visible,
580 : * which might not be particularly relevant to the subset of the heap
581 : * that this query will fetch; but it's not clear how to do better.
582 : *----------
583 : */
584 22002 : if (loop_count > 1)
585 : {
586 : /*
587 : * For repeated indexscans, the appropriate estimate for the
588 : * uncorrelated case is to scale up the number of tuples fetched in
589 : * the Mackert and Lohman formula by the number of scans, so that we
590 : * estimate the number of pages fetched by all the scans; then
591 : * pro-rate the costs for one scan. In this case we assume all the
592 : * fetches are random accesses.
593 : */
594 2440 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
595 : baserel->pages,
596 2440 : (double) index->pages,
597 : root);
598 :
599 2440 : if (indexonly)
600 550 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
601 :
602 2440 : rand_heap_pages = pages_fetched;
603 :
604 2440 : max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
605 :
606 : /*
607 : * In the perfectly correlated case, the number of pages touched by
608 : * each scan is selectivity * table_size, and we can use the Mackert
609 : * and Lohman formula at the page level to estimate how much work is
610 : * saved by caching across scans. We still assume all the fetches are
611 : * random, though, which is an overestimate that's hard to correct for
612 : * without double-counting the cache effects. (But in most cases
613 : * where such a plan is actually interesting, only one page would get
614 : * fetched per scan anyway, so it shouldn't matter much.)
615 : */
616 2440 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
617 :
618 2440 : pages_fetched = index_pages_fetched(pages_fetched * loop_count,
619 : baserel->pages,
620 2440 : (double) index->pages,
621 : root);
622 :
623 2440 : if (indexonly)
624 550 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
625 :
626 2440 : min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
627 : }
628 : else
629 : {
630 : /*
631 : * Normal case: apply the Mackert and Lohman formula, and then
632 : * interpolate between that and the correlation-derived result.
633 : */
634 19562 : pages_fetched = index_pages_fetched(tuples_fetched,
635 : baserel->pages,
636 19562 : (double) index->pages,
637 : root);
638 :
639 19562 : if (indexonly)
640 3008 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
641 :
642 19562 : rand_heap_pages = pages_fetched;
643 :
644 : /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
645 19562 : max_IO_cost = pages_fetched * spc_random_page_cost;
646 :
647 : /* min_IO_cost is for the perfectly correlated case (csquared=1) */
648 19562 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
649 :
650 19562 : if (indexonly)
651 3008 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
652 :
653 19562 : if (pages_fetched > 0)
654 : {
655 17964 : min_IO_cost = spc_random_page_cost;
656 17964 : if (pages_fetched > 1)
657 5985 : min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
658 : }
659 : else
660 1598 : min_IO_cost = 0;
661 : }
662 :
663 22002 : if (partial_path)
664 : {
665 : /*
666 : * For index only scans compute workers based on number of index pages
667 : * fetched; the number of heap pages we fetch might be so small as to
668 : * effectively rule out parallelism, which we don't want to do.
669 : */
670 6818 : if (indexonly)
671 1160 : rand_heap_pages = -1;
672 :
673 : /*
674 : * Estimate the number of parallel workers required to scan index. Use
675 : * the number of heap pages computed considering heap fetches won't be
676 : * sequential as for parallel scans the pages are accessed in random
677 : * order.
678 : */
679 6818 : path->path.parallel_workers = compute_parallel_worker(baserel,
680 : rand_heap_pages, index_pages);
681 :
682 : /*
683 : * Fall out if workers can't be assigned for parallel scan, because in
684 : * such a case this path will be rejected. So there is no benefit in
685 : * doing extra computation.
686 : */
687 6818 : if (path->path.parallel_workers <= 0)
688 28588 : return;
689 :
690 232 : path->path.parallel_aware = true;
691 : }
692 :
693 : /*
694 : * Now interpolate based on estimated index order correlation to get total
695 : * disk I/O cost for main table accesses.
696 : */
697 15416 : csquared = indexCorrelation * indexCorrelation;
698 :
699 15416 : run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
700 :
701 : /*
702 : * Estimate CPU costs per tuple.
703 : *
704 : * What we want here is cpu_tuple_cost plus the evaluation costs of any
705 : * qual clauses that we have to evaluate as qpquals.
706 : */
707 15416 : cost_qual_eval(&qpqual_cost, qpquals, root);
708 :
709 15416 : startup_cost += qpqual_cost.startup;
710 15416 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
711 :
712 15416 : cpu_run_cost += cpu_per_tuple * tuples_fetched;
713 :
714 : /* tlist eval costs are paid per output row, not per tuple scanned */
715 15416 : startup_cost += path->path.pathtarget->cost.startup;
716 15416 : cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
717 :
718 : /* Adjust costing for parallelism, if used. */
719 15416 : if (path->path.parallel_workers > 0)
720 : {
721 232 : double parallel_divisor = get_parallel_divisor(&path->path);
722 :
723 232 : path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
724 :
725 : /* The CPU cost is divided among all the workers. */
726 232 : cpu_run_cost /= parallel_divisor;
727 : }
728 :
729 15416 : run_cost += cpu_run_cost;
730 :
731 15416 : path->path.startup_cost = startup_cost;
732 15416 : path->path.total_cost = startup_cost + run_cost;
733 : }
734 :
735 : /*
736 : * extract_nonindex_conditions
737 : *
738 : * Given a list of quals to be enforced in an indexscan, extract the ones that
739 : * will have to be applied as qpquals (ie, the index machinery won't handle
740 : * them). The actual rules for this appear in create_indexscan_plan() in
741 : * createplan.c, but the full rules are fairly expensive and we don't want to
742 : * go to that much effort for index paths that don't get selected for the
743 : * final plan. So we approximate it as quals that don't appear directly in
744 : * indexquals and also are not redundant children of the same EquivalenceClass
745 : * as some indexqual. This method neglects some infrequently-relevant
746 : * considerations, specifically clauses that needn't be checked because they
747 : * are implied by an indexqual. It does not seem worth the cycles to try to
748 : * factor that in at this stage, even though createplan.c will take pains to
749 : * remove such unnecessary clauses from the qpquals list if this path is
750 : * selected for use.
751 : */
752 : static List *
753 25580 : extract_nonindex_conditions(List *qual_clauses, List *indexquals)
754 : {
755 25580 : List *result = NIL;
756 : ListCell *lc;
757 :
758 51781 : foreach(lc, qual_clauses)
759 : {
760 26201 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
761 :
762 26201 : if (rinfo->pseudoconstant)
763 262 : continue; /* we may drop pseudoconstants here */
764 25939 : if (list_member_ptr(indexquals, rinfo))
765 13550 : continue; /* simple duplicate */
766 12389 : if (is_redundant_derived_clause(rinfo, indexquals))
767 2682 : continue; /* derived from same EquivalenceClass */
768 : /* ... skip the predicate proof attempt createplan.c will try ... */
769 9707 : result = lappend(result, rinfo);
770 : }
771 25580 : return result;
772 : }
773 :
774 : /*
775 : * index_pages_fetched
776 : * Estimate the number of pages actually fetched after accounting for
777 : * cache effects.
778 : *
779 : * We use an approximation proposed by Mackert and Lohman, "Index Scans
780 : * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
781 : * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
782 : * The Mackert and Lohman approximation is that the number of pages
783 : * fetched is
784 : * PF =
785 : * min(2TNs/(2T+Ns), T) when T <= b
786 : * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
787 : * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
788 : * where
789 : * T = # pages in table
790 : * N = # tuples in table
791 : * s = selectivity = fraction of table to be scanned
792 : * b = # buffer pages available (we include kernel space here)
793 : *
794 : * We assume that effective_cache_size is the total number of buffer pages
795 : * available for the whole query, and pro-rate that space across all the
796 : * tables in the query and the index currently under consideration. (This
797 : * ignores space needed for other indexes used by the query, but since we
798 : * don't know which indexes will get used, we can't estimate that very well;
799 : * and in any case counting all the tables may well be an overestimate, since
800 : * depending on the join plan not all the tables may be scanned concurrently.)
801 : *
802 : * The product Ns is the number of tuples fetched; we pass in that
803 : * product rather than calculating it here. "pages" is the number of pages
804 : * in the object under consideration (either an index or a table).
805 : * "index_pages" is the amount to add to the total table space, which was
806 : * computed for us by query_planner.
807 : *
808 : * Caller is expected to have ensured that tuples_fetched is greater than zero
809 : * and rounded to integer (see clamp_row_est). The result will likewise be
810 : * greater than zero and integral.
811 : */
812 : double
813 31139 : index_pages_fetched(double tuples_fetched, BlockNumber pages,
814 : double index_pages, PlannerInfo *root)
815 : {
816 : double pages_fetched;
817 : double total_pages;
818 : double T,
819 : b;
820 :
821 : /* T is # pages in table, but don't allow it to be zero */
822 31139 : T = (pages > 1) ? (double) pages : 1.0;
823 :
824 : /* Compute number of pages assumed to be competing for cache space */
825 31139 : total_pages = root->total_table_pages + index_pages;
826 31139 : total_pages = Max(total_pages, 1.0);
827 31139 : Assert(T <= total_pages);
828 :
829 : /* b is pro-rated share of effective_cache_size */
830 31139 : b = (double) effective_cache_size * T / total_pages;
831 :
832 : /* force it positive and integral */
833 31139 : if (b <= 1.0)
834 0 : b = 1.0;
835 : else
836 31139 : b = ceil(b);
837 :
838 : /* This part is the Mackert and Lohman formula */
839 31139 : if (T <= b)
840 : {
841 31139 : pages_fetched =
842 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
843 31139 : if (pages_fetched >= T)
844 16836 : pages_fetched = T;
845 : else
846 14303 : pages_fetched = ceil(pages_fetched);
847 : }
848 : else
849 : {
850 : double lim;
851 :
852 0 : lim = (2.0 * T * b) / (2.0 * T - b);
853 0 : if (tuples_fetched <= lim)
854 : {
855 0 : pages_fetched =
856 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
857 : }
858 : else
859 : {
860 0 : pages_fetched =
861 : b + (tuples_fetched - lim) * (T - b) / T;
862 : }
863 0 : pages_fetched = ceil(pages_fetched);
864 : }
865 31139 : return pages_fetched;
866 : }
867 :
868 : /*
869 : * get_indexpath_pages
870 : * Determine the total size of the indexes used in a bitmap index path.
871 : *
872 : * Note: if the same index is used more than once in a bitmap tree, we will
873 : * count it multiple times, which perhaps is the wrong thing ... but it's
874 : * not completely clear, and detecting duplicates is difficult, so ignore it
875 : * for now.
876 : */
877 : static double
878 5126 : get_indexpath_pages(Path *bitmapqual)
879 : {
880 5126 : double result = 0;
881 : ListCell *l;
882 :
883 5126 : if (IsA(bitmapqual, BitmapAndPath))
884 : {
885 583 : BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
886 :
887 1749 : foreach(l, apath->bitmapquals)
888 : {
889 1166 : result += get_indexpath_pages((Path *) lfirst(l));
890 : }
891 : }
892 4543 : else if (IsA(bitmapqual, BitmapOrPath))
893 : {
894 0 : BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
895 :
896 0 : foreach(l, opath->bitmapquals)
897 : {
898 0 : result += get_indexpath_pages((Path *) lfirst(l));
899 : }
900 : }
901 4543 : else if (IsA(bitmapqual, IndexPath))
902 : {
903 4543 : IndexPath *ipath = (IndexPath *) bitmapqual;
904 :
905 4543 : result = (double) ipath->indexinfo->pages;
906 : }
907 : else
908 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
909 :
910 5126 : return result;
911 : }
912 :
913 : /*
914 : * cost_bitmap_heap_scan
915 : * Determines and returns the cost of scanning a relation using a bitmap
916 : * index-then-heap plan.
917 : *
918 : * 'baserel' is the relation to be scanned
919 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
920 : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
921 : * 'loop_count' is the number of repetitions of the indexscan to factor into
922 : * estimates of caching behavior
923 : *
924 : * Note: the component IndexPaths in bitmapqual should have been costed
925 : * using the same loop_count.
926 : */
927 : void
928 14743 : cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
929 : ParamPathInfo *param_info,
930 : Path *bitmapqual, double loop_count)
931 : {
932 14743 : Cost startup_cost = 0;
933 14743 : Cost run_cost = 0;
934 : Cost indexTotalCost;
935 : QualCost qpqual_cost;
936 : Cost cpu_per_tuple;
937 : Cost cost_per_page;
938 : Cost cpu_run_cost;
939 : double tuples_fetched;
940 : double pages_fetched;
941 : double spc_seq_page_cost,
942 : spc_random_page_cost;
943 : double T;
944 :
945 : /* Should only be applied to base relations */
946 14743 : Assert(IsA(baserel, RelOptInfo));
947 14743 : Assert(baserel->relid > 0);
948 14743 : Assert(baserel->rtekind == RTE_RELATION);
949 :
950 : /* Mark the path with the correct row estimate */
951 14743 : if (param_info)
952 5282 : path->rows = param_info->ppi_rows;
953 : else
954 9461 : path->rows = baserel->rows;
955 :
956 14743 : if (!enable_bitmapscan)
957 799 : startup_cost += disable_cost;
958 :
959 14743 : pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
960 : loop_count, &indexTotalCost,
961 : &tuples_fetched);
962 :
963 14743 : startup_cost += indexTotalCost;
964 14743 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
965 :
966 : /* Fetch estimated page costs for tablespace containing table. */
967 14743 : get_tablespace_page_costs(baserel->reltablespace,
968 : &spc_random_page_cost,
969 : &spc_seq_page_cost);
970 :
971 : /*
972 : * For small numbers of pages we should charge spc_random_page_cost
973 : * apiece, while if nearly all the table's pages are being read, it's more
974 : * appropriate to charge spc_seq_page_cost apiece. The effect is
975 : * nonlinear, too. For lack of a better idea, interpolate like this to
976 : * determine the cost per page.
977 : */
978 14743 : if (pages_fetched >= 2.0)
979 9730 : cost_per_page = spc_random_page_cost -
980 : (spc_random_page_cost - spc_seq_page_cost)
981 4865 : * sqrt(pages_fetched / T);
982 : else
983 9878 : cost_per_page = spc_random_page_cost;
984 :
985 14743 : run_cost += pages_fetched * cost_per_page;
986 :
987 : /*
988 : * Estimate CPU costs per tuple.
989 : *
990 : * Often the indexquals don't need to be rechecked at each tuple ... but
991 : * not always, especially not if there are enough tuples involved that the
992 : * bitmaps become lossy. For the moment, just assume they will be
993 : * rechecked always. This means we charge the full freight for all the
994 : * scan clauses.
995 : */
996 14743 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
997 :
998 14743 : startup_cost += qpqual_cost.startup;
999 14743 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1000 14743 : cpu_run_cost = cpu_per_tuple * tuples_fetched;
1001 :
1002 : /* Adjust costing for parallelism, if used. */
1003 14743 : if (path->parallel_workers > 0)
1004 : {
1005 143 : double parallel_divisor = get_parallel_divisor(path);
1006 :
1007 : /* The CPU cost is divided among all the workers. */
1008 143 : cpu_run_cost /= parallel_divisor;
1009 :
1010 143 : path->rows = clamp_row_est(path->rows / parallel_divisor);
1011 : }
1012 :
1013 :
1014 14743 : run_cost += cpu_run_cost;
1015 :
1016 : /* tlist eval costs are paid per output row, not per tuple scanned */
1017 14743 : startup_cost += path->pathtarget->cost.startup;
1018 14743 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1019 :
1020 14743 : path->startup_cost = startup_cost;
1021 14743 : path->total_cost = startup_cost + run_cost;
1022 14743 : }
1023 :
1024 : /*
1025 : * cost_bitmap_tree_node
1026 : * Extract cost and selectivity from a bitmap tree node (index/and/or)
1027 : */
1028 : void
1029 25652 : cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
1030 : {
1031 25652 : if (IsA(path, IndexPath))
1032 : {
1033 24729 : *cost = ((IndexPath *) path)->indextotalcost;
1034 24729 : *selec = ((IndexPath *) path)->indexselectivity;
1035 :
1036 : /*
1037 : * Charge a small amount per retrieved tuple to reflect the costs of
1038 : * manipulating the bitmap. This is mostly to make sure that a bitmap
1039 : * scan doesn't look to be the same cost as an indexscan to retrieve a
1040 : * single tuple.
1041 : */
1042 24729 : *cost += 0.1 * cpu_operator_cost * path->rows;
1043 : }
1044 923 : else if (IsA(path, BitmapAndPath))
1045 : {
1046 817 : *cost = path->total_cost;
1047 817 : *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1048 : }
1049 106 : else if (IsA(path, BitmapOrPath))
1050 : {
1051 106 : *cost = path->total_cost;
1052 106 : *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1053 : }
1054 : else
1055 : {
1056 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1057 : *cost = *selec = 0; /* keep compiler quiet */
1058 : }
1059 25652 : }
1060 :
1061 : /*
1062 : * cost_bitmap_and_node
1063 : * Estimate the cost of a BitmapAnd node
1064 : *
1065 : * Note that this considers only the costs of index scanning and bitmap
1066 : * creation, not the eventual heap access. In that sense the object isn't
1067 : * truly a Path, but it has enough path-like properties (costs in particular)
1068 : * to warrant treating it as one. We don't bother to set the path rows field,
1069 : * however.
1070 : */
1071 : void
1072 814 : cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
1073 : {
1074 : Cost totalCost;
1075 : Selectivity selec;
1076 : ListCell *l;
1077 :
1078 : /*
1079 : * We estimate AND selectivity on the assumption that the inputs are
1080 : * independent. This is probably often wrong, but we don't have the info
1081 : * to do better.
1082 : *
1083 : * The runtime cost of the BitmapAnd itself is estimated at 100x
1084 : * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1085 : * definitely too simplistic?
1086 : */
1087 814 : totalCost = 0.0;
1088 814 : selec = 1.0;
1089 2442 : foreach(l, path->bitmapquals)
1090 : {
1091 1628 : Path *subpath = (Path *) lfirst(l);
1092 : Cost subCost;
1093 : Selectivity subselec;
1094 :
1095 1628 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1096 :
1097 1628 : selec *= subselec;
1098 :
1099 1628 : totalCost += subCost;
1100 1628 : if (l != list_head(path->bitmapquals))
1101 814 : totalCost += 100.0 * cpu_operator_cost;
1102 : }
1103 814 : path->bitmapselectivity = selec;
1104 814 : path->path.rows = 0; /* per above, not used */
1105 814 : path->path.startup_cost = totalCost;
1106 814 : path->path.total_cost = totalCost;
1107 814 : }
1108 :
1109 : /*
1110 : * cost_bitmap_or_node
1111 : * Estimate the cost of a BitmapOr node
1112 : *
1113 : * See comments for cost_bitmap_and_node.
1114 : */
1115 : void
1116 45 : cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1117 : {
1118 : Cost totalCost;
1119 : Selectivity selec;
1120 : ListCell *l;
1121 :
1122 : /*
1123 : * We estimate OR selectivity on the assumption that the inputs are
1124 : * non-overlapping, since that's often the case in "x IN (list)" type
1125 : * situations. Of course, we clamp to 1.0 at the end.
1126 : *
1127 : * The runtime cost of the BitmapOr itself is estimated at 100x
1128 : * cpu_operator_cost for each tbm_union needed. Probably too small,
1129 : * definitely too simplistic? We are aware that the tbm_unions are
1130 : * optimized out when the inputs are BitmapIndexScans.
1131 : */
1132 45 : totalCost = 0.0;
1133 45 : selec = 0.0;
1134 140 : foreach(l, path->bitmapquals)
1135 : {
1136 95 : Path *subpath = (Path *) lfirst(l);
1137 : Cost subCost;
1138 : Selectivity subselec;
1139 :
1140 95 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1141 :
1142 95 : selec += subselec;
1143 :
1144 95 : totalCost += subCost;
1145 145 : if (l != list_head(path->bitmapquals) &&
1146 50 : !IsA(subpath, IndexPath))
1147 4 : totalCost += 100.0 * cpu_operator_cost;
1148 : }
1149 45 : path->bitmapselectivity = Min(selec, 1.0);
1150 45 : path->path.rows = 0; /* per above, not used */
1151 45 : path->path.startup_cost = totalCost;
1152 45 : path->path.total_cost = totalCost;
1153 45 : }
1154 :
1155 : /*
1156 : * cost_tidscan
1157 : * Determines and returns the cost of scanning a relation using TIDs.
1158 : *
1159 : * 'baserel' is the relation to be scanned
1160 : * 'tidquals' is the list of TID-checkable quals
1161 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1162 : */
1163 : void
1164 66 : cost_tidscan(Path *path, PlannerInfo *root,
1165 : RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1166 : {
1167 66 : Cost startup_cost = 0;
1168 66 : Cost run_cost = 0;
1169 66 : bool isCurrentOf = false;
1170 : QualCost qpqual_cost;
1171 : Cost cpu_per_tuple;
1172 : QualCost tid_qual_cost;
1173 : int ntuples;
1174 : ListCell *l;
1175 : double spc_random_page_cost;
1176 :
1177 : /* Should only be applied to base relations */
1178 66 : Assert(baserel->relid > 0);
1179 66 : Assert(baserel->rtekind == RTE_RELATION);
1180 :
1181 : /* Mark the path with the correct row estimate */
1182 66 : if (param_info)
1183 0 : path->rows = param_info->ppi_rows;
1184 : else
1185 66 : path->rows = baserel->rows;
1186 :
1187 : /* Count how many tuples we expect to retrieve */
1188 66 : ntuples = 0;
1189 134 : foreach(l, tidquals)
1190 : {
1191 68 : if (IsA(lfirst(l), ScalarArrayOpExpr))
1192 : {
1193 : /* Each element of the array yields 1 tuple */
1194 5 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
1195 5 : Node *arraynode = (Node *) lsecond(saop->args);
1196 :
1197 5 : ntuples += estimate_array_length(arraynode);
1198 : }
1199 63 : else if (IsA(lfirst(l), CurrentOfExpr))
1200 : {
1201 : /* CURRENT OF yields 1 tuple */
1202 57 : isCurrentOf = true;
1203 57 : ntuples++;
1204 : }
1205 : else
1206 : {
1207 : /* It's just CTID = something, count 1 tuple */
1208 6 : ntuples++;
1209 : }
1210 : }
1211 :
1212 : /*
1213 : * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1214 : * understands how to do it correctly. Therefore, honor enable_tidscan
1215 : * only when CURRENT OF isn't present. Also note that cost_qual_eval
1216 : * counts a CurrentOfExpr as having startup cost disable_cost, which we
1217 : * subtract off here; that's to prevent other plan types such as seqscan
1218 : * from winning.
1219 : */
1220 66 : if (isCurrentOf)
1221 : {
1222 57 : Assert(baserel->baserestrictcost.startup >= disable_cost);
1223 57 : startup_cost -= disable_cost;
1224 : }
1225 9 : else if (!enable_tidscan)
1226 0 : startup_cost += disable_cost;
1227 :
1228 : /*
1229 : * The TID qual expressions will be computed once, any other baserestrict
1230 : * quals once per retrieved tuple.
1231 : */
1232 66 : cost_qual_eval(&tid_qual_cost, tidquals, root);
1233 :
1234 : /* fetch estimated page cost for tablespace containing table */
1235 66 : get_tablespace_page_costs(baserel->reltablespace,
1236 : &spc_random_page_cost,
1237 : NULL);
1238 :
1239 : /* disk costs --- assume each tuple on a different page */
1240 66 : run_cost += spc_random_page_cost * ntuples;
1241 :
1242 : /* Add scanning CPU costs */
1243 66 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1244 :
1245 : /* XXX currently we assume TID quals are a subset of qpquals */
1246 66 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1247 132 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1248 66 : tid_qual_cost.per_tuple;
1249 66 : run_cost += cpu_per_tuple * ntuples;
1250 :
1251 : /* tlist eval costs are paid per output row, not per tuple scanned */
1252 66 : startup_cost += path->pathtarget->cost.startup;
1253 66 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1254 :
1255 66 : path->startup_cost = startup_cost;
1256 66 : path->total_cost = startup_cost + run_cost;
1257 66 : }
1258 :
1259 : /*
1260 : * cost_subqueryscan
1261 : * Determines and returns the cost of scanning a subquery RTE.
1262 : *
1263 : * 'baserel' is the relation to be scanned
1264 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1265 : */
1266 : void
1267 1111 : cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1268 : RelOptInfo *baserel, ParamPathInfo *param_info)
1269 : {
1270 : Cost startup_cost;
1271 : Cost run_cost;
1272 : QualCost qpqual_cost;
1273 : Cost cpu_per_tuple;
1274 :
1275 : /* Should only be applied to base relations that are subqueries */
1276 1111 : Assert(baserel->relid > 0);
1277 1111 : Assert(baserel->rtekind == RTE_SUBQUERY);
1278 :
1279 : /* Mark the path with the correct row estimate */
1280 1111 : if (param_info)
1281 61 : path->path.rows = param_info->ppi_rows;
1282 : else
1283 1050 : path->path.rows = baserel->rows;
1284 :
1285 : /*
1286 : * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1287 : * any restriction clauses and tlist that will be attached to the
1288 : * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1289 : * projection overhead.
1290 : */
1291 1111 : path->path.startup_cost = path->subpath->startup_cost;
1292 1111 : path->path.total_cost = path->subpath->total_cost;
1293 :
1294 1111 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1295 :
1296 1111 : startup_cost = qpqual_cost.startup;
1297 1111 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1298 1111 : run_cost = cpu_per_tuple * baserel->tuples;
1299 :
1300 : /* tlist eval costs are paid per output row, not per tuple scanned */
1301 1111 : startup_cost += path->path.pathtarget->cost.startup;
1302 1111 : run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1303 :
1304 1111 : path->path.startup_cost += startup_cost;
1305 1111 : path->path.total_cost += startup_cost + run_cost;
1306 1111 : }
1307 :
1308 : /*
1309 : * cost_functionscan
1310 : * Determines and returns the cost of scanning a function RTE.
1311 : *
1312 : * 'baserel' is the relation to be scanned
1313 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1314 : */
1315 : void
1316 1327 : cost_functionscan(Path *path, PlannerInfo *root,
1317 : RelOptInfo *baserel, ParamPathInfo *param_info)
1318 : {
1319 1327 : Cost startup_cost = 0;
1320 1327 : Cost run_cost = 0;
1321 : QualCost qpqual_cost;
1322 : Cost cpu_per_tuple;
1323 : RangeTblEntry *rte;
1324 : QualCost exprcost;
1325 :
1326 : /* Should only be applied to base relations that are functions */
1327 1327 : Assert(baserel->relid > 0);
1328 1327 : rte = planner_rt_fetch(baserel->relid, root);
1329 1327 : Assert(rte->rtekind == RTE_FUNCTION);
1330 :
1331 : /* Mark the path with the correct row estimate */
1332 1327 : if (param_info)
1333 51 : path->rows = param_info->ppi_rows;
1334 : else
1335 1276 : path->rows = baserel->rows;
1336 :
1337 : /*
1338 : * Estimate costs of executing the function expression(s).
1339 : *
1340 : * Currently, nodeFunctionscan.c always executes the functions to
1341 : * completion before returning any rows, and caches the results in a
1342 : * tuplestore. So the function eval cost is all startup cost, and per-row
1343 : * costs are minimal.
1344 : *
1345 : * XXX in principle we ought to charge tuplestore spill costs if the
1346 : * number of rows is large. However, given how phony our rowcount
1347 : * estimates for functions tend to be, there's not a lot of point in that
1348 : * refinement right now.
1349 : */
1350 1327 : cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1351 :
1352 1327 : startup_cost += exprcost.startup + exprcost.per_tuple;
1353 :
1354 : /* Add scanning CPU costs */
1355 1327 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1356 :
1357 1327 : startup_cost += qpqual_cost.startup;
1358 1327 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1359 1327 : run_cost += cpu_per_tuple * baserel->tuples;
1360 :
1361 : /* tlist eval costs are paid per output row, not per tuple scanned */
1362 1327 : startup_cost += path->pathtarget->cost.startup;
1363 1327 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1364 :
1365 1327 : path->startup_cost = startup_cost;
1366 1327 : path->total_cost = startup_cost + run_cost;
1367 1327 : }
1368 :
1369 : /*
1370 : * cost_tablefuncscan
1371 : * Determines and returns the cost of scanning a table function.
1372 : *
1373 : * 'baserel' is the relation to be scanned
1374 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1375 : */
1376 : void
1377 22 : cost_tablefuncscan(Path *path, PlannerInfo *root,
1378 : RelOptInfo *baserel, ParamPathInfo *param_info)
1379 : {
1380 22 : Cost startup_cost = 0;
1381 22 : Cost run_cost = 0;
1382 : QualCost qpqual_cost;
1383 : Cost cpu_per_tuple;
1384 : RangeTblEntry *rte;
1385 : QualCost exprcost;
1386 :
1387 : /* Should only be applied to base relations that are functions */
1388 22 : Assert(baserel->relid > 0);
1389 22 : rte = planner_rt_fetch(baserel->relid, root);
1390 22 : Assert(rte->rtekind == RTE_TABLEFUNC);
1391 :
1392 : /* Mark the path with the correct row estimate */
1393 22 : if (param_info)
1394 22 : path->rows = param_info->ppi_rows;
1395 : else
1396 0 : path->rows = baserel->rows;
1397 :
1398 : /*
1399 : * Estimate costs of executing the table func expression(s).
1400 : *
1401 : * XXX in principle we ought to charge tuplestore spill costs if the
1402 : * number of rows is large. However, given how phony our rowcount
1403 : * estimates for tablefuncs tend to be, there's not a lot of point in that
1404 : * refinement right now.
1405 : */
1406 22 : cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1407 :
1408 22 : startup_cost += exprcost.startup + exprcost.per_tuple;
1409 :
1410 : /* Add scanning CPU costs */
1411 22 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1412 :
1413 22 : startup_cost += qpqual_cost.startup;
1414 22 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1415 22 : run_cost += cpu_per_tuple * baserel->tuples;
1416 :
1417 : /* tlist eval costs are paid per output row, not per tuple scanned */
1418 22 : startup_cost += path->pathtarget->cost.startup;
1419 22 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1420 :
1421 22 : path->startup_cost = startup_cost;
1422 22 : path->total_cost = startup_cost + run_cost;
1423 22 : }
1424 :
1425 : /*
1426 : * cost_valuesscan
1427 : * Determines and returns the cost of scanning a VALUES RTE.
1428 : *
1429 : * 'baserel' is the relation to be scanned
1430 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1431 : */
1432 : void
1433 463 : cost_valuesscan(Path *path, PlannerInfo *root,
1434 : RelOptInfo *baserel, ParamPathInfo *param_info)
1435 : {
1436 463 : Cost startup_cost = 0;
1437 463 : Cost run_cost = 0;
1438 : QualCost qpqual_cost;
1439 : Cost cpu_per_tuple;
1440 :
1441 : /* Should only be applied to base relations that are values lists */
1442 463 : Assert(baserel->relid > 0);
1443 463 : Assert(baserel->rtekind == RTE_VALUES);
1444 :
1445 : /* Mark the path with the correct row estimate */
1446 463 : if (param_info)
1447 6 : path->rows = param_info->ppi_rows;
1448 : else
1449 457 : path->rows = baserel->rows;
1450 :
1451 : /*
1452 : * For now, estimate list evaluation cost at one operator eval per list
1453 : * (probably pretty bogus, but is it worth being smarter?)
1454 : */
1455 463 : cpu_per_tuple = cpu_operator_cost;
1456 :
1457 : /* Add scanning CPU costs */
1458 463 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1459 :
1460 463 : startup_cost += qpqual_cost.startup;
1461 463 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1462 463 : run_cost += cpu_per_tuple * baserel->tuples;
1463 :
1464 : /* tlist eval costs are paid per output row, not per tuple scanned */
1465 463 : startup_cost += path->pathtarget->cost.startup;
1466 463 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1467 :
1468 463 : path->startup_cost = startup_cost;
1469 463 : path->total_cost = startup_cost + run_cost;
1470 463 : }
1471 :
1472 : /*
1473 : * cost_ctescan
1474 : * Determines and returns the cost of scanning a CTE RTE.
1475 : *
1476 : * Note: this is used for both self-reference and regular CTEs; the
1477 : * possible cost differences are below the threshold of what we could
1478 : * estimate accurately anyway. Note that the costs of evaluating the
1479 : * referenced CTE query are added into the final plan as initplan costs,
1480 : * and should NOT be counted here.
1481 : */
1482 : void
1483 202 : cost_ctescan(Path *path, PlannerInfo *root,
1484 : RelOptInfo *baserel, ParamPathInfo *param_info)
1485 : {
1486 202 : Cost startup_cost = 0;
1487 202 : Cost run_cost = 0;
1488 : QualCost qpqual_cost;
1489 : Cost cpu_per_tuple;
1490 :
1491 : /* Should only be applied to base relations that are CTEs */
1492 202 : Assert(baserel->relid > 0);
1493 202 : Assert(baserel->rtekind == RTE_CTE);
1494 :
1495 : /* Mark the path with the correct row estimate */
1496 202 : if (param_info)
1497 0 : path->rows = param_info->ppi_rows;
1498 : else
1499 202 : path->rows = baserel->rows;
1500 :
1501 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1502 202 : cpu_per_tuple = cpu_tuple_cost;
1503 :
1504 : /* Add scanning CPU costs */
1505 202 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1506 :
1507 202 : startup_cost += qpqual_cost.startup;
1508 202 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1509 202 : run_cost += cpu_per_tuple * baserel->tuples;
1510 :
1511 : /* tlist eval costs are paid per output row, not per tuple scanned */
1512 202 : startup_cost += path->pathtarget->cost.startup;
1513 202 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1514 :
1515 202 : path->startup_cost = startup_cost;
1516 202 : path->total_cost = startup_cost + run_cost;
1517 202 : }
1518 :
1519 : /*
1520 : * cost_namedtuplestorescan
1521 : * Determines and returns the cost of scanning a named tuplestore.
1522 : */
1523 : void
1524 43 : cost_namedtuplestorescan(Path *path, PlannerInfo *root,
1525 : RelOptInfo *baserel, ParamPathInfo *param_info)
1526 : {
1527 43 : Cost startup_cost = 0;
1528 43 : Cost run_cost = 0;
1529 : QualCost qpqual_cost;
1530 : Cost cpu_per_tuple;
1531 :
1532 : /* Should only be applied to base relations that are Tuplestores */
1533 43 : Assert(baserel->relid > 0);
1534 43 : Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1535 :
1536 : /* Mark the path with the correct row estimate */
1537 43 : if (param_info)
1538 0 : path->rows = param_info->ppi_rows;
1539 : else
1540 43 : path->rows = baserel->rows;
1541 :
1542 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1543 43 : cpu_per_tuple = cpu_tuple_cost;
1544 :
1545 : /* Add scanning CPU costs */
1546 43 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1547 :
1548 43 : startup_cost += qpqual_cost.startup;
1549 43 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1550 43 : run_cost += cpu_per_tuple * baserel->tuples;
1551 :
1552 43 : path->startup_cost = startup_cost;
1553 43 : path->total_cost = startup_cost + run_cost;
1554 43 : }
1555 :
1556 : /*
1557 : * cost_recursive_union
1558 : * Determines and returns the cost of performing a recursive union,
1559 : * and also the estimated output size.
1560 : *
1561 : * We are given Paths for the nonrecursive and recursive terms.
1562 : */
1563 : void
1564 40 : cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1565 : {
1566 : Cost startup_cost;
1567 : Cost total_cost;
1568 : double total_rows;
1569 :
1570 : /* We probably have decent estimates for the non-recursive term */
1571 40 : startup_cost = nrterm->startup_cost;
1572 40 : total_cost = nrterm->total_cost;
1573 40 : total_rows = nrterm->rows;
1574 :
1575 : /*
1576 : * We arbitrarily assume that about 10 recursive iterations will be
1577 : * needed, and that we've managed to get a good fix on the cost and output
1578 : * size of each one of them. These are mighty shaky assumptions but it's
1579 : * hard to see how to do better.
1580 : */
1581 40 : total_cost += 10 * rterm->total_cost;
1582 40 : total_rows += 10 * rterm->rows;
1583 :
1584 : /*
1585 : * Also charge cpu_tuple_cost per row to account for the costs of
1586 : * manipulating the tuplestores. (We don't worry about possible
1587 : * spill-to-disk costs.)
1588 : */
1589 40 : total_cost += cpu_tuple_cost * total_rows;
1590 :
1591 40 : runion->startup_cost = startup_cost;
1592 40 : runion->total_cost = total_cost;
1593 40 : runion->rows = total_rows;
1594 40 : runion->pathtarget->width = Max(nrterm->pathtarget->width,
1595 : rterm->pathtarget->width);
1596 40 : }
1597 :
1598 : /*
1599 : * cost_sort
1600 : * Determines and returns the cost of sorting a relation, including
1601 : * the cost of reading the input data.
1602 : *
1603 : * If the total volume of data to sort is less than sort_mem, we will do
1604 : * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1605 : * comparisons for t tuples.
1606 : *
1607 : * If the total volume exceeds sort_mem, we switch to a tape-style merge
1608 : * algorithm. There will still be about t*log2(t) tuple comparisons in
1609 : * total, but we will also need to write and read each tuple once per
1610 : * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1611 : * number of initial runs formed and M is the merge order used by tuplesort.c.
1612 : * Since the average initial run should be about sort_mem, we have
1613 : * disk traffic = 2 * relsize * ceil(logM(p / sort_mem))
1614 : * cpu = comparison_cost * t * log2(t)
1615 : *
1616 : * If the sort is bounded (i.e., only the first k result tuples are needed)
1617 : * and k tuples can fit into sort_mem, we use a heap method that keeps only
1618 : * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1619 : *
1620 : * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1621 : * accesses (XXX can't we refine that guess?)
1622 : *
1623 : * By default, we charge two operator evals per tuple comparison, which should
1624 : * be in the right ballpark in most cases. The caller can tweak this by
1625 : * specifying nonzero comparison_cost; typically that's used for any extra
1626 : * work that has to be done to prepare the inputs to the comparison operators.
1627 : *
1628 : * 'pathkeys' is a list of sort keys
1629 : * 'input_cost' is the total cost for reading the input data
1630 : * 'tuples' is the number of tuples in the relation
1631 : * 'width' is the average tuple width in bytes
1632 : * 'comparison_cost' is the extra cost per comparison, if any
1633 : * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1634 : * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1635 : *
1636 : * NOTE: some callers currently pass NIL for pathkeys because they
1637 : * can't conveniently supply the sort keys. Since this routine doesn't
1638 : * currently do anything with pathkeys anyway, that doesn't matter...
1639 : * but if it ever does, it should react gracefully to lack of key data.
1640 : * (Actually, the thing we'd most likely be interested in is just the number
1641 : * of sort keys, which all callers *could* supply.)
1642 : */
1643 : void
1644 31343 : cost_sort(Path *path, PlannerInfo *root,
1645 : List *pathkeys, Cost input_cost, double tuples, int width,
1646 : Cost comparison_cost, int sort_mem,
1647 : double limit_tuples)
1648 : {
1649 31343 : Cost startup_cost = input_cost;
1650 31343 : Cost run_cost = 0;
1651 31343 : double input_bytes = relation_byte_size(tuples, width);
1652 : double output_bytes;
1653 : double output_tuples;
1654 31343 : long sort_mem_bytes = sort_mem * 1024L;
1655 :
1656 31343 : if (!enable_sort)
1657 44 : startup_cost += disable_cost;
1658 :
1659 31343 : path->rows = tuples;
1660 :
1661 : /*
1662 : * We want to be sure the cost of a sort is never estimated as zero, even
1663 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1664 : */
1665 31343 : if (tuples < 2.0)
1666 7868 : tuples = 2.0;
1667 :
1668 : /* Include the default cost-per-comparison */
1669 31343 : comparison_cost += 2.0 * cpu_operator_cost;
1670 :
1671 : /* Do we have a useful LIMIT? */
1672 31343 : if (limit_tuples > 0 && limit_tuples < tuples)
1673 : {
1674 55 : output_tuples = limit_tuples;
1675 55 : output_bytes = relation_byte_size(output_tuples, width);
1676 : }
1677 : else
1678 : {
1679 31288 : output_tuples = tuples;
1680 31288 : output_bytes = input_bytes;
1681 : }
1682 :
1683 31343 : if (output_bytes > sort_mem_bytes)
1684 : {
1685 : /*
1686 : * We'll have to use a disk-based sort of all the tuples
1687 : */
1688 466 : double npages = ceil(input_bytes / BLCKSZ);
1689 466 : double nruns = input_bytes / sort_mem_bytes;
1690 466 : double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1691 : double log_runs;
1692 : double npageaccesses;
1693 :
1694 : /*
1695 : * CPU costs
1696 : *
1697 : * Assume about N log2 N comparisons
1698 : */
1699 466 : startup_cost += comparison_cost * tuples * LOG2(tuples);
1700 :
1701 : /* Disk costs */
1702 :
1703 : /* Compute logM(r) as log(r) / log(M) */
1704 466 : if (nruns > mergeorder)
1705 350 : log_runs = ceil(log(nruns) / log(mergeorder));
1706 : else
1707 116 : log_runs = 1.0;
1708 466 : npageaccesses = 2.0 * npages * log_runs;
1709 : /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1710 466 : startup_cost += npageaccesses *
1711 : (seq_page_cost * 0.75 + random_page_cost * 0.25);
1712 : }
1713 30877 : else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1714 : {
1715 : /*
1716 : * We'll use a bounded heap-sort keeping just K tuples in memory, for
1717 : * a total number of tuple comparisons of N log2 K; but the constant
1718 : * factor is a bit higher than for quicksort. Tweak it so that the
1719 : * cost curve is continuous at the crossover point.
1720 : */
1721 42 : startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
1722 : }
1723 : else
1724 : {
1725 : /* We'll use plain quicksort on all the input tuples */
1726 30835 : startup_cost += comparison_cost * tuples * LOG2(tuples);
1727 : }
1728 :
1729 : /*
1730 : * Also charge a small amount (arbitrarily set equal to operator cost) per
1731 : * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1732 : * doesn't do qual-checking or projection, so it has less overhead than
1733 : * most plan nodes. Note it's correct to use tuples not output_tuples
1734 : * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1735 : * counting the LIMIT otherwise.
1736 : */
1737 31343 : run_cost += cpu_operator_cost * tuples;
1738 :
1739 31343 : path->startup_cost = startup_cost;
1740 31343 : path->total_cost = startup_cost + run_cost;
1741 31343 : }
1742 :
1743 : /*
1744 : * cost_merge_append
1745 : * Determines and returns the cost of a MergeAppend node.
1746 : *
1747 : * MergeAppend merges several pre-sorted input streams, using a heap that
1748 : * at any given instant holds the next tuple from each stream. If there
1749 : * are N streams, we need about N*log2(N) tuple comparisons to construct
1750 : * the heap at startup, and then for each output tuple, about log2(N)
1751 : * comparisons to replace the top entry.
1752 : *
1753 : * (The effective value of N will drop once some of the input streams are
1754 : * exhausted, but it seems unlikely to be worth trying to account for that.)
1755 : *
1756 : * The heap is never spilled to disk, since we assume N is not very large.
1757 : * So this is much simpler than cost_sort.
1758 : *
1759 : * As in cost_sort, we charge two operator evals per tuple comparison.
1760 : *
1761 : * 'pathkeys' is a list of sort keys
1762 : * 'n_streams' is the number of input streams
1763 : * 'input_startup_cost' is the sum of the input streams' startup costs
1764 : * 'input_total_cost' is the sum of the input streams' total costs
1765 : * 'tuples' is the number of tuples in all the streams
1766 : */
1767 : void
1768 81 : cost_merge_append(Path *path, PlannerInfo *root,
1769 : List *pathkeys, int n_streams,
1770 : Cost input_startup_cost, Cost input_total_cost,
1771 : double tuples)
1772 : {
1773 81 : Cost startup_cost = 0;
1774 81 : Cost run_cost = 0;
1775 : Cost comparison_cost;
1776 : double N;
1777 : double logN;
1778 :
1779 : /*
1780 : * Avoid log(0)...
1781 : */
1782 81 : N = (n_streams < 2) ? 2.0 : (double) n_streams;
1783 81 : logN = LOG2(N);
1784 :
1785 : /* Assumed cost per tuple comparison */
1786 81 : comparison_cost = 2.0 * cpu_operator_cost;
1787 :
1788 : /* Heap creation cost */
1789 81 : startup_cost += comparison_cost * N * logN;
1790 :
1791 : /* Per-tuple heap maintenance cost */
1792 81 : run_cost += tuples * comparison_cost * logN;
1793 :
1794 : /*
1795 : * Also charge a small amount (arbitrarily set equal to operator cost) per
1796 : * extracted tuple. We don't charge cpu_tuple_cost because a MergeAppend
1797 : * node doesn't do qual-checking or projection, so it has less overhead
1798 : * than most plan nodes.
1799 : */
1800 81 : run_cost += cpu_operator_cost * tuples;
1801 :
1802 81 : path->startup_cost = startup_cost + input_startup_cost;
1803 81 : path->total_cost = startup_cost + run_cost + input_total_cost;
1804 81 : }
1805 :
1806 : /*
1807 : * cost_material
1808 : * Determines and returns the cost of materializing a relation, including
1809 : * the cost of reading the input data.
1810 : *
1811 : * If the total volume of data to materialize exceeds work_mem, we will need
1812 : * to write it to disk, so the cost is much higher in that case.
1813 : *
1814 : * Note that here we are estimating the costs for the first scan of the
1815 : * relation, so the materialization is all overhead --- any savings will
1816 : * occur only on rescan, which is estimated in cost_rescan.
1817 : */
1818 : void
1819 12697 : cost_material(Path *path,
1820 : Cost input_startup_cost, Cost input_total_cost,
1821 : double tuples, int width)
1822 : {
1823 12697 : Cost startup_cost = input_startup_cost;
1824 12697 : Cost run_cost = input_total_cost - input_startup_cost;
1825 12697 : double nbytes = relation_byte_size(tuples, width);
1826 12697 : long work_mem_bytes = work_mem * 1024L;
1827 :
1828 12697 : path->rows = tuples;
1829 :
1830 : /*
1831 : * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
1832 : * reflect bookkeeping overhead. (This rate must be more than what
1833 : * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
1834 : * if it is exactly the same then there will be a cost tie between
1835 : * nestloop with A outer, materialized B inner and nestloop with B outer,
1836 : * materialized A inner. The extra cost ensures we'll prefer
1837 : * materializing the smaller rel.) Note that this is normally a good deal
1838 : * less than cpu_tuple_cost; which is OK because a Material plan node
1839 : * doesn't do qual-checking or projection, so it's got less overhead than
1840 : * most plan nodes.
1841 : */
1842 12697 : run_cost += 2 * cpu_operator_cost * tuples;
1843 :
1844 : /*
1845 : * If we will spill to disk, charge at the rate of seq_page_cost per page.
1846 : * This cost is assumed to be evenly spread through the plan run phase,
1847 : * which isn't exactly accurate but our cost model doesn't allow for
1848 : * nonuniform costs within the run phase.
1849 : */
1850 12697 : if (nbytes > work_mem_bytes)
1851 : {
1852 132 : double npages = ceil(nbytes / BLCKSZ);
1853 :
1854 132 : run_cost += seq_page_cost * npages;
1855 : }
1856 :
1857 12697 : path->startup_cost = startup_cost;
1858 12697 : path->total_cost = startup_cost + run_cost;
1859 12697 : }
1860 :
1861 : /*
1862 : * cost_agg
1863 : * Determines and returns the cost of performing an Agg plan node,
1864 : * including the cost of its input.
1865 : *
1866 : * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
1867 : * we are using a hashed Agg node just to do grouping).
1868 : *
1869 : * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1870 : * are for appropriately-sorted input.
1871 : */
1872 : void
1873 3589 : cost_agg(Path *path, PlannerInfo *root,
1874 : AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
1875 : int numGroupCols, double numGroups,
1876 : Cost input_startup_cost, Cost input_total_cost,
1877 : double input_tuples)
1878 : {
1879 : double output_tuples;
1880 : Cost startup_cost;
1881 : Cost total_cost;
1882 : AggClauseCosts dummy_aggcosts;
1883 :
1884 : /* Use all-zero per-aggregate costs if NULL is passed */
1885 3589 : if (aggcosts == NULL)
1886 : {
1887 456 : Assert(aggstrategy == AGG_HASHED);
1888 456 : MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
1889 456 : aggcosts = &dummy_aggcosts;
1890 : }
1891 :
1892 : /*
1893 : * The transCost.per_tuple component of aggcosts should be charged once
1894 : * per input tuple, corresponding to the costs of evaluating the aggregate
1895 : * transfns and their input expressions (with any startup cost of course
1896 : * charged but once). The finalCost component is charged once per output
1897 : * tuple, corresponding to the costs of evaluating the finalfns.
1898 : *
1899 : * If we are grouping, we charge an additional cpu_operator_cost per
1900 : * grouping column per input tuple for grouping comparisons.
1901 : *
1902 : * We will produce a single output tuple if not grouping, and a tuple per
1903 : * group otherwise. We charge cpu_tuple_cost for each output tuple.
1904 : *
1905 : * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1906 : * same total CPU cost, but AGG_SORTED has lower startup cost. If the
1907 : * input path is already sorted appropriately, AGG_SORTED should be
1908 : * preferred (since it has no risk of memory overflow). This will happen
1909 : * as long as the computed total costs are indeed exactly equal --- but if
1910 : * there's roundoff error we might do the wrong thing. So be sure that
1911 : * the computations below form the same intermediate values in the same
1912 : * order.
1913 : */
1914 3589 : if (aggstrategy == AGG_PLAIN)
1915 : {
1916 2189 : startup_cost = input_total_cost;
1917 2189 : startup_cost += aggcosts->transCost.startup;
1918 2189 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1919 2189 : startup_cost += aggcosts->finalCost;
1920 : /* we aren't grouping */
1921 2189 : total_cost = startup_cost + cpu_tuple_cost;
1922 2189 : output_tuples = 1;
1923 : }
1924 1400 : else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
1925 : {
1926 : /* Here we are able to deliver output on-the-fly */
1927 569 : startup_cost = input_startup_cost;
1928 569 : total_cost = input_total_cost;
1929 569 : if (aggstrategy == AGG_MIXED && !enable_hashagg)
1930 : {
1931 49 : startup_cost += disable_cost;
1932 49 : total_cost += disable_cost;
1933 : }
1934 : /* calcs phrased this way to match HASHED case, see note above */
1935 569 : total_cost += aggcosts->transCost.startup;
1936 569 : total_cost += aggcosts->transCost.per_tuple * input_tuples;
1937 569 : total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1938 569 : total_cost += aggcosts->finalCost * numGroups;
1939 569 : total_cost += cpu_tuple_cost * numGroups;
1940 569 : output_tuples = numGroups;
1941 : }
1942 : else
1943 : {
1944 : /* must be AGG_HASHED */
1945 831 : startup_cost = input_total_cost;
1946 831 : if (!enable_hashagg)
1947 118 : startup_cost += disable_cost;
1948 831 : startup_cost += aggcosts->transCost.startup;
1949 831 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1950 831 : startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1951 831 : total_cost = startup_cost;
1952 831 : total_cost += aggcosts->finalCost * numGroups;
1953 831 : total_cost += cpu_tuple_cost * numGroups;
1954 831 : output_tuples = numGroups;
1955 : }
1956 :
1957 3589 : path->rows = output_tuples;
1958 3589 : path->startup_cost = startup_cost;
1959 3589 : path->total_cost = total_cost;
1960 3589 : }
1961 :
1962 : /*
1963 : * cost_windowagg
1964 : * Determines and returns the cost of performing a WindowAgg plan node,
1965 : * including the cost of its input.
1966 : *
1967 : * Input is assumed already properly sorted.
1968 : */
1969 : void
1970 147 : cost_windowagg(Path *path, PlannerInfo *root,
1971 : List *windowFuncs, int numPartCols, int numOrderCols,
1972 : Cost input_startup_cost, Cost input_total_cost,
1973 : double input_tuples)
1974 : {
1975 : Cost startup_cost;
1976 : Cost total_cost;
1977 : ListCell *lc;
1978 :
1979 147 : startup_cost = input_startup_cost;
1980 147 : total_cost = input_total_cost;
1981 :
1982 : /*
1983 : * Window functions are assumed to cost their stated execution cost, plus
1984 : * the cost of evaluating their input expressions, per tuple. Since they
1985 : * may in fact evaluate their inputs at multiple rows during each cycle,
1986 : * this could be a drastic underestimate; but without a way to know how
1987 : * many rows the window function will fetch, it's hard to do better. In
1988 : * any case, it's a good estimate for all the built-in window functions,
1989 : * so we'll just do this for now.
1990 : */
1991 319 : foreach(lc, windowFuncs)
1992 : {
1993 172 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
1994 : Cost wfunccost;
1995 : QualCost argcosts;
1996 :
1997 172 : wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
1998 :
1999 : /* also add the input expressions' cost to per-input-row costs */
2000 172 : cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
2001 172 : startup_cost += argcosts.startup;
2002 172 : wfunccost += argcosts.per_tuple;
2003 :
2004 : /*
2005 : * Add the filter's cost to per-input-row costs. XXX We should reduce
2006 : * input expression costs according to filter selectivity.
2007 : */
2008 172 : cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
2009 172 : startup_cost += argcosts.startup;
2010 172 : wfunccost += argcosts.per_tuple;
2011 :
2012 172 : total_cost += wfunccost * input_tuples;
2013 : }
2014 :
2015 : /*
2016 : * We also charge cpu_operator_cost per grouping column per tuple for
2017 : * grouping comparisons, plus cpu_tuple_cost per tuple for general
2018 : * overhead.
2019 : *
2020 : * XXX this neglects costs of spooling the data to disk when it overflows
2021 : * work_mem. Sooner or later that should get accounted for.
2022 : */
2023 147 : total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
2024 147 : total_cost += cpu_tuple_cost * input_tuples;
2025 :
2026 147 : path->rows = input_tuples;
2027 147 : path->startup_cost = startup_cost;
2028 147 : path->total_cost = total_cost;
2029 147 : }
2030 :
2031 : /*
2032 : * cost_group
2033 : * Determines and returns the cost of performing a Group plan node,
2034 : * including the cost of its input.
2035 : *
2036 : * Note: caller must ensure that input costs are for appropriately-sorted
2037 : * input.
2038 : */
2039 : void
2040 130 : cost_group(Path *path, PlannerInfo *root,
2041 : int numGroupCols, double numGroups,
2042 : Cost input_startup_cost, Cost input_total_cost,
2043 : double input_tuples)
2044 : {
2045 : Cost startup_cost;
2046 : Cost total_cost;
2047 :
2048 130 : startup_cost = input_startup_cost;
2049 130 : total_cost = input_total_cost;
2050 :
2051 : /*
2052 : * Charge one cpu_operator_cost per comparison per input tuple. We assume
2053 : * all columns get compared at most of the tuples.
2054 : */
2055 130 : total_cost += cpu_operator_cost * input_tuples * numGroupCols;
2056 :
2057 130 : path->rows = numGroups;
2058 130 : path->startup_cost = startup_cost;
2059 130 : path->total_cost = total_cost;
2060 130 : }
2061 :
2062 : /*
2063 : * initial_cost_nestloop
2064 : * Preliminary estimate of the cost of a nestloop join path.
2065 : *
2066 : * This must quickly produce lower-bound estimates of the path's startup and
2067 : * total costs. If we are unable to eliminate the proposed path from
2068 : * consideration using the lower bounds, final_cost_nestloop will be called
2069 : * to obtain the final estimates.
2070 : *
2071 : * The exact division of labor between this function and final_cost_nestloop
2072 : * is private to them, and represents a tradeoff between speed of the initial
2073 : * estimate and getting a tight lower bound. We choose to not examine the
2074 : * join quals here, since that's by far the most expensive part of the
2075 : * calculations. The end result is that CPU-cost considerations must be
2076 : * left for the second phase; and for SEMI/ANTI joins, we must also postpone
2077 : * incorporation of the inner path's run cost.
2078 : *
2079 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2080 : * other data to be used by final_cost_nestloop
2081 : * 'jointype' is the type of join to be performed
2082 : * 'outer_path' is the outer input to the join
2083 : * 'inner_path' is the inner input to the join
2084 : * 'extra' contains miscellaneous information about the join
2085 : */
2086 : void
2087 49901 : initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
2088 : JoinType jointype,
2089 : Path *outer_path, Path *inner_path,
2090 : JoinPathExtraData *extra)
2091 : {
2092 49901 : Cost startup_cost = 0;
2093 49901 : Cost run_cost = 0;
2094 49901 : double outer_path_rows = outer_path->rows;
2095 : Cost inner_rescan_start_cost;
2096 : Cost inner_rescan_total_cost;
2097 : Cost inner_run_cost;
2098 : Cost inner_rescan_run_cost;
2099 :
2100 : /* estimate costs to rescan the inner relation */
2101 49901 : cost_rescan(root, inner_path,
2102 : &inner_rescan_start_cost,
2103 : &inner_rescan_total_cost);
2104 :
2105 : /* cost of source data */
2106 :
2107 : /*
2108 : * NOTE: clearly, we must pay both outer and inner paths' startup_cost
2109 : * before we can start returning tuples, so the join's startup cost is
2110 : * their sum. We'll also pay the inner path's rescan startup cost
2111 : * multiple times.
2112 : */
2113 49901 : startup_cost += outer_path->startup_cost + inner_path->startup_cost;
2114 49901 : run_cost += outer_path->total_cost - outer_path->startup_cost;
2115 49901 : if (outer_path_rows > 1)
2116 34844 : run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
2117 :
2118 49901 : inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
2119 49901 : inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
2120 :
2121 98132 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
2122 48231 : extra->inner_unique)
2123 : {
2124 : /*
2125 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
2126 : * executor will stop after the first match.
2127 : *
2128 : * Getting decent estimates requires inspection of the join quals,
2129 : * which we choose to postpone to final_cost_nestloop.
2130 : */
2131 :
2132 : /* Save private data for final_cost_nestloop */
2133 20469 : workspace->inner_run_cost = inner_run_cost;
2134 20469 : workspace->inner_rescan_run_cost = inner_rescan_run_cost;
2135 : }
2136 : else
2137 : {
2138 : /* Normal case; we'll scan whole input rel for each outer row */
2139 29432 : run_cost += inner_run_cost;
2140 29432 : if (outer_path_rows > 1)
2141 21251 : run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
2142 : }
2143 :
2144 : /* CPU costs left for later */
2145 :
2146 : /* Public result fields */
2147 49901 : workspace->startup_cost = startup_cost;
2148 49901 : workspace->total_cost = startup_cost + run_cost;
2149 : /* Save private data for final_cost_nestloop */
2150 49901 : workspace->run_cost = run_cost;
2151 49901 : }
2152 :
2153 : /*
2154 : * final_cost_nestloop
2155 : * Final estimate of the cost and result size of a nestloop join path.
2156 : *
2157 : * 'path' is already filled in except for the rows and cost fields
2158 : * 'workspace' is the result from initial_cost_nestloop
2159 : * 'extra' contains miscellaneous information about the join
2160 : */
2161 : void
2162 25841 : final_cost_nestloop(PlannerInfo *root, NestPath *path,
2163 : JoinCostWorkspace *workspace,
2164 : JoinPathExtraData *extra)
2165 : {
2166 25841 : Path *outer_path = path->outerjoinpath;
2167 25841 : Path *inner_path = path->innerjoinpath;
2168 25841 : double outer_path_rows = outer_path->rows;
2169 25841 : double inner_path_rows = inner_path->rows;
2170 25841 : Cost startup_cost = workspace->startup_cost;
2171 25841 : Cost run_cost = workspace->run_cost;
2172 : Cost cpu_per_tuple;
2173 : QualCost restrict_qual_cost;
2174 : double ntuples;
2175 :
2176 : /* Protect some assumptions below that rowcounts aren't zero or NaN */
2177 25841 : if (outer_path_rows <= 0 || isnan(outer_path_rows))
2178 0 : outer_path_rows = 1;
2179 25841 : if (inner_path_rows <= 0 || isnan(inner_path_rows))
2180 2 : inner_path_rows = 1;
2181 :
2182 : /* Mark the path with the correct row estimate */
2183 25841 : if (path->path.param_info)
2184 480 : path->path.rows = path->path.param_info->ppi_rows;
2185 : else
2186 25361 : path->path.rows = path->path.parent->rows;
2187 :
2188 : /* For partial paths, scale row estimate. */
2189 25841 : if (path->path.parallel_workers > 0)
2190 : {
2191 22 : double parallel_divisor = get_parallel_divisor(&path->path);
2192 :
2193 22 : path->path.rows =
2194 22 : clamp_row_est(path->path.rows / parallel_divisor);
2195 : }
2196 :
2197 : /*
2198 : * We could include disable_cost in the preliminary estimate, but that
2199 : * would amount to optimizing for the case where the join method is
2200 : * disabled, which doesn't seem like the way to bet.
2201 : */
2202 25841 : if (!enable_nestloop)
2203 73 : startup_cost += disable_cost;
2204 :
2205 : /* cost of inner-relation source data (we already dealt with outer rel) */
2206 :
2207 50332 : if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI ||
2208 24491 : extra->inner_unique)
2209 14862 : {
2210 : /*
2211 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
2212 : * executor will stop after the first match.
2213 : */
2214 14862 : Cost inner_run_cost = workspace->inner_run_cost;
2215 14862 : Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
2216 : double outer_matched_rows;
2217 : double outer_unmatched_rows;
2218 : Selectivity inner_scan_frac;
2219 :
2220 : /*
2221 : * For an outer-rel row that has at least one match, we can expect the
2222 : * inner scan to stop after a fraction 1/(match_count+1) of the inner
2223 : * rows, if the matches are evenly distributed. Since they probably
2224 : * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
2225 : * that fraction. (If we used a larger fuzz factor, we'd have to
2226 : * clamp inner_scan_frac to at most 1.0; but since match_count is at
2227 : * least 1, no such clamp is needed now.)
2228 : */
2229 14862 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
2230 14862 : outer_unmatched_rows = outer_path_rows - outer_matched_rows;
2231 14862 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
2232 :
2233 : /*
2234 : * Compute number of tuples processed (not number emitted!). First,
2235 : * account for successfully-matched outer rows.
2236 : */
2237 14862 : ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
2238 :
2239 : /*
2240 : * Now we need to estimate the actual costs of scanning the inner
2241 : * relation, which may be quite a bit less than N times inner_run_cost
2242 : * due to early scan stops. We consider two cases. If the inner path
2243 : * is an indexscan using all the joinquals as indexquals, then an
2244 : * unmatched outer row results in an indexscan returning no rows,
2245 : * which is probably quite cheap. Otherwise, the executor will have
2246 : * to scan the whole inner rel for an unmatched row; not so cheap.
2247 : */
2248 14862 : if (has_indexed_join_quals(path))
2249 : {
2250 : /*
2251 : * Successfully-matched outer rows will only require scanning
2252 : * inner_scan_frac of the inner relation. In this case, we don't
2253 : * need to charge the full inner_run_cost even when that's more
2254 : * than inner_rescan_run_cost, because we can assume that none of
2255 : * the inner scans ever scan the whole inner relation. So it's
2256 : * okay to assume that all the inner scan executions can be
2257 : * fractions of the full cost, even if materialization is reducing
2258 : * the rescan cost. At this writing, it's impossible to get here
2259 : * for a materialized inner scan, so inner_run_cost and
2260 : * inner_rescan_run_cost will be the same anyway; but just in
2261 : * case, use inner_run_cost for the first matched tuple and
2262 : * inner_rescan_run_cost for additional ones.
2263 : */
2264 3535 : run_cost += inner_run_cost * inner_scan_frac;
2265 3535 : if (outer_matched_rows > 1)
2266 316 : run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
2267 :
2268 : /*
2269 : * Add the cost of inner-scan executions for unmatched outer rows.
2270 : * We estimate this as the same cost as returning the first tuple
2271 : * of a nonempty scan. We consider that these are all rescans,
2272 : * since we used inner_run_cost once already.
2273 : */
2274 3535 : run_cost += outer_unmatched_rows *
2275 : inner_rescan_run_cost / inner_path_rows;
2276 :
2277 : /*
2278 : * We won't be evaluating any quals at all for unmatched rows, so
2279 : * don't add them to ntuples.
2280 : */
2281 : }
2282 : else
2283 : {
2284 : /*
2285 : * Here, a complicating factor is that rescans may be cheaper than
2286 : * first scans. If we never scan all the way to the end of the
2287 : * inner rel, it might be (depending on the plan type) that we'd
2288 : * never pay the whole inner first-scan run cost. However it is
2289 : * difficult to estimate whether that will happen (and it could
2290 : * not happen if there are any unmatched outer rows!), so be
2291 : * conservative and always charge the whole first-scan cost once.
2292 : * We consider this charge to correspond to the first unmatched
2293 : * outer row, unless there isn't one in our estimate, in which
2294 : * case blame it on the first matched row.
2295 : */
2296 :
2297 : /* First, count all unmatched join tuples as being processed */
2298 11327 : ntuples += outer_unmatched_rows * inner_path_rows;
2299 :
2300 : /* Now add the forced full scan, and decrement appropriate count */
2301 11327 : run_cost += inner_run_cost;
2302 11327 : if (outer_unmatched_rows >= 1)
2303 10169 : outer_unmatched_rows -= 1;
2304 : else
2305 1158 : outer_matched_rows -= 1;
2306 :
2307 : /* Add inner run cost for additional outer tuples having matches */
2308 11327 : if (outer_matched_rows > 0)
2309 3698 : run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
2310 :
2311 : /* Add inner run cost for additional unmatched outer tuples */
2312 11327 : if (outer_unmatched_rows > 0)
2313 6812 : run_cost += outer_unmatched_rows * inner_rescan_run_cost;
2314 : }
2315 : }
2316 : else
2317 : {
2318 : /* Normal-case source costs were included in preliminary estimate */
2319 :
2320 : /* Compute number of tuples processed (not number emitted!) */
2321 10979 : ntuples = outer_path_rows * inner_path_rows;
2322 : }
2323 :
2324 : /* CPU costs */
2325 25841 : cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
2326 25841 : startup_cost += restrict_qual_cost.startup;
2327 25841 : cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
2328 25841 : run_cost += cpu_per_tuple * ntuples;
2329 :
2330 : /* tlist eval costs are paid per output row, not per tuple scanned */
2331 25841 : startup_cost += path->path.pathtarget->cost.startup;
2332 25841 : run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
2333 :
2334 25841 : path->path.startup_cost = startup_cost;
2335 25841 : path->path.total_cost = startup_cost + run_cost;
2336 25841 : }
2337 :
2338 : /*
2339 : * initial_cost_mergejoin
2340 : * Preliminary estimate of the cost of a mergejoin path.
2341 : *
2342 : * This must quickly produce lower-bound estimates of the path's startup and
2343 : * total costs. If we are unable to eliminate the proposed path from
2344 : * consideration using the lower bounds, final_cost_mergejoin will be called
2345 : * to obtain the final estimates.
2346 : *
2347 : * The exact division of labor between this function and final_cost_mergejoin
2348 : * is private to them, and represents a tradeoff between speed of the initial
2349 : * estimate and getting a tight lower bound. We choose to not examine the
2350 : * join quals here, except for obtaining the scan selectivity estimate which
2351 : * is really essential (but fortunately, use of caching keeps the cost of
2352 : * getting that down to something reasonable).
2353 : * We also assume that cost_sort is cheap enough to use here.
2354 : *
2355 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2356 : * other data to be used by final_cost_mergejoin
2357 : * 'jointype' is the type of join to be performed
2358 : * 'mergeclauses' is the list of joinclauses to be used as merge clauses
2359 : * 'outer_path' is the outer input to the join
2360 : * 'inner_path' is the inner input to the join
2361 : * 'outersortkeys' is the list of sort keys for the outer path
2362 : * 'innersortkeys' is the list of sort keys for the inner path
2363 : * 'extra' contains miscellaneous information about the join
2364 : *
2365 : * Note: outersortkeys and innersortkeys should be NIL if no explicit
2366 : * sort is needed because the respective source path is already ordered.
2367 : */
2368 : void
2369 20167 : initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
2370 : JoinType jointype,
2371 : List *mergeclauses,
2372 : Path *outer_path, Path *inner_path,
2373 : List *outersortkeys, List *innersortkeys,
2374 : JoinPathExtraData *extra)
2375 : {
2376 20167 : Cost startup_cost = 0;
2377 20167 : Cost run_cost = 0;
2378 20167 : double outer_path_rows = outer_path->rows;
2379 20167 : double inner_path_rows = inner_path->rows;
2380 : Cost inner_run_cost;
2381 : double outer_rows,
2382 : inner_rows,
2383 : outer_skip_rows,
2384 : inner_skip_rows;
2385 : Selectivity outerstartsel,
2386 : outerendsel,
2387 : innerstartsel,
2388 : innerendsel;
2389 : Path sort_path; /* dummy for result of cost_sort */
2390 :
2391 : /* Protect some assumptions below that rowcounts aren't zero or NaN */
2392 20167 : if (outer_path_rows <= 0 || isnan(outer_path_rows))
2393 1 : outer_path_rows = 1;
2394 20167 : if (inner_path_rows <= 0 || isnan(inner_path_rows))
2395 2 : inner_path_rows = 1;
2396 :
2397 : /*
2398 : * A merge join will stop as soon as it exhausts either input stream
2399 : * (unless it's an outer join, in which case the outer side has to be
2400 : * scanned all the way anyway). Estimate fraction of the left and right
2401 : * inputs that will actually need to be scanned. Likewise, we can
2402 : * estimate the number of rows that will be skipped before the first join
2403 : * pair is found, which should be factored into startup cost. We use only
2404 : * the first (most significant) merge clause for this purpose. Since
2405 : * mergejoinscansel() is a fairly expensive computation, we cache the
2406 : * results in the merge clause RestrictInfo.
2407 : */
2408 20167 : if (mergeclauses && jointype != JOIN_FULL)
2409 20085 : {
2410 20085 : RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
2411 : List *opathkeys;
2412 : List *ipathkeys;
2413 : PathKey *opathkey;
2414 : PathKey *ipathkey;
2415 : MergeScanSelCache *cache;
2416 :
2417 : /* Get the input pathkeys to determine the sort-order details */
2418 20085 : opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
2419 20085 : ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
2420 20085 : Assert(opathkeys);
2421 20085 : Assert(ipathkeys);
2422 20085 : opathkey = (PathKey *) linitial(opathkeys);
2423 20085 : ipathkey = (PathKey *) linitial(ipathkeys);
2424 : /* debugging check */
2425 40170 : if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
2426 40170 : opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
2427 40170 : opathkey->pk_strategy != ipathkey->pk_strategy ||
2428 20085 : opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
2429 0 : elog(ERROR, "left and right pathkeys do not match in mergejoin");
2430 :
2431 : /* Get the selectivity with caching */
2432 20085 : cache = cached_scansel(root, firstclause, opathkey);
2433 :
2434 20085 : if (bms_is_subset(firstclause->left_relids,
2435 20085 : outer_path->parent->relids))
2436 : {
2437 : /* left side of clause is outer */
2438 9270 : outerstartsel = cache->leftstartsel;
2439 9270 : outerendsel = cache->leftendsel;
2440 9270 : innerstartsel = cache->rightstartsel;
2441 9270 : innerendsel = cache->rightendsel;
2442 : }
2443 : else
2444 : {
2445 : /* left side of clause is inner */
2446 10815 : outerstartsel = cache->rightstartsel;
2447 10815 : outerendsel = cache->rightendsel;
2448 10815 : innerstartsel = cache->leftstartsel;
2449 10815 : innerendsel = cache->leftendsel;
2450 : }
2451 20085 : if (jointype == JOIN_LEFT ||
2452 : jointype == JOIN_ANTI)
2453 : {
2454 2473 : outerstartsel = 0.0;
2455 2473 : outerendsel = 1.0;
2456 : }
2457 17612 : else if (jointype == JOIN_RIGHT)
2458 : {
2459 2003 : innerstartsel = 0.0;
2460 2003 : innerendsel = 1.0;
2461 : }
2462 : }
2463 : else
2464 : {
2465 : /* cope with clauseless or full mergejoin */
2466 82 : outerstartsel = innerstartsel = 0.0;
2467 82 : outerendsel = innerendsel = 1.0;
2468 : }
2469 :
2470 : /*
2471 : * Convert selectivities to row counts. We force outer_rows and
2472 : * inner_rows to be at least 1, but the skip_rows estimates can be zero.
2473 : */
2474 20167 : outer_skip_rows = rint(outer_path_rows * outerstartsel);
2475 20167 : inner_skip_rows = rint(inner_path_rows * innerstartsel);
2476 20167 : outer_rows = clamp_row_est(outer_path_rows * outerendsel);
2477 20167 : inner_rows = clamp_row_est(inner_path_rows * innerendsel);
2478 :
2479 20167 : Assert(outer_skip_rows <= outer_rows);
2480 20167 : Assert(inner_skip_rows <= inner_rows);
2481 :
2482 : /*
2483 : * Readjust scan selectivities to account for above rounding. This is
2484 : * normally an insignificant effect, but when there are only a few rows in
2485 : * the inputs, failing to do this makes for a large percentage error.
2486 : */
2487 20167 : outerstartsel = outer_skip_rows / outer_path_rows;
2488 20167 : innerstartsel = inner_skip_rows / inner_path_rows;
2489 20167 : outerendsel = outer_rows / outer_path_rows;
2490 20167 : innerendsel = inner_rows / inner_path_rows;
2491 :
2492 20167 : Assert(outerstartsel <= outerendsel);
2493 20167 : Assert(innerstartsel <= innerendsel);
2494 :
2495 : /* cost of source data */
2496 :
2497 20167 : if (outersortkeys) /* do we need to sort outer? */
2498 : {
2499 21824 : cost_sort(&sort_path,
2500 : root,
2501 : outersortkeys,
2502 : outer_path->total_cost,
2503 : outer_path_rows,
2504 10912 : outer_path->pathtarget->width,
2505 : 0.0,
2506 : work_mem,
2507 : -1.0);
2508 10912 : startup_cost += sort_path.startup_cost;
2509 10912 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
2510 : * outerstartsel;
2511 10912 : run_cost += (sort_path.total_cost - sort_path.startup_cost)
2512 : * (outerendsel - outerstartsel);
2513 : }
2514 : else
2515 : {
2516 9255 : startup_cost += outer_path->startup_cost;
2517 9255 : startup_cost += (outer_path->total_cost - outer_path->startup_cost)
2518 : * outerstartsel;
2519 9255 : run_cost += (outer_path->total_cost - outer_path->startup_cost)
2520 : * (outerendsel - outerstartsel);
2521 : }
2522 :
2523 20167 : if (innersortkeys) /* do we need to sort inner? */
2524 : {
2525 33604 : cost_sort(&sort_path,
2526 : root,
2527 : innersortkeys,
2528 : inner_path->total_cost,
2529 : inner_path_rows,
2530 16802 : inner_path->pathtarget->width,
2531 : 0.0,
2532 : work_mem,
2533 : -1.0);
2534 16802 : startup_cost += sort_path.startup_cost;
2535 16802 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
2536 : * innerstartsel;
2537 16802 : inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
2538 : * (innerendsel - innerstartsel);
2539 : }
2540 : else
2541 : {
2542 3365 : startup_cost += inner_path->startup_cost;
2543 3365 : startup_cost += (inner_path->total_cost - inner_path->startup_cost)
2544 : * innerstartsel;
2545 3365 : inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
2546 : * (innerendsel - innerstartsel);
2547 : }
2548 :
2549 : /*
2550 : * We can't yet determine whether rescanning occurs, or whether
2551 : * materialization of the inner input should be done. The minimum
2552 : * possible inner input cost, regardless of rescan and materialization
2553 : * considerations, is inner_run_cost. We include that in
2554 : * workspace->total_cost, but not yet in run_cost.
2555 : */
2556 :
2557 : /* CPU costs left for later */
2558 :
2559 : /* Public result fields */
2560 20167 : workspace->startup_cost = startup_cost;
2561 20167 : workspace->total_cost = startup_cost + run_cost + inner_run_cost;
2562 : /* Save private data for final_cost_mergejoin */
2563 20167 : workspace->run_cost = run_cost;
2564 20167 : workspace->inner_run_cost = inner_run_cost;
2565 20167 : workspace->outer_rows = outer_rows;
2566 20167 : workspace->inner_rows = inner_rows;
2567 20167 : workspace->outer_skip_rows = outer_skip_rows;
2568 20167 : workspace->inner_skip_rows = inner_skip_rows;
2569 20167 : }
2570 :
2571 : /*
2572 : * final_cost_mergejoin
2573 : * Final estimate of the cost and result size of a mergejoin path.
2574 : *
2575 : * Unlike other costsize functions, this routine makes two actual decisions:
2576 : * whether the executor will need to do mark/restore, and whether we should
2577 : * materialize the inner path. It would be logically cleaner to build
2578 : * separate paths testing these alternatives, but that would require repeating
2579 : * most of the cost calculations, which are not all that cheap. Since the
2580 : * choice will not affect output pathkeys or startup cost, only total cost,
2581 : * there is no possibility of wanting to keep more than one path. So it seems
2582 : * best to make the decisions here and record them in the path's
2583 : * skip_mark_restore and materialize_inner fields.
2584 : *
2585 : * Mark/restore overhead is usually required, but can be skipped if we know
2586 : * that the executor need find only one match per outer tuple, and that the
2587 : * mergeclauses are sufficient to identify a match.
2588 : *
2589 : * We materialize the inner path if we need mark/restore and either the inner
2590 : * path can't support mark/restore, or it's cheaper to use an interposed
2591 : * Material node to handle mark/restore.
2592 : *
2593 : * 'path' is already filled in except for the rows and cost fields and
2594 : * skip_mark_restore and materialize_inner
2595 : * 'workspace' is the result from initial_cost_mergejoin
2596 : * 'extra' contains miscellaneous information about the join
2597 : */
2598 : void
2599 6510 : final_cost_mergejoin(PlannerInfo *root, MergePath *path,
2600 : JoinCostWorkspace *workspace,
2601 : JoinPathExtraData *extra)
2602 : {
2603 6510 : Path *outer_path = path->jpath.outerjoinpath;
2604 6510 : Path *inner_path = path->jpath.innerjoinpath;
2605 6510 : double inner_path_rows = inner_path->rows;
2606 6510 : List *mergeclauses = path->path_mergeclauses;
2607 6510 : List *innersortkeys = path->innersortkeys;
2608 6510 : Cost startup_cost = workspace->startup_cost;
2609 6510 : Cost run_cost = workspace->run_cost;
2610 6510 : Cost inner_run_cost = workspace->inner_run_cost;
2611 6510 : double outer_rows = workspace->outer_rows;
2612 6510 : double inner_rows = workspace->inner_rows;
2613 6510 : double outer_skip_rows = workspace->outer_skip_rows;
2614 6510 : double inner_skip_rows = workspace->inner_skip_rows;
2615 : Cost cpu_per_tuple,
2616 : bare_inner_cost,
2617 : mat_inner_cost;
2618 : QualCost merge_qual_cost;
2619 : QualCost qp_qual_cost;
2620 : double mergejointuples,
2621 : rescannedtuples;
2622 : double rescanratio;
2623 :
2624 : /* Protect some assumptions below that rowcounts aren't zero or NaN */
2625 6510 : if (inner_path_rows <= 0 || isnan(inner_path_rows))
2626 2 : inner_path_rows = 1;
2627 :
2628 : /* Mark the path with the correct row estimate */
2629 6510 : if (path->jpath.path.param_info)
2630 22 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
2631 : else
2632 6488 : path->jpath.path.rows = path->jpath.path.parent->rows;
2633 :
2634 : /* For partial paths, scale row estimate. */
2635 6510 : if (path->jpath.path.parallel_workers > 0)
2636 : {
2637 21 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
2638 :
2639 21 : path->jpath.path.rows =
2640 21 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
2641 : }
2642 :
2643 : /*
2644 : * We could include disable_cost in the preliminary estimate, but that
2645 : * would amount to optimizing for the case where the join method is
2646 : * disabled, which doesn't seem like the way to bet.
2647 : */
2648 6510 : if (!enable_mergejoin)
2649 0 : startup_cost += disable_cost;
2650 :
2651 : /*
2652 : * Compute cost of the mergequals and qpquals (other restriction clauses)
2653 : * separately.
2654 : */
2655 6510 : cost_qual_eval(&merge_qual_cost, mergeclauses, root);
2656 6510 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2657 6510 : qp_qual_cost.startup -= merge_qual_cost.startup;
2658 6510 : qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
2659 :
2660 : /*
2661 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
2662 : * executor will stop scanning for matches after the first match. When
2663 : * all the joinclauses are merge clauses, this means we don't ever need to
2664 : * back up the merge, and so we can skip mark/restore overhead.
2665 : */
2666 12957 : if ((path->jpath.jointype == JOIN_SEMI ||
2667 12590 : path->jpath.jointype == JOIN_ANTI ||
2668 9435 : extra->inner_unique) &&
2669 3292 : (list_length(path->jpath.joinrestrictinfo) ==
2670 3292 : list_length(path->path_mergeclauses)))
2671 2977 : path->skip_mark_restore = true;
2672 : else
2673 3533 : path->skip_mark_restore = false;
2674 :
2675 : /*
2676 : * Get approx # tuples passing the mergequals. We use approx_tuple_count
2677 : * here because we need an estimate done with JOIN_INNER semantics.
2678 : */
2679 6510 : mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
2680 :
2681 : /*
2682 : * When there are equal merge keys in the outer relation, the mergejoin
2683 : * must rescan any matching tuples in the inner relation. This means
2684 : * re-fetching inner tuples; we have to estimate how often that happens.
2685 : *
2686 : * For regular inner and outer joins, the number of re-fetches can be
2687 : * estimated approximately as size of merge join output minus size of
2688 : * inner relation. Assume that the distinct key values are 1, 2, ..., and
2689 : * denote the number of values of each key in the outer relation as m1,
2690 : * m2, ...; in the inner relation, n1, n2, ... Then we have
2691 : *
2692 : * size of join = m1 * n1 + m2 * n2 + ...
2693 : *
2694 : * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
2695 : * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
2696 : * relation
2697 : *
2698 : * This equation works correctly for outer tuples having no inner match
2699 : * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
2700 : * are effectively subtracting those from the number of rescanned tuples,
2701 : * when we should not. Can we do better without expensive selectivity
2702 : * computations?
2703 : *
2704 : * The whole issue is moot if we are working from a unique-ified outer
2705 : * input, or if we know we don't need to mark/restore at all.
2706 : */
2707 6510 : if (IsA(outer_path, UniquePath) ||path->skip_mark_restore)
2708 2994 : rescannedtuples = 0;
2709 : else
2710 : {
2711 3516 : rescannedtuples = mergejointuples - inner_path_rows;
2712 : /* Must clamp because of possible underestimate */
2713 3516 : if (rescannedtuples < 0)
2714 949 : rescannedtuples = 0;
2715 : }
2716 : /* We'll inflate various costs this much to account for rescanning */
2717 6510 : rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
2718 :
2719 : /*
2720 : * Decide whether we want to materialize the inner input to shield it from
2721 : * mark/restore and performing re-fetches. Our cost model for regular
2722 : * re-fetches is that a re-fetch costs the same as an original fetch,
2723 : * which is probably an overestimate; but on the other hand we ignore the
2724 : * bookkeeping costs of mark/restore. Not clear if it's worth developing
2725 : * a more refined model. So we just need to inflate the inner run cost by
2726 : * rescanratio.
2727 : */
2728 6510 : bare_inner_cost = inner_run_cost * rescanratio;
2729 :
2730 : /*
2731 : * When we interpose a Material node the re-fetch cost is assumed to be
2732 : * just cpu_operator_cost per tuple, independently of the underlying
2733 : * plan's cost; and we charge an extra cpu_operator_cost per original
2734 : * fetch as well. Note that we're assuming the materialize node will
2735 : * never spill to disk, since it only has to remember tuples back to the
2736 : * last mark. (If there are a huge number of duplicates, our other cost
2737 : * factors will make the path so expensive that it probably won't get
2738 : * chosen anyway.) So we don't use cost_rescan here.
2739 : *
2740 : * Note: keep this estimate in sync with create_mergejoin_plan's labeling
2741 : * of the generated Material node.
2742 : */
2743 6510 : mat_inner_cost = inner_run_cost +
2744 : cpu_operator_cost * inner_path_rows * rescanratio;
2745 :
2746 : /*
2747 : * If we don't need mark/restore at all, we don't need materialization.
2748 : */
2749 6510 : if (path->skip_mark_restore)
2750 2977 : path->materialize_inner = false;
2751 :
2752 : /*
2753 : * Prefer materializing if it looks cheaper, unless the user has asked to
2754 : * suppress materialization.
2755 : */
2756 3533 : else if (enable_material && mat_inner_cost < bare_inner_cost)
2757 138 : path->materialize_inner = true;
2758 :
2759 : /*
2760 : * Even if materializing doesn't look cheaper, we *must* do it if the
2761 : * inner path is to be used directly (without sorting) and it doesn't
2762 : * support mark/restore.
2763 : *
2764 : * Since the inner side must be ordered, and only Sorts and IndexScans can
2765 : * create order to begin with, and they both support mark/restore, you
2766 : * might think there's no problem --- but you'd be wrong. Nestloop and
2767 : * merge joins can *preserve* the order of their inputs, so they can be
2768 : * selected as the input of a mergejoin, and they don't support
2769 : * mark/restore at present.
2770 : *
2771 : * We don't test the value of enable_material here, because
2772 : * materialization is required for correctness in this case, and turning
2773 : * it off does not entitle us to deliver an invalid plan.
2774 : */
2775 3506 : else if (innersortkeys == NIL &&
2776 111 : !ExecSupportsMarkRestore(inner_path))
2777 23 : path->materialize_inner = true;
2778 :
2779 : /*
2780 : * Also, force materializing if the inner path is to be sorted and the
2781 : * sort is expected to spill to disk. This is because the final merge
2782 : * pass can be done on-the-fly if it doesn't have to support mark/restore.
2783 : * We don't try to adjust the cost estimates for this consideration,
2784 : * though.
2785 : *
2786 : * Since materialization is a performance optimization in this case,
2787 : * rather than necessary for correctness, we skip it if enable_material is
2788 : * off.
2789 : */
2790 6656 : else if (enable_material && innersortkeys != NIL &&
2791 3284 : relation_byte_size(inner_path_rows,
2792 6568 : inner_path->pathtarget->width) >
2793 3284 : (work_mem * 1024L))
2794 9 : path->materialize_inner = true;
2795 : else
2796 3363 : path->materialize_inner = false;
2797 :
2798 : /* Charge the right incremental cost for the chosen case */
2799 6510 : if (path->materialize_inner)
2800 170 : run_cost += mat_inner_cost;
2801 : else
2802 6340 : run_cost += bare_inner_cost;
2803 :
2804 : /* CPU costs */
2805 :
2806 : /*
2807 : * The number of tuple comparisons needed is approximately number of outer
2808 : * rows plus number of inner rows plus number of rescanned tuples (can we
2809 : * refine this?). At each one, we need to evaluate the mergejoin quals.
2810 : */
2811 6510 : startup_cost += merge_qual_cost.startup;
2812 6510 : startup_cost += merge_qual_cost.per_tuple *
2813 : (outer_skip_rows + inner_skip_rows * rescanratio);
2814 6510 : run_cost += merge_qual_cost.per_tuple *
2815 : ((outer_rows - outer_skip_rows) +
2816 : (inner_rows - inner_skip_rows) * rescanratio);
2817 :
2818 : /*
2819 : * For each tuple that gets through the mergejoin proper, we charge
2820 : * cpu_tuple_cost plus the cost of evaluating additional restriction
2821 : * clauses that are to be applied at the join. (This is pessimistic since
2822 : * not all of the quals may get evaluated at each tuple.)
2823 : *
2824 : * Note: we could adjust for SEMI/ANTI joins skipping some qual
2825 : * evaluations here, but it's probably not worth the trouble.
2826 : */
2827 6510 : startup_cost += qp_qual_cost.startup;
2828 6510 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2829 6510 : run_cost += cpu_per_tuple * mergejointuples;
2830 :
2831 : /* tlist eval costs are paid per output row, not per tuple scanned */
2832 6510 : startup_cost += path->jpath.path.pathtarget->cost.startup;
2833 6510 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
2834 :
2835 6510 : path->jpath.path.startup_cost = startup_cost;
2836 6510 : path->jpath.path.total_cost = startup_cost + run_cost;
2837 6510 : }
2838 :
2839 : /*
2840 : * run mergejoinscansel() with caching
2841 : */
2842 : static MergeScanSelCache *
2843 20085 : cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
2844 : {
2845 : MergeScanSelCache *cache;
2846 : ListCell *lc;
2847 : Selectivity leftstartsel,
2848 : leftendsel,
2849 : rightstartsel,
2850 : rightendsel;
2851 : MemoryContext oldcontext;
2852 :
2853 : /* Do we have this result already? */
2854 20085 : foreach(lc, rinfo->scansel_cache)
2855 : {
2856 16899 : cache = (MergeScanSelCache *) lfirst(lc);
2857 33798 : if (cache->opfamily == pathkey->pk_opfamily &&
2858 33798 : cache->collation == pathkey->pk_eclass->ec_collation &&
2859 33798 : cache->strategy == pathkey->pk_strategy &&
2860 16899 : cache->nulls_first == pathkey->pk_nulls_first)
2861 16899 : return cache;
2862 : }
2863 :
2864 : /* Nope, do the computation */
2865 6372 : mergejoinscansel(root,
2866 3186 : (Node *) rinfo->clause,
2867 : pathkey->pk_opfamily,
2868 : pathkey->pk_strategy,
2869 3186 : pathkey->pk_nulls_first,
2870 : &leftstartsel,
2871 : &leftendsel,
2872 : &rightstartsel,
2873 : &rightendsel);
2874 :
2875 : /* Cache the result in suitably long-lived workspace */
2876 3186 : oldcontext = MemoryContextSwitchTo(root->planner_cxt);
2877 :
2878 3186 : cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
2879 3186 : cache->opfamily = pathkey->pk_opfamily;
2880 3186 : cache->collation = pathkey->pk_eclass->ec_collation;
2881 3186 : cache->strategy = pathkey->pk_strategy;
2882 3186 : cache->nulls_first = pathkey->pk_nulls_first;
2883 3186 : cache->leftstartsel = leftstartsel;
2884 3186 : cache->leftendsel = leftendsel;
2885 3186 : cache->rightstartsel = rightstartsel;
2886 3186 : cache->rightendsel = rightendsel;
2887 :
2888 3186 : rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
2889 :
2890 3186 : MemoryContextSwitchTo(oldcontext);
2891 :
2892 3186 : return cache;
2893 : }
2894 :
2895 : /*
2896 : * initial_cost_hashjoin
2897 : * Preliminary estimate of the cost of a hashjoin path.
2898 : *
2899 : * This must quickly produce lower-bound estimates of the path's startup and
2900 : * total costs. If we are unable to eliminate the proposed path from
2901 : * consideration using the lower bounds, final_cost_hashjoin will be called
2902 : * to obtain the final estimates.
2903 : *
2904 : * The exact division of labor between this function and final_cost_hashjoin
2905 : * is private to them, and represents a tradeoff between speed of the initial
2906 : * estimate and getting a tight lower bound. We choose to not examine the
2907 : * join quals here (other than by counting the number of hash clauses),
2908 : * so we can't do much with CPU costs. We do assume that
2909 : * ExecChooseHashTableSize is cheap enough to use here.
2910 : *
2911 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2912 : * other data to be used by final_cost_hashjoin
2913 : * 'jointype' is the type of join to be performed
2914 : * 'hashclauses' is the list of joinclauses to be used as hash clauses
2915 : * 'outer_path' is the outer input to the join
2916 : * 'inner_path' is the inner input to the join
2917 : * 'extra' contains miscellaneous information about the join
2918 : */
2919 : void
2920 12380 : initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
2921 : JoinType jointype,
2922 : List *hashclauses,
2923 : Path *outer_path, Path *inner_path,
2924 : JoinPathExtraData *extra)
2925 : {
2926 12380 : Cost startup_cost = 0;
2927 12380 : Cost run_cost = 0;
2928 12380 : double outer_path_rows = outer_path->rows;
2929 12380 : double inner_path_rows = inner_path->rows;
2930 12380 : int num_hashclauses = list_length(hashclauses);
2931 : int numbuckets;
2932 : int numbatches;
2933 : int num_skew_mcvs;
2934 :
2935 : /* cost of source data */
2936 12380 : startup_cost += outer_path->startup_cost;
2937 12380 : run_cost += outer_path->total_cost - outer_path->startup_cost;
2938 12380 : startup_cost += inner_path->total_cost;
2939 :
2940 : /*
2941 : * Cost of computing hash function: must do it once per input tuple. We
2942 : * charge one cpu_operator_cost for each column's hash function. Also,
2943 : * tack on one cpu_tuple_cost per inner row, to model the costs of
2944 : * inserting the row into the hashtable.
2945 : *
2946 : * XXX when a hashclause is more complex than a single operator, we really
2947 : * should charge the extra eval costs of the left or right side, as
2948 : * appropriate, here. This seems more work than it's worth at the moment.
2949 : */
2950 12380 : startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
2951 : * inner_path_rows;
2952 12380 : run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
2953 :
2954 : /*
2955 : * Get hash table size that executor would use for inner relation.
2956 : *
2957 : * XXX for the moment, always assume that skew optimization will be
2958 : * performed. As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
2959 : * trying to determine that for sure.
2960 : *
2961 : * XXX at some point it might be interesting to try to account for skew
2962 : * optimization in the cost estimate, but for now, we don't.
2963 : */
2964 12380 : ExecChooseHashTableSize(inner_path_rows,
2965 12380 : inner_path->pathtarget->width,
2966 : true, /* useskew */
2967 : &numbuckets,
2968 : &numbatches,
2969 : &num_skew_mcvs);
2970 :
2971 : /*
2972 : * If inner relation is too big then we will need to "batch" the join,
2973 : * which implies writing and reading most of the tuples to disk an extra
2974 : * time. Charge seq_page_cost per page, since the I/O should be nice and
2975 : * sequential. Writing the inner rel counts as startup cost, all the rest
2976 : * as run cost.
2977 : */
2978 12380 : if (numbatches > 1)
2979 : {
2980 121 : double outerpages = page_size(outer_path_rows,
2981 121 : outer_path->pathtarget->width);
2982 121 : double innerpages = page_size(inner_path_rows,
2983 121 : inner_path->pathtarget->width);
2984 :
2985 121 : startup_cost += seq_page_cost * innerpages;
2986 121 : run_cost += seq_page_cost * (innerpages + 2 * outerpages);
2987 : }
2988 :
2989 : /* CPU costs left for later */
2990 :
2991 : /* Public result fields */
2992 12380 : workspace->startup_cost = startup_cost;
2993 12380 : workspace->total_cost = startup_cost + run_cost;
2994 : /* Save private data for final_cost_hashjoin */
2995 12380 : workspace->run_cost = run_cost;
2996 12380 : workspace->numbuckets = numbuckets;
2997 12380 : workspace->numbatches = numbatches;
2998 12380 : }
2999 :
3000 : /*
3001 : * final_cost_hashjoin
3002 : * Final estimate of the cost and result size of a hashjoin path.
3003 : *
3004 : * Note: the numbatches estimate is also saved into 'path' for use later
3005 : *
3006 : * 'path' is already filled in except for the rows and cost fields and
3007 : * num_batches
3008 : * 'workspace' is the result from initial_cost_hashjoin
3009 : * 'extra' contains miscellaneous information about the join
3010 : */
3011 : void
3012 7067 : final_cost_hashjoin(PlannerInfo *root, HashPath *path,
3013 : JoinCostWorkspace *workspace,
3014 : JoinPathExtraData *extra)
3015 : {
3016 7067 : Path *outer_path = path->jpath.outerjoinpath;
3017 7067 : Path *inner_path = path->jpath.innerjoinpath;
3018 7067 : double outer_path_rows = outer_path->rows;
3019 7067 : double inner_path_rows = inner_path->rows;
3020 7067 : List *hashclauses = path->path_hashclauses;
3021 7067 : Cost startup_cost = workspace->startup_cost;
3022 7067 : Cost run_cost = workspace->run_cost;
3023 7067 : int numbuckets = workspace->numbuckets;
3024 7067 : int numbatches = workspace->numbatches;
3025 : Cost cpu_per_tuple;
3026 : QualCost hash_qual_cost;
3027 : QualCost qp_qual_cost;
3028 : double hashjointuples;
3029 : double virtualbuckets;
3030 : Selectivity innerbucketsize;
3031 : Selectivity innermcvfreq;
3032 : ListCell *hcl;
3033 :
3034 : /* Mark the path with the correct row estimate */
3035 7067 : if (path->jpath.path.param_info)
3036 76 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3037 : else
3038 6991 : path->jpath.path.rows = path->jpath.path.parent->rows;
3039 :
3040 : /* For partial paths, scale row estimate. */
3041 7067 : if (path->jpath.path.parallel_workers > 0)
3042 : {
3043 17 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3044 :
3045 17 : path->jpath.path.rows =
3046 17 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3047 : }
3048 :
3049 : /*
3050 : * We could include disable_cost in the preliminary estimate, but that
3051 : * would amount to optimizing for the case where the join method is
3052 : * disabled, which doesn't seem like the way to bet.
3053 : */
3054 7067 : if (!enable_hashjoin)
3055 0 : startup_cost += disable_cost;
3056 :
3057 : /* mark the path with estimated # of batches */
3058 7067 : path->num_batches = numbatches;
3059 :
3060 : /* and compute the number of "virtual" buckets in the whole join */
3061 7067 : virtualbuckets = (double) numbuckets * (double) numbatches;
3062 :
3063 : /*
3064 : * Determine bucketsize fraction and MCV frequency for the inner relation.
3065 : * We use the smallest bucketsize or MCV frequency estimated for any
3066 : * individual hashclause; this is undoubtedly conservative.
3067 : *
3068 : * BUT: if inner relation has been unique-ified, we can assume it's good
3069 : * for hashing. This is important both because it's the right answer, and
3070 : * because we avoid contaminating the cache with a value that's wrong for
3071 : * non-unique-ified paths.
3072 : */
3073 7067 : if (IsA(inner_path, UniquePath))
3074 : {
3075 37 : innerbucketsize = 1.0 / virtualbuckets;
3076 37 : innermcvfreq = 0.0;
3077 : }
3078 : else
3079 : {
3080 7030 : innerbucketsize = 1.0;
3081 7030 : innermcvfreq = 1.0;
3082 14517 : foreach(hcl, hashclauses)
3083 : {
3084 7487 : RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
3085 : Selectivity thisbucketsize;
3086 : Selectivity thismcvfreq;
3087 :
3088 : /*
3089 : * First we have to figure out which side of the hashjoin clause
3090 : * is the inner side.
3091 : *
3092 : * Since we tend to visit the same clauses over and over when
3093 : * planning a large query, we cache the bucket stats estimates in
3094 : * the RestrictInfo node to avoid repeated lookups of statistics.
3095 : */
3096 7487 : if (bms_is_subset(restrictinfo->right_relids,
3097 7487 : inner_path->parent->relids))
3098 : {
3099 : /* righthand side is inner */
3100 4004 : thisbucketsize = restrictinfo->right_bucketsize;
3101 4004 : if (thisbucketsize < 0)
3102 : {
3103 : /* not cached yet */
3104 4886 : estimate_hash_bucket_stats(root,
3105 2443 : get_rightop(restrictinfo->clause),
3106 : virtualbuckets,
3107 : &restrictinfo->right_mcvfreq,
3108 : &restrictinfo->right_bucketsize);
3109 2443 : thisbucketsize = restrictinfo->right_bucketsize;
3110 : }
3111 4004 : thismcvfreq = restrictinfo->right_mcvfreq;
3112 : }
3113 : else
3114 : {
3115 3483 : Assert(bms_is_subset(restrictinfo->left_relids,
3116 : inner_path->parent->relids));
3117 : /* lefthand side is inner */
3118 3483 : thisbucketsize = restrictinfo->left_bucketsize;
3119 3483 : if (thisbucketsize < 0)
3120 : {
3121 : /* not cached yet */
3122 4178 : estimate_hash_bucket_stats(root,
3123 2089 : get_leftop(restrictinfo->clause),
3124 : virtualbuckets,
3125 : &restrictinfo->left_mcvfreq,
3126 : &restrictinfo->left_bucketsize);
3127 2089 : thisbucketsize = restrictinfo->left_bucketsize;
3128 : }
3129 3483 : thismcvfreq = restrictinfo->left_mcvfreq;
3130 : }
3131 :
3132 7487 : if (innerbucketsize > thisbucketsize)
3133 5104 : innerbucketsize = thisbucketsize;
3134 7487 : if (innermcvfreq > thismcvfreq)
3135 7005 : innermcvfreq = thismcvfreq;
3136 : }
3137 : }
3138 :
3139 : /*
3140 : * If the bucket holding the inner MCV would exceed work_mem, we don't
3141 : * want to hash unless there is really no other alternative, so apply
3142 : * disable_cost. (The executor normally copes with excessive memory usage
3143 : * by splitting batches, but obviously it cannot separate equal values
3144 : * that way, so it will be unable to drive the batch size below work_mem
3145 : * when this is true.)
3146 : */
3147 14134 : if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
3148 14134 : inner_path->pathtarget->width) >
3149 7067 : (work_mem * 1024L))
3150 0 : startup_cost += disable_cost;
3151 :
3152 : /*
3153 : * Compute cost of the hashquals and qpquals (other restriction clauses)
3154 : * separately.
3155 : */
3156 7067 : cost_qual_eval(&hash_qual_cost, hashclauses, root);
3157 7067 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
3158 7067 : qp_qual_cost.startup -= hash_qual_cost.startup;
3159 7067 : qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
3160 :
3161 : /* CPU costs */
3162 :
3163 14096 : if (path->jpath.jointype == JOIN_SEMI ||
3164 13867 : path->jpath.jointype == JOIN_ANTI ||
3165 6838 : extra->inner_unique)
3166 2642 : {
3167 : double outer_matched_rows;
3168 : Selectivity inner_scan_frac;
3169 :
3170 : /*
3171 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3172 : * executor will stop after the first match.
3173 : *
3174 : * For an outer-rel row that has at least one match, we can expect the
3175 : * bucket scan to stop after a fraction 1/(match_count+1) of the
3176 : * bucket's rows, if the matches are evenly distributed. Since they
3177 : * probably aren't quite evenly distributed, we apply a fuzz factor of
3178 : * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
3179 : * to clamp inner_scan_frac to at most 1.0; but since match_count is
3180 : * at least 1, no such clamp is needed now.)
3181 : */
3182 2642 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3183 2642 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3184 :
3185 2642 : startup_cost += hash_qual_cost.startup;
3186 5284 : run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
3187 2642 : clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
3188 :
3189 : /*
3190 : * For unmatched outer-rel rows, the picture is quite a lot different.
3191 : * In the first place, there is no reason to assume that these rows
3192 : * preferentially hit heavily-populated buckets; instead assume they
3193 : * are uncorrelated with the inner distribution and so they see an
3194 : * average bucket size of inner_path_rows / virtualbuckets. In the
3195 : * second place, it seems likely that they will have few if any exact
3196 : * hash-code matches and so very few of the tuples in the bucket will
3197 : * actually require eval of the hash quals. We don't have any good
3198 : * way to estimate how many will, but for the moment assume that the
3199 : * effective cost per bucket entry is one-tenth what it is for
3200 : * matchable tuples.
3201 : */
3202 5284 : run_cost += hash_qual_cost.per_tuple *
3203 : (outer_path_rows - outer_matched_rows) *
3204 2642 : clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
3205 :
3206 : /* Get # of tuples that will pass the basic join */
3207 2642 : if (path->jpath.jointype == JOIN_SEMI)
3208 38 : hashjointuples = outer_matched_rows;
3209 : else
3210 2604 : hashjointuples = outer_path_rows - outer_matched_rows;
3211 : }
3212 : else
3213 : {
3214 : /*
3215 : * The number of tuple comparisons needed is the number of outer
3216 : * tuples times the typical number of tuples in a hash bucket, which
3217 : * is the inner relation size times its bucketsize fraction. At each
3218 : * one, we need to evaluate the hashjoin quals. But actually,
3219 : * charging the full qual eval cost at each tuple is pessimistic,
3220 : * since we don't evaluate the quals unless the hash values match
3221 : * exactly. For lack of a better idea, halve the cost estimate to
3222 : * allow for that.
3223 : */
3224 4425 : startup_cost += hash_qual_cost.startup;
3225 8850 : run_cost += hash_qual_cost.per_tuple * outer_path_rows *
3226 4425 : clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
3227 :
3228 : /*
3229 : * Get approx # tuples passing the hashquals. We use
3230 : * approx_tuple_count here because we need an estimate done with
3231 : * JOIN_INNER semantics.
3232 : */
3233 4425 : hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
3234 : }
3235 :
3236 : /*
3237 : * For each tuple that gets through the hashjoin proper, we charge
3238 : * cpu_tuple_cost plus the cost of evaluating additional restriction
3239 : * clauses that are to be applied at the join. (This is pessimistic since
3240 : * not all of the quals may get evaluated at each tuple.)
3241 : */
3242 7067 : startup_cost += qp_qual_cost.startup;
3243 7067 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
3244 7067 : run_cost += cpu_per_tuple * hashjointuples;
3245 :
3246 : /* tlist eval costs are paid per output row, not per tuple scanned */
3247 7067 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3248 7067 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3249 :
3250 7067 : path->jpath.path.startup_cost = startup_cost;
3251 7067 : path->jpath.path.total_cost = startup_cost + run_cost;
3252 7067 : }
3253 :
3254 :
3255 : /*
3256 : * cost_subplan
3257 : * Figure the costs for a SubPlan (or initplan).
3258 : *
3259 : * Note: we could dig the subplan's Plan out of the root list, but in practice
3260 : * all callers have it handy already, so we make them pass it.
3261 : */
3262 : void
3263 1927 : cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
3264 : {
3265 : QualCost sp_cost;
3266 :
3267 : /* Figure any cost for evaluating the testexpr */
3268 1927 : cost_qual_eval(&sp_cost,
3269 1927 : make_ands_implicit((Expr *) subplan->testexpr),
3270 : root);
3271 :
3272 1927 : if (subplan->useHashTable)
3273 : {
3274 : /*
3275 : * If we are using a hash table for the subquery outputs, then the
3276 : * cost of evaluating the query is a one-time cost. We charge one
3277 : * cpu_operator_cost per tuple for the work of loading the hashtable,
3278 : * too.
3279 : */
3280 222 : sp_cost.startup += plan->total_cost +
3281 111 : cpu_operator_cost * plan->plan_rows;
3282 :
3283 : /*
3284 : * The per-tuple costs include the cost of evaluating the lefthand
3285 : * expressions, plus the cost of probing the hashtable. We already
3286 : * accounted for the lefthand expressions as part of the testexpr, and
3287 : * will also have counted one cpu_operator_cost for each comparison
3288 : * operator. That is probably too low for the probing cost, but it's
3289 : * hard to make a better estimate, so live with it for now.
3290 : */
3291 : }
3292 : else
3293 : {
3294 : /*
3295 : * Otherwise we will be rescanning the subplan output on each
3296 : * evaluation. We need to estimate how much of the output we will
3297 : * actually need to scan. NOTE: this logic should agree with the
3298 : * tuple_fraction estimates used by make_subplan() in
3299 : * plan/subselect.c.
3300 : */
3301 1816 : Cost plan_run_cost = plan->total_cost - plan->startup_cost;
3302 :
3303 1816 : if (subplan->subLinkType == EXISTS_SUBLINK)
3304 : {
3305 : /* we only need to fetch 1 tuple; clamp to avoid zero divide */
3306 123 : sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
3307 : }
3308 3384 : else if (subplan->subLinkType == ALL_SUBLINK ||
3309 1691 : subplan->subLinkType == ANY_SUBLINK)
3310 : {
3311 : /* assume we need 50% of the tuples */
3312 14 : sp_cost.per_tuple += 0.50 * plan_run_cost;
3313 : /* also charge a cpu_operator_cost per row examined */
3314 14 : sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
3315 : }
3316 : else
3317 : {
3318 : /* assume we need all tuples */
3319 1679 : sp_cost.per_tuple += plan_run_cost;
3320 : }
3321 :
3322 : /*
3323 : * Also account for subplan's startup cost. If the subplan is
3324 : * uncorrelated or undirect correlated, AND its topmost node is one
3325 : * that materializes its output, assume that we'll only need to pay
3326 : * its startup cost once; otherwise assume we pay the startup cost
3327 : * every time.
3328 : */
3329 2391 : if (subplan->parParam == NIL &&
3330 575 : ExecMaterializesOutput(nodeTag(plan)))
3331 42 : sp_cost.startup += plan->startup_cost;
3332 : else
3333 1774 : sp_cost.per_tuple += plan->startup_cost;
3334 : }
3335 :
3336 1927 : subplan->startup_cost = sp_cost.startup;
3337 1927 : subplan->per_call_cost = sp_cost.per_tuple;
3338 1927 : }
3339 :
3340 :
3341 : /*
3342 : * cost_rescan
3343 : * Given a finished Path, estimate the costs of rescanning it after
3344 : * having done so the first time. For some Path types a rescan is
3345 : * cheaper than an original scan (if no parameters change), and this
3346 : * function embodies knowledge about that. The default is to return
3347 : * the same costs stored in the Path. (Note that the cost estimates
3348 : * actually stored in Paths are always for first scans.)
3349 : *
3350 : * This function is not currently intended to model effects such as rescans
3351 : * being cheaper due to disk block caching; what we are concerned with is
3352 : * plan types wherein the executor caches results explicitly, or doesn't
3353 : * redo startup calculations, etc.
3354 : */
3355 : static void
3356 49901 : cost_rescan(PlannerInfo *root, Path *path,
3357 : Cost *rescan_startup_cost, /* output parameters */
3358 : Cost *rescan_total_cost)
3359 : {
3360 49901 : switch (path->pathtype)
3361 : {
3362 : case T_FunctionScan:
3363 :
3364 : /*
3365 : * Currently, nodeFunctionscan.c always executes the function to
3366 : * completion before returning any rows, and caches the results in
3367 : * a tuplestore. So the function eval cost is all startup cost
3368 : * and isn't paid over again on rescans. However, all run costs
3369 : * will be paid over again.
3370 : */
3371 470 : *rescan_startup_cost = 0;
3372 470 : *rescan_total_cost = path->total_cost - path->startup_cost;
3373 470 : break;
3374 : case T_HashJoin:
3375 :
3376 : /*
3377 : * If it's a single-batch join, we don't need to rebuild the hash
3378 : * table during a rescan.
3379 : */
3380 2937 : if (((HashPath *) path)->num_batches == 1)
3381 : {
3382 : /* Startup cost is exactly the cost of hash table building */
3383 2937 : *rescan_startup_cost = 0;
3384 2937 : *rescan_total_cost = path->total_cost - path->startup_cost;
3385 : }
3386 : else
3387 : {
3388 : /* Otherwise, no special treatment */
3389 0 : *rescan_startup_cost = path->startup_cost;
3390 0 : *rescan_total_cost = path->total_cost;
3391 : }
3392 2937 : break;
3393 : case T_CteScan:
3394 : case T_WorkTableScan:
3395 : {
3396 : /*
3397 : * These plan types materialize their final result in a
3398 : * tuplestore or tuplesort object. So the rescan cost is only
3399 : * cpu_tuple_cost per tuple, unless the result is large enough
3400 : * to spill to disk.
3401 : */
3402 80 : Cost run_cost = cpu_tuple_cost * path->rows;
3403 80 : double nbytes = relation_byte_size(path->rows,
3404 80 : path->pathtarget->width);
3405 80 : long work_mem_bytes = work_mem * 1024L;
3406 :
3407 80 : if (nbytes > work_mem_bytes)
3408 : {
3409 : /* It will spill, so account for re-read cost */
3410 0 : double npages = ceil(nbytes / BLCKSZ);
3411 :
3412 0 : run_cost += seq_page_cost * npages;
3413 : }
3414 80 : *rescan_startup_cost = 0;
3415 80 : *rescan_total_cost = run_cost;
3416 : }
3417 80 : break;
3418 : case T_Material:
3419 : case T_Sort:
3420 : {
3421 : /*
3422 : * These plan types not only materialize their results, but do
3423 : * not implement qual filtering or projection. So they are
3424 : * even cheaper to rescan than the ones above. We charge only
3425 : * cpu_operator_cost per tuple. (Note: keep that in sync with
3426 : * the run_cost charge in cost_sort, and also see comments in
3427 : * cost_material before you change it.)
3428 : */
3429 19741 : Cost run_cost = cpu_operator_cost * path->rows;
3430 19741 : double nbytes = relation_byte_size(path->rows,
3431 19741 : path->pathtarget->width);
3432 19741 : long work_mem_bytes = work_mem * 1024L;
3433 :
3434 19741 : if (nbytes > work_mem_bytes)
3435 : {
3436 : /* It will spill, so account for re-read cost */
3437 160 : double npages = ceil(nbytes / BLCKSZ);
3438 :
3439 160 : run_cost += seq_page_cost * npages;
3440 : }
3441 19741 : *rescan_startup_cost = 0;
3442 19741 : *rescan_total_cost = run_cost;
3443 : }
3444 19741 : break;
3445 : default:
3446 26673 : *rescan_startup_cost = path->startup_cost;
3447 26673 : *rescan_total_cost = path->total_cost;
3448 26673 : break;
3449 : }
3450 49901 : }
3451 :
3452 :
3453 : /*
3454 : * cost_qual_eval
3455 : * Estimate the CPU costs of evaluating a WHERE clause.
3456 : * The input can be either an implicitly-ANDed list of boolean
3457 : * expressions, or a list of RestrictInfo nodes. (The latter is
3458 : * preferred since it allows caching of the results.)
3459 : * The result includes both a one-time (startup) component,
3460 : * and a per-evaluation component.
3461 : */
3462 : void
3463 94609 : cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
3464 : {
3465 : cost_qual_eval_context context;
3466 : ListCell *l;
3467 :
3468 94609 : context.root = root;
3469 94609 : context.total.startup = 0;
3470 94609 : context.total.per_tuple = 0;
3471 :
3472 : /* We don't charge any cost for the implicit ANDing at top level ... */
3473 :
3474 169209 : foreach(l, quals)
3475 : {
3476 74600 : Node *qual = (Node *) lfirst(l);
3477 :
3478 74600 : cost_qual_eval_walker(qual, &context);
3479 : }
3480 :
3481 94609 : *cost = context.total;
3482 94609 : }
3483 :
3484 : /*
3485 : * cost_qual_eval_node
3486 : * As above, for a single RestrictInfo or expression.
3487 : */
3488 : void
3489 64518 : cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
3490 : {
3491 : cost_qual_eval_context context;
3492 :
3493 64518 : context.root = root;
3494 64518 : context.total.startup = 0;
3495 64518 : context.total.per_tuple = 0;
3496 :
3497 64518 : cost_qual_eval_walker(qual, &context);
3498 :
3499 64518 : *cost = context.total;
3500 64518 : }
3501 :
3502 : static bool
3503 257156 : cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
3504 : {
3505 257156 : if (node == NULL)
3506 3150 : return false;
3507 :
3508 : /*
3509 : * RestrictInfo nodes contain an eval_cost field reserved for this
3510 : * routine's use, so that it's not necessary to evaluate the qual clause's
3511 : * cost more than once. If the clause's cost hasn't been computed yet,
3512 : * the field's startup value will contain -1.
3513 : */
3514 254006 : if (IsA(node, RestrictInfo))
3515 : {
3516 80261 : RestrictInfo *rinfo = (RestrictInfo *) node;
3517 :
3518 80261 : if (rinfo->eval_cost.startup < 0)
3519 : {
3520 : cost_qual_eval_context locContext;
3521 :
3522 19813 : locContext.root = context->root;
3523 19813 : locContext.total.startup = 0;
3524 19813 : locContext.total.per_tuple = 0;
3525 :
3526 : /*
3527 : * For an OR clause, recurse into the marked-up tree so that we
3528 : * set the eval_cost for contained RestrictInfos too.
3529 : */
3530 19813 : if (rinfo->orclause)
3531 335 : cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
3532 : else
3533 19478 : cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
3534 :
3535 : /*
3536 : * If the RestrictInfo is marked pseudoconstant, it will be tested
3537 : * only once, so treat its cost as all startup cost.
3538 : */
3539 19813 : if (rinfo->pseudoconstant)
3540 : {
3541 : /* count one execution during startup */
3542 447 : locContext.total.startup += locContext.total.per_tuple;
3543 447 : locContext.total.per_tuple = 0;
3544 : }
3545 19813 : rinfo->eval_cost = locContext.total;
3546 : }
3547 80261 : context->total.startup += rinfo->eval_cost.startup;
3548 80261 : context->total.per_tuple += rinfo->eval_cost.per_tuple;
3549 : /* do NOT recurse into children */
3550 80261 : return false;
3551 : }
3552 :
3553 : /*
3554 : * For each operator or function node in the given tree, we charge the
3555 : * estimated execution cost given by pg_proc.procost (remember to multiply
3556 : * this by cpu_operator_cost).
3557 : *
3558 : * Vars and Consts are charged zero, and so are boolean operators (AND,
3559 : * OR, NOT). Simplistic, but a lot better than no model at all.
3560 : *
3561 : * Should we try to account for the possibility of short-circuit
3562 : * evaluation of AND/OR? Probably *not*, because that would make the
3563 : * results depend on the clause ordering, and we are not in any position
3564 : * to expect that the current ordering of the clauses is the one that's
3565 : * going to end up being used. The above per-RestrictInfo caching would
3566 : * not mix well with trying to re-order clauses anyway.
3567 : *
3568 : * Another issue that is entirely ignored here is that if a set-returning
3569 : * function is below top level in the tree, the functions/operators above
3570 : * it will need to be evaluated multiple times. In practical use, such
3571 : * cases arise so seldom as to not be worth the added complexity needed;
3572 : * moreover, since our rowcount estimates for functions tend to be pretty
3573 : * phony, the results would also be pretty phony.
3574 : */
3575 173745 : if (IsA(node, FuncExpr))
3576 : {
3577 15321 : context->total.per_tuple +=
3578 15321 : get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
3579 : }
3580 293897 : else if (IsA(node, OpExpr) ||
3581 270922 : IsA(node, DistinctExpr) ||
3582 135449 : IsA(node, NullIfExpr))
3583 : {
3584 : /* rely on struct equivalence to treat these all alike */
3585 22987 : set_opfuncid((OpExpr *) node);
3586 22987 : context->total.per_tuple +=
3587 45974 : get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
3588 : }
3589 135437 : else if (IsA(node, ScalarArrayOpExpr))
3590 : {
3591 : /*
3592 : * Estimate that the operator will be applied to about half of the
3593 : * array elements before the answer is determined.
3594 : */
3595 751 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
3596 751 : Node *arraynode = (Node *) lsecond(saop->args);
3597 :
3598 751 : set_sa_opfuncid(saop);
3599 1502 : context->total.per_tuple += get_func_cost(saop->opfuncid) *
3600 751 : cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
3601 : }
3602 266497 : else if (IsA(node, Aggref) ||
3603 131811 : IsA(node, WindowFunc))
3604 : {
3605 : /*
3606 : * Aggref and WindowFunc nodes are (and should be) treated like Vars,
3607 : * ie, zero execution cost in the current model, because they behave
3608 : * essentially like Vars at execution. We disregard the costs of
3609 : * their input expressions for the same reason. The actual execution
3610 : * costs of the aggregate/window functions and their arguments have to
3611 : * be factored into plan-node-specific costing of the Agg or WindowAgg
3612 : * plan node.
3613 : */
3614 3066 : return false; /* don't recurse into children */
3615 : }
3616 131620 : else if (IsA(node, CoerceViaIO))
3617 : {
3618 691 : CoerceViaIO *iocoerce = (CoerceViaIO *) node;
3619 : Oid iofunc;
3620 : Oid typioparam;
3621 : bool typisvarlena;
3622 :
3623 : /* check the result type's input function */
3624 691 : getTypeInputInfo(iocoerce->resulttype,
3625 : &iofunc, &typioparam);
3626 691 : context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3627 : /* check the input type's output function */
3628 691 : getTypeOutputInfo(exprType((Node *) iocoerce->arg),
3629 : &iofunc, &typisvarlena);
3630 691 : context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3631 : }
3632 130929 : else if (IsA(node, ArrayCoerceExpr))
3633 : {
3634 13 : ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
3635 13 : Node *arraynode = (Node *) acoerce->arg;
3636 :
3637 13 : if (OidIsValid(acoerce->elemfuncid))
3638 4 : context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
3639 2 : cpu_operator_cost * estimate_array_length(arraynode);
3640 : }
3641 130916 : else if (IsA(node, RowCompareExpr))
3642 : {
3643 : /* Conservatively assume we will check all the columns */
3644 17 : RowCompareExpr *rcexpr = (RowCompareExpr *) node;
3645 : ListCell *lc;
3646 :
3647 55 : foreach(lc, rcexpr->opnos)
3648 : {
3649 38 : Oid opid = lfirst_oid(lc);
3650 :
3651 76 : context->total.per_tuple += get_func_cost(get_opcode(opid)) *
3652 38 : cpu_operator_cost;
3653 : }
3654 : }
3655 261785 : else if (IsA(node, MinMaxExpr) ||
3656 261444 : IsA(node, SQLValueFunction) ||
3657 261091 : IsA(node, XmlExpr) ||
3658 259846 : IsA(node, CoerceToDomain) ||
3659 129313 : IsA(node, NextValueExpr))
3660 : {
3661 : /* Treat all these as having cost 1 */
3662 1616 : context->total.per_tuple += cpu_operator_cost;
3663 : }
3664 129283 : else if (IsA(node, CurrentOfExpr))
3665 : {
3666 : /* Report high cost to prevent selection of anything but TID scan */
3667 114 : context->total.startup += disable_cost;
3668 : }
3669 129169 : else if (IsA(node, SubLink))
3670 : {
3671 : /* This routine should not be applied to un-planned expressions */
3672 0 : elog(ERROR, "cannot handle unplanned sub-select");
3673 : }
3674 129169 : else if (IsA(node, SubPlan))
3675 : {
3676 : /*
3677 : * A subplan node in an expression typically indicates that the
3678 : * subplan will be executed on each evaluation, so charge accordingly.
3679 : * (Sub-selects that can be executed as InitPlans have already been
3680 : * removed from the expression.)
3681 : */
3682 2183 : SubPlan *subplan = (SubPlan *) node;
3683 :
3684 2183 : context->total.startup += subplan->startup_cost;
3685 2183 : context->total.per_tuple += subplan->per_call_cost;
3686 :
3687 : /*
3688 : * We don't want to recurse into the testexpr, because it was already
3689 : * counted in the SubPlan node's costs. So we're done.
3690 : */
3691 2183 : return false;
3692 : }
3693 126986 : else if (IsA(node, AlternativeSubPlan))
3694 : {
3695 : /*
3696 : * Arbitrarily use the first alternative plan for costing. (We should
3697 : * certainly only include one alternative, and we don't yet have
3698 : * enough information to know which one the executor is most likely to
3699 : * use.)
3700 : */
3701 99 : AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
3702 :
3703 99 : return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
3704 : context);
3705 : }
3706 126887 : else if (IsA(node, PlaceHolderVar))
3707 : {
3708 : /*
3709 : * A PlaceHolderVar should be given cost zero when considering general
3710 : * expression evaluation costs. The expense of doing the contained
3711 : * expression is charged as part of the tlist eval costs of the scan
3712 : * or join where the PHV is first computed (see set_rel_width and
3713 : * add_placeholders_to_joinrel). If we charged it again here, we'd be
3714 : * double-counting the cost for each level of plan that the PHV
3715 : * bubbles up through. Hence, return without recursing into the
3716 : * phexpr.
3717 : */
3718 85 : return false;
3719 : }
3720 :
3721 : /* recurse into children */
3722 168312 : return expression_tree_walker(node, cost_qual_eval_walker,
3723 : (void *) context);
3724 : }
3725 :
3726 : /*
3727 : * get_restriction_qual_cost
3728 : * Compute evaluation costs of a baserel's restriction quals, plus any
3729 : * movable join quals that have been pushed down to the scan.
3730 : * Results are returned into *qpqual_cost.
3731 : *
3732 : * This is a convenience subroutine that works for seqscans and other cases
3733 : * where all the given quals will be evaluated the hard way. It's not useful
3734 : * for cost_index(), for example, where the index machinery takes care of
3735 : * some of the quals. We assume baserestrictcost was previously set by
3736 : * set_baserel_size_estimates().
3737 : */
3738 : static void
3739 34210 : get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
3740 : ParamPathInfo *param_info,
3741 : QualCost *qpqual_cost)
3742 : {
3743 34210 : if (param_info)
3744 : {
3745 : /* Include costs of pushed-down clauses */
3746 5463 : cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
3747 :
3748 5463 : qpqual_cost->startup += baserel->baserestrictcost.startup;
3749 5463 : qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
3750 : }
3751 : else
3752 28747 : *qpqual_cost = baserel->baserestrictcost;
3753 34210 : }
3754 :
3755 :
3756 : /*
3757 : * compute_semi_anti_join_factors
3758 : * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
3759 : * can be expected to scan.
3760 : *
3761 : * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
3762 : * inner rows as soon as it finds a match to the current outer row.
3763 : * The same happens if we have detected the inner rel is unique.
3764 : * We should therefore adjust some of the cost components for this effect.
3765 : * This function computes some estimates needed for these adjustments.
3766 : * These estimates will be the same regardless of the particular paths used
3767 : * for the outer and inner relation, so we compute these once and then pass
3768 : * them to all the join cost estimation functions.
3769 : *
3770 : * Input parameters:
3771 : * outerrel: outer relation under consideration
3772 : * innerrel: inner relation under consideration
3773 : * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
3774 : * sjinfo: SpecialJoinInfo relevant to this join
3775 : * restrictlist: join quals
3776 : * Output parameters:
3777 : * *semifactors is filled in (see relation.h for field definitions)
3778 : */
3779 : void
3780 4609 : compute_semi_anti_join_factors(PlannerInfo *root,
3781 : RelOptInfo *outerrel,
3782 : RelOptInfo *innerrel,
3783 : JoinType jointype,
3784 : SpecialJoinInfo *sjinfo,
3785 : List *restrictlist,
3786 : SemiAntiJoinFactors *semifactors)
3787 : {
3788 : Selectivity jselec;
3789 : Selectivity nselec;
3790 : Selectivity avgmatch;
3791 : SpecialJoinInfo norm_sjinfo;
3792 : List *joinquals;
3793 : ListCell *l;
3794 :
3795 : /*
3796 : * In an ANTI join, we must ignore clauses that are "pushed down", since
3797 : * those won't affect the match logic. In a SEMI join, we do not
3798 : * distinguish joinquals from "pushed down" quals, so just use the whole
3799 : * restrictinfo list. For other outer join types, we should consider only
3800 : * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
3801 : */
3802 4609 : if (IS_OUTER_JOIN(jointype))
3803 : {
3804 1111 : joinquals = NIL;
3805 2404 : foreach(l, restrictlist)
3806 : {
3807 1293 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
3808 :
3809 1293 : if (!rinfo->is_pushed_down)
3810 1274 : joinquals = lappend(joinquals, rinfo);
3811 : }
3812 : }
3813 : else
3814 3498 : joinquals = restrictlist;
3815 :
3816 : /*
3817 : * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
3818 : */
3819 4609 : jselec = clauselist_selectivity(root,
3820 : joinquals,
3821 : 0,
3822 : (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
3823 : sjinfo);
3824 :
3825 : /*
3826 : * Also get the normal inner-join selectivity of the join clauses.
3827 : */
3828 4609 : norm_sjinfo.type = T_SpecialJoinInfo;
3829 4609 : norm_sjinfo.min_lefthand = outerrel->relids;
3830 4609 : norm_sjinfo.min_righthand = innerrel->relids;
3831 4609 : norm_sjinfo.syn_lefthand = outerrel->relids;
3832 4609 : norm_sjinfo.syn_righthand = innerrel->relids;
3833 4609 : norm_sjinfo.jointype = JOIN_INNER;
3834 : /* we don't bother trying to make the remaining fields valid */
3835 4609 : norm_sjinfo.lhs_strict = false;
3836 4609 : norm_sjinfo.delay_upper_joins = false;
3837 4609 : norm_sjinfo.semi_can_btree = false;
3838 4609 : norm_sjinfo.semi_can_hash = false;
3839 4609 : norm_sjinfo.semi_operators = NIL;
3840 4609 : norm_sjinfo.semi_rhs_exprs = NIL;
3841 :
3842 4609 : nselec = clauselist_selectivity(root,
3843 : joinquals,
3844 : 0,
3845 : JOIN_INNER,
3846 : &norm_sjinfo);
3847 :
3848 : /* Avoid leaking a lot of ListCells */
3849 4609 : if (IS_OUTER_JOIN(jointype))
3850 1111 : list_free(joinquals);
3851 :
3852 : /*
3853 : * jselec can be interpreted as the fraction of outer-rel rows that have
3854 : * any matches (this is true for both SEMI and ANTI cases). And nselec is
3855 : * the fraction of the Cartesian product that matches. So, the average
3856 : * number of matches for each outer-rel row that has at least one match is
3857 : * nselec * inner_rows / jselec.
3858 : *
3859 : * Note: it is correct to use the inner rel's "rows" count here, even
3860 : * though we might later be considering a parameterized inner path with
3861 : * fewer rows. This is because we have included all the join clauses in
3862 : * the selectivity estimate.
3863 : */
3864 4609 : if (jselec > 0) /* protect against zero divide */
3865 : {
3866 4603 : avgmatch = nselec * innerrel->rows / jselec;
3867 : /* Clamp to sane range */
3868 4603 : avgmatch = Max(1.0, avgmatch);
3869 : }
3870 : else
3871 6 : avgmatch = 1.0;
3872 :
3873 4609 : semifactors->outer_match_frac = jselec;
3874 4609 : semifactors->match_count = avgmatch;
3875 4609 : }
3876 :
3877 : /*
3878 : * has_indexed_join_quals
3879 : * Check whether all the joinquals of a nestloop join are used as
3880 : * inner index quals.
3881 : *
3882 : * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
3883 : * indexscan) that uses all the joinquals as indexquals, we can assume that an
3884 : * unmatched outer tuple is cheap to process, whereas otherwise it's probably
3885 : * expensive.
3886 : */
3887 : static bool
3888 14862 : has_indexed_join_quals(NestPath *joinpath)
3889 : {
3890 14862 : Relids joinrelids = joinpath->path.parent->relids;
3891 14862 : Path *innerpath = joinpath->innerjoinpath;
3892 : List *indexclauses;
3893 : bool found_one;
3894 : ListCell *lc;
3895 :
3896 : /* If join still has quals to evaluate, it's not fast */
3897 14862 : if (joinpath->joinrestrictinfo != NIL)
3898 10390 : return false;
3899 : /* Nor if the inner path isn't parameterized at all */
3900 4472 : if (innerpath->param_info == NULL)
3901 792 : return false;
3902 :
3903 : /* Find the indexclauses list for the inner scan */
3904 3680 : switch (innerpath->pathtype)
3905 : {
3906 : case T_IndexScan:
3907 : case T_IndexOnlyScan:
3908 3677 : indexclauses = ((IndexPath *) innerpath)->indexclauses;
3909 3677 : break;
3910 : case T_BitmapHeapScan:
3911 : {
3912 : /* Accept only a simple bitmap scan, not AND/OR cases */
3913 0 : Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
3914 :
3915 0 : if (IsA(bmqual, IndexPath))
3916 0 : indexclauses = ((IndexPath *) bmqual)->indexclauses;
3917 : else
3918 0 : return false;
3919 0 : break;
3920 : }
3921 : default:
3922 :
3923 : /*
3924 : * If it's not a simple indexscan, it probably doesn't run quickly
3925 : * for zero rows out, even if it's a parameterized path using all
3926 : * the joinquals.
3927 : */
3928 3 : return false;
3929 : }
3930 :
3931 : /*
3932 : * Examine the inner path's param clauses. Any that are from the outer
3933 : * path must be found in the indexclauses list, either exactly or in an
3934 : * equivalent form generated by equivclass.c. Also, we must find at least
3935 : * one such clause, else it's a clauseless join which isn't fast.
3936 : */
3937 3677 : found_one = false;
3938 7388 : foreach(lc, innerpath->param_info->ppi_clauses)
3939 : {
3940 3853 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
3941 :
3942 3853 : if (join_clause_is_movable_into(rinfo,
3943 3853 : innerpath->parent->relids,
3944 : joinrelids))
3945 : {
3946 6822 : if (!(list_member_ptr(indexclauses, rinfo) ||
3947 2969 : is_redundant_derived_clause(rinfo, indexclauses)))
3948 142 : return false;
3949 3711 : found_one = true;
3950 : }
3951 : }
3952 3535 : return found_one;
3953 : }
3954 :
3955 :
3956 : /*
3957 : * approx_tuple_count
3958 : * Quick-and-dirty estimation of the number of join rows passing
3959 : * a set of qual conditions.
3960 : *
3961 : * The quals can be either an implicitly-ANDed list of boolean expressions,
3962 : * or a list of RestrictInfo nodes (typically the latter).
3963 : *
3964 : * We intentionally compute the selectivity under JOIN_INNER rules, even
3965 : * if it's some type of outer join. This is appropriate because we are
3966 : * trying to figure out how many tuples pass the initial merge or hash
3967 : * join step.
3968 : *
3969 : * This is quick-and-dirty because we bypass clauselist_selectivity, and
3970 : * simply multiply the independent clause selectivities together. Now
3971 : * clauselist_selectivity often can't do any better than that anyhow, but
3972 : * for some situations (such as range constraints) it is smarter. However,
3973 : * we can't effectively cache the results of clauselist_selectivity, whereas
3974 : * the individual clause selectivities can be and are cached.
3975 : *
3976 : * Since we are only using the results to estimate how many potential
3977 : * output tuples are generated and passed through qpqual checking, it
3978 : * seems OK to live with the approximation.
3979 : */
3980 : static double
3981 10935 : approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
3982 : {
3983 : double tuples;
3984 10935 : double outer_tuples = path->outerjoinpath->rows;
3985 10935 : double inner_tuples = path->innerjoinpath->rows;
3986 : SpecialJoinInfo sjinfo;
3987 10935 : Selectivity selec = 1.0;
3988 : ListCell *l;
3989 :
3990 : /*
3991 : * Make up a SpecialJoinInfo for JOIN_INNER semantics.
3992 : */
3993 10935 : sjinfo.type = T_SpecialJoinInfo;
3994 10935 : sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
3995 10935 : sjinfo.min_righthand = path->innerjoinpath->parent->relids;
3996 10935 : sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
3997 10935 : sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
3998 10935 : sjinfo.jointype = JOIN_INNER;
3999 : /* we don't bother trying to make the remaining fields valid */
4000 10935 : sjinfo.lhs_strict = false;
4001 10935 : sjinfo.delay_upper_joins = false;
4002 10935 : sjinfo.semi_can_btree = false;
4003 10935 : sjinfo.semi_can_hash = false;
4004 10935 : sjinfo.semi_operators = NIL;
4005 10935 : sjinfo.semi_rhs_exprs = NIL;
4006 :
4007 : /* Get the approximate selectivity */
4008 22600 : foreach(l, quals)
4009 : {
4010 11665 : Node *qual = (Node *) lfirst(l);
4011 :
4012 : /* Note that clause_selectivity will be able to cache its result */
4013 11665 : selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
4014 : }
4015 :
4016 : /* Apply it to the input relation sizes */
4017 10935 : tuples = selec * outer_tuples * inner_tuples;
4018 :
4019 10935 : return clamp_row_est(tuples);
4020 : }
4021 :
4022 :
4023 : /*
4024 : * set_baserel_size_estimates
4025 : * Set the size estimates for the given base relation.
4026 : *
4027 : * The rel's targetlist and restrictinfo list must have been constructed
4028 : * already, and rel->tuples must be set.
4029 : *
4030 : * We set the following fields of the rel node:
4031 : * rows: the estimated number of output tuples (after applying
4032 : * restriction clauses).
4033 : * width: the estimated average output tuple width in bytes.
4034 : * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
4035 : */
4036 : void
4037 18663 : set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4038 : {
4039 : double nrows;
4040 :
4041 : /* Should only be applied to base relations */
4042 18663 : Assert(rel->relid > 0);
4043 :
4044 37326 : nrows = rel->tuples *
4045 18663 : clauselist_selectivity(root,
4046 : rel->baserestrictinfo,
4047 : 0,
4048 : JOIN_INNER,
4049 : NULL);
4050 :
4051 18663 : rel->rows = clamp_row_est(nrows);
4052 :
4053 18663 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
4054 :
4055 18663 : set_rel_width(root, rel);
4056 18663 : }
4057 :
4058 : /*
4059 : * get_parameterized_baserel_size
4060 : * Make a size estimate for a parameterized scan of a base relation.
4061 : *
4062 : * 'param_clauses' lists the additional join clauses to be used.
4063 : *
4064 : * set_baserel_size_estimates must have been applied already.
4065 : */
4066 : double
4067 3659 : get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
4068 : List *param_clauses)
4069 : {
4070 : List *allclauses;
4071 : double nrows;
4072 :
4073 : /*
4074 : * Estimate the number of rows returned by the parameterized scan, knowing
4075 : * that it will apply all the extra join clauses as well as the rel's own
4076 : * restriction clauses. Note that we force the clauses to be treated as
4077 : * non-join clauses during selectivity estimation.
4078 : */
4079 3659 : allclauses = list_concat(list_copy(param_clauses),
4080 : rel->baserestrictinfo);
4081 7318 : nrows = rel->tuples *
4082 3659 : clauselist_selectivity(root,
4083 : allclauses,
4084 3659 : rel->relid, /* do not use 0! */
4085 : JOIN_INNER,
4086 : NULL);
4087 3659 : nrows = clamp_row_est(nrows);
4088 : /* For safety, make sure result is not more than the base estimate */
4089 3659 : if (nrows > rel->rows)
4090 0 : nrows = rel->rows;
4091 3659 : return nrows;
4092 : }
4093 :
4094 : /*
4095 : * set_joinrel_size_estimates
4096 : * Set the size estimates for the given join relation.
4097 : *
4098 : * The rel's targetlist must have been constructed already, and a
4099 : * restriction clause list that matches the given component rels must
4100 : * be provided.
4101 : *
4102 : * Since there is more than one way to make a joinrel for more than two
4103 : * base relations, the results we get here could depend on which component
4104 : * rel pair is provided. In theory we should get the same answers no matter
4105 : * which pair is provided; in practice, since the selectivity estimation
4106 : * routines don't handle all cases equally well, we might not. But there's
4107 : * not much to be done about it. (Would it make sense to repeat the
4108 : * calculations for each pair of input rels that's encountered, and somehow
4109 : * average the results? Probably way more trouble than it's worth, and
4110 : * anyway we must keep the rowcount estimate the same for all paths for the
4111 : * joinrel.)
4112 : *
4113 : * We set only the rows field here. The reltarget field was already set by
4114 : * build_joinrel_tlist, and baserestrictcost is not used for join rels.
4115 : */
4116 : void
4117 5351 : set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
4118 : RelOptInfo *outer_rel,
4119 : RelOptInfo *inner_rel,
4120 : SpecialJoinInfo *sjinfo,
4121 : List *restrictlist)
4122 : {
4123 5351 : rel->rows = calc_joinrel_size_estimate(root,
4124 : outer_rel,
4125 : inner_rel,
4126 : outer_rel->rows,
4127 : inner_rel->rows,
4128 : sjinfo,
4129 : restrictlist);
4130 5351 : }
4131 :
4132 : /*
4133 : * get_parameterized_joinrel_size
4134 : * Make a size estimate for a parameterized scan of a join relation.
4135 : *
4136 : * 'rel' is the joinrel under consideration.
4137 : * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
4138 : * produce the relations being joined.
4139 : * 'sjinfo' is any SpecialJoinInfo relevant to this join.
4140 : * 'restrict_clauses' lists the join clauses that need to be applied at the
4141 : * join node (including any movable clauses that were moved down to this join,
4142 : * and not including any movable clauses that were pushed down into the
4143 : * child paths).
4144 : *
4145 : * set_joinrel_size_estimates must have been applied already.
4146 : */
4147 : double
4148 176 : get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
4149 : Path *outer_path,
4150 : Path *inner_path,
4151 : SpecialJoinInfo *sjinfo,
4152 : List *restrict_clauses)
4153 : {
4154 : double nrows;
4155 :
4156 : /*
4157 : * Estimate the number of rows returned by the parameterized join as the
4158 : * sizes of the input paths times the selectivity of the clauses that have
4159 : * ended up at this join node.
4160 : *
4161 : * As with set_joinrel_size_estimates, the rowcount estimate could depend
4162 : * on the pair of input paths provided, though ideally we'd get the same
4163 : * estimate for any pair with the same parameterization.
4164 : */
4165 176 : nrows = calc_joinrel_size_estimate(root,
4166 : outer_path->parent,
4167 : inner_path->parent,
4168 : outer_path->rows,
4169 : inner_path->rows,
4170 : sjinfo,
4171 : restrict_clauses);
4172 : /* For safety, make sure result is not more than the base estimate */
4173 176 : if (nrows > rel->rows)
4174 2 : nrows = rel->rows;
4175 176 : return nrows;
4176 : }
4177 :
4178 : /*
4179 : * calc_joinrel_size_estimate
4180 : * Workhorse for set_joinrel_size_estimates and
4181 : * get_parameterized_joinrel_size.
4182 : *
4183 : * outer_rel/inner_rel are the relations being joined, but they should be
4184 : * assumed to have sizes outer_rows/inner_rows; those numbers might be less
4185 : * than what rel->rows says, when we are considering parameterized paths.
4186 : */
4187 : static double
4188 5527 : calc_joinrel_size_estimate(PlannerInfo *root,
4189 : RelOptInfo *outer_rel,
4190 : RelOptInfo *inner_rel,
4191 : double outer_rows,
4192 : double inner_rows,
4193 : SpecialJoinInfo *sjinfo,
4194 : List *restrictlist_in)
4195 : {
4196 : /* This apparently-useless variable dodges a compiler bug in VS2013: */
4197 5527 : List *restrictlist = restrictlist_in;
4198 5527 : JoinType jointype = sjinfo->jointype;
4199 : Selectivity fkselec;
4200 : Selectivity jselec;
4201 : Selectivity pselec;
4202 : double nrows;
4203 :
4204 : /*
4205 : * Compute joinclause selectivity. Note that we are only considering
4206 : * clauses that become restriction clauses at this join level; we are not
4207 : * double-counting them because they were not considered in estimating the
4208 : * sizes of the component rels.
4209 : *
4210 : * First, see whether any of the joinclauses can be matched to known FK
4211 : * constraints. If so, drop those clauses from the restrictlist, and
4212 : * instead estimate their selectivity using FK semantics. (We do this
4213 : * without regard to whether said clauses are local or "pushed down".
4214 : * Probably, an FK-matching clause could never be seen as pushed down at
4215 : * an outer join, since it would be strict and hence would be grounds for
4216 : * join strength reduction.) fkselec gets the net selectivity for
4217 : * FK-matching clauses, or 1.0 if there are none.
4218 : */
4219 5527 : fkselec = get_foreign_key_join_selectivity(root,
4220 : outer_rel->relids,
4221 : inner_rel->relids,
4222 : sjinfo,
4223 : &restrictlist);
4224 :
4225 : /*
4226 : * For an outer join, we have to distinguish the selectivity of the join's
4227 : * own clauses (JOIN/ON conditions) from any clauses that were "pushed
4228 : * down". For inner joins we just count them all as joinclauses.
4229 : */
4230 5527 : if (IS_OUTER_JOIN(jointype))
4231 : {
4232 1262 : List *joinquals = NIL;
4233 1262 : List *pushedquals = NIL;
4234 : ListCell *l;
4235 :
4236 : /* Grovel through the clauses to separate into two lists */
4237 2722 : foreach(l, restrictlist)
4238 : {
4239 1460 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
4240 :
4241 1460 : if (rinfo->is_pushed_down)
4242 35 : pushedquals = lappend(pushedquals, rinfo);
4243 : else
4244 1425 : joinquals = lappend(joinquals, rinfo);
4245 : }
4246 :
4247 : /* Get the separate selectivities */
4248 1262 : jselec = clauselist_selectivity(root,
4249 : joinquals,
4250 : 0,
4251 : jointype,
4252 : sjinfo);
4253 1262 : pselec = clauselist_selectivity(root,
4254 : pushedquals,
4255 : 0,
4256 : jointype,
4257 : sjinfo);
4258 :
4259 : /* Avoid leaking a lot of ListCells */
4260 1262 : list_free(joinquals);
4261 1262 : list_free(pushedquals);
4262 : }
4263 : else
4264 : {
4265 4265 : jselec = clauselist_selectivity(root,
4266 : restrictlist,
4267 : 0,
4268 : jointype,
4269 : sjinfo);
4270 4265 : pselec = 0.0; /* not used, keep compiler quiet */
4271 : }
4272 :
4273 : /*
4274 : * Basically, we multiply size of Cartesian product by selectivity.
4275 : *
4276 : * If we are doing an outer join, take that into account: the joinqual
4277 : * selectivity has to be clamped using the knowledge that the output must
4278 : * be at least as large as the non-nullable input. However, any
4279 : * pushed-down quals are applied after the outer join, so their
4280 : * selectivity applies fully.
4281 : *
4282 : * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
4283 : * of LHS rows that have matches, and we apply that straightforwardly.
4284 : */
4285 5527 : switch (jointype)
4286 : {
4287 : case JOIN_INNER:
4288 3941 : nrows = outer_rows * inner_rows * fkselec * jselec;
4289 : /* pselec not used */
4290 3941 : break;
4291 : case JOIN_LEFT:
4292 988 : nrows = outer_rows * inner_rows * fkselec * jselec;
4293 988 : if (nrows < outer_rows)
4294 344 : nrows = outer_rows;
4295 988 : nrows *= pselec;
4296 988 : break;
4297 : case JOIN_FULL:
4298 28 : nrows = outer_rows * inner_rows * fkselec * jselec;
4299 28 : if (nrows < outer_rows)
4300 9 : nrows = outer_rows;
4301 28 : if (nrows < inner_rows)
4302 2 : nrows = inner_rows;
4303 28 : nrows *= pselec;
4304 28 : break;
4305 : case JOIN_SEMI:
4306 324 : nrows = outer_rows * fkselec * jselec;
4307 : /* pselec not used */
4308 324 : break;
4309 : case JOIN_ANTI:
4310 246 : nrows = outer_rows * (1.0 - fkselec * jselec);
4311 246 : nrows *= pselec;
4312 246 : break;
4313 : default:
4314 : /* other values not expected here */
4315 0 : elog(ERROR, "unrecognized join type: %d", (int) jointype);
4316 : nrows = 0; /* keep compiler quiet */
4317 : break;
4318 : }
4319 :
4320 5527 : return clamp_row_est(nrows);
4321 : }
4322 :
4323 : /*
4324 : * get_foreign_key_join_selectivity
4325 : * Estimate join selectivity for foreign-key-related clauses.
4326 : *
4327 : * Remove any clauses that can be matched to FK constraints from *restrictlist,
4328 : * and return a substitute estimate of their selectivity. 1.0 is returned
4329 : * when there are no such clauses.
4330 : *
4331 : * The reason for treating such clauses specially is that we can get better
4332 : * estimates this way than by relying on clauselist_selectivity(), especially
4333 : * for multi-column FKs where that function's assumption that the clauses are
4334 : * independent falls down badly. But even with single-column FKs, we may be
4335 : * able to get a better answer when the pg_statistic stats are missing or out
4336 : * of date.
4337 : */
4338 : static Selectivity
4339 5527 : get_foreign_key_join_selectivity(PlannerInfo *root,
4340 : Relids outer_relids,
4341 : Relids inner_relids,
4342 : SpecialJoinInfo *sjinfo,
4343 : List **restrictlist)
4344 : {
4345 5527 : Selectivity fkselec = 1.0;
4346 5527 : JoinType jointype = sjinfo->jointype;
4347 5527 : List *worklist = *restrictlist;
4348 : ListCell *lc;
4349 :
4350 : /* Consider each FK constraint that is known to match the query */
4351 5706 : foreach(lc, root->fkey_list)
4352 : {
4353 179 : ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
4354 : bool ref_is_outer;
4355 : List *removedlist;
4356 : ListCell *cell;
4357 : ListCell *prev;
4358 : ListCell *next;
4359 :
4360 : /*
4361 : * This FK is not relevant unless it connects a baserel on one side of
4362 : * this join to a baserel on the other side.
4363 : */
4364 309 : if (bms_is_member(fkinfo->con_relid, outer_relids) &&
4365 130 : bms_is_member(fkinfo->ref_relid, inner_relids))
4366 100 : ref_is_outer = false;
4367 127 : else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
4368 48 : bms_is_member(fkinfo->con_relid, inner_relids))
4369 15 : ref_is_outer = true;
4370 : else
4371 64 : continue;
4372 :
4373 : /*
4374 : * If we're dealing with a semi/anti join, and the FK's referenced
4375 : * relation is on the outside, then knowledge of the FK doesn't help
4376 : * us figure out what we need to know (which is the fraction of outer
4377 : * rows that have matches). On the other hand, if the referenced rel
4378 : * is on the inside, then all outer rows must have matches in the
4379 : * referenced table (ignoring nulls). But any restriction or join
4380 : * clauses that filter that table will reduce the fraction of matches.
4381 : * We can account for restriction clauses, but it's too hard to guess
4382 : * how many table rows would get through a join that's inside the RHS.
4383 : * Hence, if either case applies, punt and ignore the FK.
4384 : */
4385 115 : if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
4386 49 : (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
4387 0 : continue;
4388 :
4389 : /*
4390 : * Modify the restrictlist by removing clauses that match the FK (and
4391 : * putting them into removedlist instead). It seems unsafe to modify
4392 : * the originally-passed List structure, so we make a shallow copy the
4393 : * first time through.
4394 : */
4395 115 : if (worklist == *restrictlist)
4396 90 : worklist = list_copy(worklist);
4397 :
4398 115 : removedlist = NIL;
4399 115 : prev = NULL;
4400 223 : for (cell = list_head(worklist); cell; cell = next)
4401 : {
4402 108 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
4403 108 : bool remove_it = false;
4404 : int i;
4405 :
4406 108 : next = lnext(cell);
4407 : /* Drop this clause if it matches any column of the FK */
4408 131 : for (i = 0; i < fkinfo->nkeys; i++)
4409 : {
4410 127 : if (rinfo->parent_ec)
4411 : {
4412 : /*
4413 : * EC-derived clauses can only match by EC. It is okay to
4414 : * consider any clause derived from the same EC as
4415 : * matching the FK: even if equivclass.c chose to generate
4416 : * a clause equating some other pair of Vars, it could
4417 : * have generated one equating the FK's Vars. So for
4418 : * purposes of estimation, we can act as though it did so.
4419 : *
4420 : * Note: checking parent_ec is a bit of a cheat because
4421 : * there are EC-derived clauses that don't have parent_ec
4422 : * set; but such clauses must compare expressions that
4423 : * aren't just Vars, so they cannot match the FK anyway.
4424 : */
4425 39 : if (fkinfo->eclass[i] == rinfo->parent_ec)
4426 : {
4427 39 : remove_it = true;
4428 39 : break;
4429 : }
4430 : }
4431 : else
4432 : {
4433 : /*
4434 : * Otherwise, see if rinfo was previously matched to FK as
4435 : * a "loose" clause.
4436 : */
4437 88 : if (list_member_ptr(fkinfo->rinfos[i], rinfo))
4438 : {
4439 65 : remove_it = true;
4440 65 : break;
4441 : }
4442 : }
4443 : }
4444 108 : if (remove_it)
4445 : {
4446 104 : worklist = list_delete_cell(worklist, cell, prev);
4447 104 : removedlist = lappend(removedlist, rinfo);
4448 : }
4449 : else
4450 4 : prev = cell;
4451 : }
4452 :
4453 : /*
4454 : * If we failed to remove all the matching clauses we expected to
4455 : * find, chicken out and ignore this FK; applying its selectivity
4456 : * might result in double-counting. Put any clauses we did manage to
4457 : * remove back into the worklist.
4458 : *
4459 : * Since the matching clauses are known not outerjoin-delayed, they
4460 : * should certainly have appeared in the initial joinclause list. If
4461 : * we didn't find them, they must have been matched to, and removed
4462 : * by, some other FK in a previous iteration of this loop. (A likely
4463 : * case is that two FKs are matched to the same EC; there will be only
4464 : * one EC-derived clause in the initial list, so the first FK will
4465 : * consume it.) Applying both FKs' selectivity independently risks
4466 : * underestimating the join size; in particular, this would undo one
4467 : * of the main things that ECs were invented for, namely to avoid
4468 : * double-counting the selectivity of redundant equality conditions.
4469 : * Later we might think of a reasonable way to combine the estimates,
4470 : * but for now, just punt, since this is a fairly uncommon situation.
4471 : */
4472 230 : if (list_length(removedlist) !=
4473 115 : (fkinfo->nmatched_ec + fkinfo->nmatched_ri))
4474 : {
4475 27 : worklist = list_concat(worklist, removedlist);
4476 27 : continue;
4477 : }
4478 :
4479 : /*
4480 : * Finally we get to the payoff: estimate selectivity using the
4481 : * knowledge that each referencing row will match exactly one row in
4482 : * the referenced table.
4483 : *
4484 : * XXX that's not true in the presence of nulls in the referencing
4485 : * column(s), so in principle we should derate the estimate for those.
4486 : * However (1) if there are any strict restriction clauses for the
4487 : * referencing column(s) elsewhere in the query, derating here would
4488 : * be double-counting the null fraction, and (2) it's not very clear
4489 : * how to combine null fractions for multiple referencing columns. So
4490 : * we do nothing for now about correcting for nulls.
4491 : *
4492 : * XXX another point here is that if either side of an FK constraint
4493 : * is an inheritance parent, we estimate as though the constraint
4494 : * covers all its children as well. This is not an unreasonable
4495 : * assumption for a referencing table, ie the user probably applied
4496 : * identical constraints to all child tables (though perhaps we ought
4497 : * to check that). But it's not possible to have done that for a
4498 : * referenced table. Fortunately, precisely because that doesn't
4499 : * work, it is uncommon in practice to have an FK referencing a parent
4500 : * table. So, at least for now, disregard inheritance here.
4501 : */
4502 88 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
4503 24 : {
4504 : /*
4505 : * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
4506 : * referenced table is exactly the inside of the join. The join
4507 : * selectivity is defined as the fraction of LHS rows that have
4508 : * matches. The FK implies that every LHS row has a match *in the
4509 : * referenced table*; but any restriction clauses on it will
4510 : * reduce the number of matches. Hence we take the join
4511 : * selectivity as equal to the selectivity of the table's
4512 : * restriction clauses, which is rows / tuples; but we must guard
4513 : * against tuples == 0.
4514 : */
4515 24 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
4516 24 : double ref_tuples = Max(ref_rel->tuples, 1.0);
4517 :
4518 24 : fkselec *= ref_rel->rows / ref_tuples;
4519 : }
4520 : else
4521 : {
4522 : /*
4523 : * Otherwise, selectivity is exactly 1/referenced-table-size; but
4524 : * guard against tuples == 0. Note we should use the raw table
4525 : * tuple count, not any estimate of its filtered or joined size.
4526 : */
4527 64 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
4528 64 : double ref_tuples = Max(ref_rel->tuples, 1.0);
4529 :
4530 64 : fkselec *= 1.0 / ref_tuples;
4531 : }
4532 : }
4533 :
4534 5527 : *restrictlist = worklist;
4535 5527 : return fkselec;
4536 : }
4537 :
4538 : /*
4539 : * set_subquery_size_estimates
4540 : * Set the size estimates for a base relation that is a subquery.
4541 : *
4542 : * The rel's targetlist and restrictinfo list must have been constructed
4543 : * already, and the Paths for the subquery must have been completed.
4544 : * We look at the subquery's PlannerInfo to extract data.
4545 : *
4546 : * We set the same fields as set_baserel_size_estimates.
4547 : */
4548 : void
4549 1059 : set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4550 : {
4551 1059 : PlannerInfo *subroot = rel->subroot;
4552 : RelOptInfo *sub_final_rel;
4553 : RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
4554 : ListCell *lc;
4555 :
4556 : /* Should only be applied to base relations that are subqueries */
4557 1059 : Assert(rel->relid > 0);
4558 : #ifdef USE_ASSERT_CHECKING
4559 1059 : rte = planner_rt_fetch(rel->relid, root);
4560 1059 : Assert(rte->rtekind == RTE_SUBQUERY);
4561 : #endif
4562 :
4563 : /*
4564 : * Copy raw number of output rows from subquery. All of its paths should
4565 : * have the same output rowcount, so just look at cheapest-total.
4566 : */
4567 1059 : sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
4568 1059 : rel->tuples = sub_final_rel->cheapest_total_path->rows;
4569 :
4570 : /*
4571 : * Compute per-output-column width estimates by examining the subquery's
4572 : * targetlist. For any output that is a plain Var, get the width estimate
4573 : * that was made while planning the subquery. Otherwise, we leave it to
4574 : * set_rel_width to fill in a datatype-based default estimate.
4575 : */
4576 3282 : foreach(lc, subroot->parse->targetList)
4577 : {
4578 2223 : TargetEntry *te = lfirst_node(TargetEntry, lc);
4579 2223 : Node *texpr = (Node *) te->expr;
4580 2223 : int32 item_width = 0;
4581 :
4582 : /* junk columns aren't visible to upper query */
4583 2223 : if (te->resjunk)
4584 46 : continue;
4585 :
4586 : /*
4587 : * The subquery could be an expansion of a view that's had columns
4588 : * added to it since the current query was parsed, so that there are
4589 : * non-junk tlist columns in it that don't correspond to any column
4590 : * visible at our query level. Ignore such columns.
4591 : */
4592 2177 : if (te->resno < rel->min_attr || te->resno > rel->max_attr)
4593 0 : continue;
4594 :
4595 : /*
4596 : * XXX This currently doesn't work for subqueries containing set
4597 : * operations, because the Vars in their tlists are bogus references
4598 : * to the first leaf subquery, which wouldn't give the right answer
4599 : * even if we could still get to its PlannerInfo.
4600 : *
4601 : * Also, the subquery could be an appendrel for which all branches are
4602 : * known empty due to constraint exclusion, in which case
4603 : * set_append_rel_pathlist will have left the attr_widths set to zero.
4604 : *
4605 : * In either case, we just leave the width estimate zero until
4606 : * set_rel_width fixes it.
4607 : */
4608 3220 : if (IsA(texpr, Var) &&
4609 1043 : subroot->parse->setOperations == NULL)
4610 : {
4611 1007 : Var *var = (Var *) texpr;
4612 1007 : RelOptInfo *subrel = find_base_rel(subroot, var->varno);
4613 :
4614 1007 : item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
4615 : }
4616 2177 : rel->attr_widths[te->resno - rel->min_attr] = item_width;
4617 : }
4618 :
4619 : /* Now estimate number of output rows, etc */
4620 1059 : set_baserel_size_estimates(root, rel);
4621 1059 : }
4622 :
4623 : /*
4624 : * set_function_size_estimates
4625 : * Set the size estimates for a base relation that is a function call.
4626 : *
4627 : * The rel's targetlist and restrictinfo list must have been constructed
4628 : * already.
4629 : *
4630 : * We set the same fields as set_baserel_size_estimates.
4631 : */
4632 : void
4633 1327 : set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4634 : {
4635 : RangeTblEntry *rte;
4636 : ListCell *lc;
4637 :
4638 : /* Should only be applied to base relations that are functions */
4639 1327 : Assert(rel->relid > 0);
4640 1327 : rte = planner_rt_fetch(rel->relid, root);
4641 1327 : Assert(rte->rtekind == RTE_FUNCTION);
4642 :
4643 : /*
4644 : * Estimate number of rows the functions will return. The rowcount of the
4645 : * node is that of the largest function result.
4646 : */
4647 1327 : rel->tuples = 0;
4648 2700 : foreach(lc, rte->functions)
4649 : {
4650 1373 : RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
4651 1373 : double ntup = expression_returns_set_rows(rtfunc->funcexpr);
4652 :
4653 1373 : if (ntup > rel->tuples)
4654 1332 : rel->tuples = ntup;
4655 : }
4656 :
4657 : /* Now estimate number of output rows, etc */
4658 1327 : set_baserel_size_estimates(root, rel);
4659 1327 : }
4660 :
4661 : /*
4662 : * set_function_size_estimates
4663 : * Set the size estimates for a base relation that is a function call.
4664 : *
4665 : * The rel's targetlist and restrictinfo list must have been constructed
4666 : * already.
4667 : *
4668 : * We set the same fields as set_tablefunc_size_estimates.
4669 : */
4670 : void
4671 22 : set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4672 : {
4673 : RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
4674 :
4675 : /* Should only be applied to base relations that are functions */
4676 22 : Assert(rel->relid > 0);
4677 : #ifdef USE_ASSERT_CHECKING
4678 22 : rte = planner_rt_fetch(rel->relid, root);
4679 22 : Assert(rte->rtekind == RTE_TABLEFUNC);
4680 : #endif
4681 :
4682 22 : rel->tuples = 100;
4683 :
4684 : /* Now estimate number of output rows, etc */
4685 22 : set_baserel_size_estimates(root, rel);
4686 22 : }
4687 :
4688 : /*
4689 : * set_values_size_estimates
4690 : * Set the size estimates for a base relation that is a values list.
4691 : *
4692 : * The rel's targetlist and restrictinfo list must have been constructed
4693 : * already.
4694 : *
4695 : * We set the same fields as set_baserel_size_estimates.
4696 : */
4697 : void
4698 463 : set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4699 : {
4700 : RangeTblEntry *rte;
4701 :
4702 : /* Should only be applied to base relations that are values lists */
4703 463 : Assert(rel->relid > 0);
4704 463 : rte = planner_rt_fetch(rel->relid, root);
4705 463 : Assert(rte->rtekind == RTE_VALUES);
4706 :
4707 : /*
4708 : * Estimate number of rows the values list will return. We know this
4709 : * precisely based on the list length (well, barring set-returning
4710 : * functions in list items, but that's a refinement not catered for
4711 : * anywhere else either).
4712 : */
4713 463 : rel->tuples = list_length(rte->values_lists);
4714 :
4715 : /* Now estimate number of output rows, etc */
4716 463 : set_baserel_size_estimates(root, rel);
4717 463 : }
4718 :
4719 : /*
4720 : * set_cte_size_estimates
4721 : * Set the size estimates for a base relation that is a CTE reference.
4722 : *
4723 : * The rel's targetlist and restrictinfo list must have been constructed
4724 : * already, and we need an estimate of the number of rows returned by the CTE
4725 : * (if a regular CTE) or the non-recursive term (if a self-reference).
4726 : *
4727 : * We set the same fields as set_baserel_size_estimates.
4728 : */
4729 : void
4730 202 : set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
4731 : {
4732 : RangeTblEntry *rte;
4733 :
4734 : /* Should only be applied to base relations that are CTE references */
4735 202 : Assert(rel->relid > 0);
4736 202 : rte = planner_rt_fetch(rel->relid, root);
4737 202 : Assert(rte->rtekind == RTE_CTE);
4738 :
4739 202 : if (rte->self_reference)
4740 : {
4741 : /*
4742 : * In a self-reference, arbitrarily assume the average worktable size
4743 : * is about 10 times the nonrecursive term's size.
4744 : */
4745 40 : rel->tuples = 10 * cte_rows;
4746 : }
4747 : else
4748 : {
4749 : /* Otherwise just believe the CTE's rowcount estimate */
4750 162 : rel->tuples = cte_rows;
4751 : }
4752 :
4753 : /* Now estimate number of output rows, etc */
4754 202 : set_baserel_size_estimates(root, rel);
4755 202 : }
4756 :
4757 : /*
4758 : * set_namedtuplestore_size_estimates
4759 : * Set the size estimates for a base relation that is a tuplestore reference.
4760 : *
4761 : * The rel's targetlist and restrictinfo list must have been constructed
4762 : * already.
4763 : *
4764 : * We set the same fields as set_baserel_size_estimates.
4765 : */
4766 : void
4767 43 : set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4768 : {
4769 : RangeTblEntry *rte;
4770 :
4771 : /* Should only be applied to base relations that are tuplestore references */
4772 43 : Assert(rel->relid > 0);
4773 43 : rte = planner_rt_fetch(rel->relid, root);
4774 43 : Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
4775 :
4776 : /*
4777 : * Use the estimate provided by the code which is generating the named
4778 : * tuplestore. In some cases, the actual number might be available; in
4779 : * others the same plan will be re-used, so a "typical" value might be
4780 : * estimated and used.
4781 : */
4782 43 : rel->tuples = rte->enrtuples;
4783 43 : if (rel->tuples < 0)
4784 0 : rel->tuples = 1000;
4785 :
4786 : /* Now estimate number of output rows, etc */
4787 43 : set_baserel_size_estimates(root, rel);
4788 43 : }
4789 :
4790 : /*
4791 : * set_foreign_size_estimates
4792 : * Set the size estimates for a base relation that is a foreign table.
4793 : *
4794 : * There is not a whole lot that we can do here; the foreign-data wrapper
4795 : * is responsible for producing useful estimates. We can do a decent job
4796 : * of estimating baserestrictcost, so we set that, and we also set up width
4797 : * using what will be purely datatype-driven estimates from the targetlist.
4798 : * There is no way to do anything sane with the rows value, so we just put
4799 : * a default estimate and hope that the wrapper can improve on it. The
4800 : * wrapper's GetForeignRelSize function will be called momentarily.
4801 : *
4802 : * The rel's targetlist and restrictinfo list must have been constructed
4803 : * already.
4804 : */
4805 : void
4806 0 : set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4807 : {
4808 : /* Should only be applied to base relations */
4809 0 : Assert(rel->relid > 0);
4810 :
4811 0 : rel->rows = 1000; /* entirely bogus default estimate */
4812 :
4813 0 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
4814 :
4815 0 : set_rel_width(root, rel);
4816 0 : }
4817 :
4818 :
4819 : /*
4820 : * set_rel_width
4821 : * Set the estimated output width of a base relation.
4822 : *
4823 : * The estimated output width is the sum of the per-attribute width estimates
4824 : * for the actually-referenced columns, plus any PHVs or other expressions
4825 : * that have to be calculated at this relation. This is the amount of data
4826 : * we'd need to pass upwards in case of a sort, hash, etc.
4827 : *
4828 : * This function also sets reltarget->cost, so it's a bit misnamed now.
4829 : *
4830 : * NB: this works best on plain relations because it prefers to look at
4831 : * real Vars. For subqueries, set_subquery_size_estimates will already have
4832 : * copied up whatever per-column estimates were made within the subquery,
4833 : * and for other types of rels there isn't much we can do anyway. We fall
4834 : * back on (fairly stupid) datatype-based width estimates if we can't get
4835 : * any better number.
4836 : *
4837 : * The per-attribute width estimates are cached for possible re-use while
4838 : * building join relations or post-scan/join pathtargets.
4839 : */
4840 : static void
4841 18663 : set_rel_width(PlannerInfo *root, RelOptInfo *rel)
4842 : {
4843 18663 : Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
4844 18663 : int32 tuple_width = 0;
4845 18663 : bool have_wholerow_var = false;
4846 : ListCell *lc;
4847 :
4848 : /* Vars are assumed to have cost zero, but other exprs do not */
4849 18663 : rel->reltarget->cost.startup = 0;
4850 18663 : rel->reltarget->cost.per_tuple = 0;
4851 :
4852 60057 : foreach(lc, rel->reltarget->exprs)
4853 : {
4854 41394 : Node *node = (Node *) lfirst(lc);
4855 :
4856 : /*
4857 : * Ordinarily, a Var in a rel's targetlist must belong to that rel;
4858 : * but there are corner cases involving LATERAL references where that
4859 : * isn't so. If the Var has the wrong varno, fall through to the
4860 : * generic case (it doesn't seem worth the trouble to be any smarter).
4861 : */
4862 82607 : if (IsA(node, Var) &&
4863 41213 : ((Var *) node)->varno == rel->relid)
4864 12436 : {
4865 41210 : Var *var = (Var *) node;
4866 : int ndx;
4867 : int32 item_width;
4868 :
4869 41210 : Assert(var->varattno >= rel->min_attr);
4870 41210 : Assert(var->varattno <= rel->max_attr);
4871 :
4872 41210 : ndx = var->varattno - rel->min_attr;
4873 :
4874 : /*
4875 : * If it's a whole-row Var, we'll deal with it below after we have
4876 : * already cached as many attr widths as possible.
4877 : */
4878 41210 : if (var->varattno == 0)
4879 : {
4880 172 : have_wholerow_var = true;
4881 172 : continue;
4882 : }
4883 :
4884 : /*
4885 : * The width may have been cached already (especially if it's a
4886 : * subquery), so don't duplicate effort.
4887 : */
4888 41038 : if (rel->attr_widths[ndx] > 0)
4889 : {
4890 12327 : tuple_width += rel->attr_widths[ndx];
4891 12327 : continue;
4892 : }
4893 :
4894 : /* Try to get column width from statistics */
4895 28711 : if (reloid != InvalidOid && var->varattno > 0)
4896 : {
4897 18378 : item_width = get_attavgwidth(reloid, var->varattno);
4898 18378 : if (item_width > 0)
4899 : {
4900 16275 : rel->attr_widths[ndx] = item_width;
4901 16275 : tuple_width += item_width;
4902 16275 : continue;
4903 : }
4904 : }
4905 :
4906 : /*
4907 : * Not a plain relation, or can't find statistics for it. Estimate
4908 : * using just the type info.
4909 : */
4910 12436 : item_width = get_typavgwidth(var->vartype, var->vartypmod);
4911 12436 : Assert(item_width > 0);
4912 12436 : rel->attr_widths[ndx] = item_width;
4913 12436 : tuple_width += item_width;
4914 : }
4915 184 : else if (IsA(node, PlaceHolderVar))
4916 : {
4917 : /*
4918 : * We will need to evaluate the PHV's contained expression while
4919 : * scanning this rel, so be sure to include it in reltarget->cost.
4920 : */
4921 37 : PlaceHolderVar *phv = (PlaceHolderVar *) node;
4922 37 : PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);
4923 : QualCost cost;
4924 :
4925 37 : tuple_width += phinfo->ph_width;
4926 37 : cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
4927 37 : rel->reltarget->cost.startup += cost.startup;
4928 37 : rel->reltarget->cost.per_tuple += cost.per_tuple;
4929 : }
4930 : else
4931 : {
4932 : /*
4933 : * We could be looking at an expression pulled up from a subquery,
4934 : * or a ROW() representing a whole-row child Var, etc. Do what we
4935 : * can using the expression type information.
4936 : */
4937 : int32 item_width;
4938 : QualCost cost;
4939 :
4940 147 : item_width = get_typavgwidth(exprType(node), exprTypmod(node));
4941 147 : Assert(item_width > 0);
4942 147 : tuple_width += item_width;
4943 : /* Not entirely clear if we need to account for cost, but do so */
4944 147 : cost_qual_eval_node(&cost, node, root);
4945 147 : rel->reltarget->cost.startup += cost.startup;
4946 147 : rel->reltarget->cost.per_tuple += cost.per_tuple;
4947 : }
4948 : }
4949 :
4950 : /*
4951 : * If we have a whole-row reference, estimate its width as the sum of
4952 : * per-column widths plus heap tuple header overhead.
4953 : */
4954 18663 : if (have_wholerow_var)
4955 : {
4956 172 : int32 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
4957 :
4958 172 : if (reloid != InvalidOid)
4959 : {
4960 : /* Real relation, so estimate true tuple width */
4961 110 : wholerow_width += get_relation_data_width(reloid,
4962 110 : rel->attr_widths - rel->min_attr);
4963 : }
4964 : else
4965 : {
4966 : /* Do what we can with info for a phony rel */
4967 : AttrNumber i;
4968 :
4969 167 : for (i = 1; i <= rel->max_attr; i++)
4970 105 : wholerow_width += rel->attr_widths[i - rel->min_attr];
4971 : }
4972 :
4973 172 : rel->attr_widths[0 - rel->min_attr] = wholerow_width;
4974 :
4975 : /*
4976 : * Include the whole-row Var as part of the output tuple. Yes, that
4977 : * really is what happens at runtime.
4978 : */
4979 172 : tuple_width += wholerow_width;
4980 : }
4981 :
4982 18663 : Assert(tuple_width >= 0);
4983 18663 : rel->reltarget->width = tuple_width;
4984 18663 : }
4985 :
4986 : /*
4987 : * set_pathtarget_cost_width
4988 : * Set the estimated eval cost and output width of a PathTarget tlist.
4989 : *
4990 : * As a notational convenience, returns the same PathTarget pointer passed in.
4991 : *
4992 : * Most, though not quite all, uses of this function occur after we've run
4993 : * set_rel_width() for base relations; so we can usually obtain cached width
4994 : * estimates for Vars. If we can't, fall back on datatype-based width
4995 : * estimates. Present early-planning uses of PathTargets don't need accurate
4996 : * widths badly enough to justify going to the catalogs for better data.
4997 : */
4998 : PathTarget *
4999 28702 : set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
5000 : {
5001 28702 : int32 tuple_width = 0;
5002 : ListCell *lc;
5003 :
5004 : /* Vars are assumed to have cost zero, but other exprs do not */
5005 28702 : target->cost.startup = 0;
5006 28702 : target->cost.per_tuple = 0;
5007 :
5008 86714 : foreach(lc, target->exprs)
5009 : {
5010 58012 : Node *node = (Node *) lfirst(lc);
5011 :
5012 58012 : if (IsA(node, Var))
5013 : {
5014 27877 : Var *var = (Var *) node;
5015 : int32 item_width;
5016 :
5017 : /* We should not see any upper-level Vars here */
5018 27877 : Assert(var->varlevelsup == 0);
5019 :
5020 : /* Try to get data from RelOptInfo cache */
5021 27877 : if (var->varno < root->simple_rel_array_size)
5022 : {
5023 27877 : RelOptInfo *rel = root->simple_rel_array[var->varno];
5024 :
5025 55430 : if (rel != NULL &&
5026 55106 : var->varattno >= rel->min_attr &&
5027 27553 : var->varattno <= rel->max_attr)
5028 : {
5029 27553 : int ndx = var->varattno - rel->min_attr;
5030 :
5031 27553 : if (rel->attr_widths[ndx] > 0)
5032 : {
5033 27459 : tuple_width += rel->attr_widths[ndx];
5034 27459 : continue;
5035 : }
5036 : }
5037 : }
5038 :
5039 : /*
5040 : * No cached data available, so estimate using just the type info.
5041 : */
5042 418 : item_width = get_typavgwidth(var->vartype, var->vartypmod);
5043 418 : Assert(item_width > 0);
5044 418 : tuple_width += item_width;
5045 : }
5046 : else
5047 : {
5048 : /*
5049 : * Handle general expressions using type info.
5050 : */
5051 : int32 item_width;
5052 : QualCost cost;
5053 :
5054 30135 : item_width = get_typavgwidth(exprType(node), exprTypmod(node));
5055 30135 : Assert(item_width > 0);
5056 30135 : tuple_width += item_width;
5057 :
5058 : /* Account for cost, too */
5059 30135 : cost_qual_eval_node(&cost, node, root);
5060 30135 : target->cost.startup += cost.startup;
5061 30135 : target->cost.per_tuple += cost.per_tuple;
5062 : }
5063 : }
5064 :
5065 28702 : Assert(tuple_width >= 0);
5066 28702 : target->width = tuple_width;
5067 :
5068 28702 : return target;
5069 : }
5070 :
5071 : /*
5072 : * relation_byte_size
5073 : * Estimate the storage space in bytes for a given number of tuples
5074 : * of a given width (size in bytes).
5075 : */
5076 : static double
5077 74509 : relation_byte_size(double tuples, int width)
5078 : {
5079 74509 : return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
5080 : }
5081 :
5082 : /*
5083 : * page_size
5084 : * Returns an estimate of the number of pages covered by a given
5085 : * number of tuples of a given width (size in bytes).
5086 : */
5087 : static double
5088 242 : page_size(double tuples, int width)
5089 : {
5090 242 : return ceil(relation_byte_size(tuples, width) / BLCKSZ);
5091 : }
5092 :
5093 : /*
5094 : * Estimate the fraction of the work that each worker will do given the
5095 : * number of workers budgeted for the path.
5096 : */
5097 : static double
5098 1089 : get_parallel_divisor(Path *path)
5099 : {
5100 1089 : double parallel_divisor = path->parallel_workers;
5101 : double leader_contribution;
5102 :
5103 : /*
5104 : * Early experience with parallel query suggests that when there is only
5105 : * one worker, the leader often makes a very substantial contribution to
5106 : * executing the parallel portion of the plan, but as more workers are
5107 : * added, it does less and less, because it's busy reading tuples from the
5108 : * workers and doing whatever non-parallel post-processing is needed. By
5109 : * the time we reach 4 workers, the leader no longer makes a meaningful
5110 : * contribution. Thus, for now, estimate that the leader spends 30% of
5111 : * its time servicing each worker, and the remainder executing the
5112 : * parallel plan.
5113 : */
5114 1089 : leader_contribution = 1.0 - (0.3 * path->parallel_workers);
5115 1089 : if (leader_contribution > 0)
5116 1019 : parallel_divisor += leader_contribution;
5117 :
5118 1089 : return parallel_divisor;
5119 : }
5120 :
5121 : /*
5122 : * compute_bitmap_pages
5123 : *
5124 : * compute number of pages fetched from heap in bitmap heap scan.
5125 : */
5126 : double
5127 19595 : compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
5128 : int loop_count, Cost *cost, double *tuple)
5129 : {
5130 : Cost indexTotalCost;
5131 : Selectivity indexSelectivity;
5132 : double T;
5133 : double pages_fetched;
5134 : double tuples_fetched;
5135 :
5136 : /*
5137 : * Fetch total cost of obtaining the bitmap, as well as its total
5138 : * selectivity.
5139 : */
5140 19595 : cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
5141 :
5142 : /*
5143 : * Estimate number of main-table pages fetched.
5144 : */
5145 19595 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
5146 :
5147 19595 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
5148 :
5149 19595 : if (loop_count > 1)
5150 : {
5151 : /*
5152 : * For repeated bitmap scans, scale up the number of tuples fetched in
5153 : * the Mackert and Lohman formula by the number of scans, so that we
5154 : * estimate the number of pages fetched by all the scans. Then
5155 : * pro-rate for one scan.
5156 : */
5157 3960 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
5158 : baserel->pages,
5159 : get_indexpath_pages(bitmapqual),
5160 : root);
5161 3960 : pages_fetched /= loop_count;
5162 : }
5163 : else
5164 : {
5165 : /*
5166 : * For a single scan, the number of heap pages that need to be fetched
5167 : * is the same as the Mackert and Lohman formula for the case T <= b
5168 : * (ie, no re-reads needed).
5169 : */
5170 15635 : pages_fetched =
5171 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
5172 : }
5173 :
5174 19595 : if (pages_fetched >= T)
5175 1963 : pages_fetched = T;
5176 : else
5177 17632 : pages_fetched = ceil(pages_fetched);
5178 :
5179 19595 : if (cost)
5180 14743 : *cost = indexTotalCost;
5181 19595 : if (tuple)
5182 14743 : *tuple = tuples_fetched;
5183 :
5184 19595 : return pages_fetched;
5185 : }
|