
pg_hint_plan: Commit

first release


Commit MetaInfo

Revision: 5f581e498e3739caa637cc25c7900da458550e41 (tree)
Time: 2018-06-14 19:15:14
Author: Kyotaro Horiguchi <horiguchi.kyotaro@lab....>
Committer: Kyotaro Horiguchi

Log Message

Support PG11

Support PG11. This version is simply adapted to PG11; there are no behavior
changes except for the Parallel hint. PG11 introduces many new parallel
features, and these inevitably have a heavy impact on the Parallel hint: it no
longer works in the same way as in earlier versions except in quite simple
cases.
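
For reference, a minimal sketch of such a simple case, using one of the
regression tables that appear in the expected output below (the worker count
and the soft/hard enforcement keyword are illustrative and follow
pg_hint_plan's documented Parallel hint form; they are not taken from this
commit):

    /*+ Parallel(t1 8 hard) */
    EXPLAIN (COSTS false) SELECT * FROM s1.t1;

A single-relation scan like this should still be forced to the requested
number of workers. Plans that involve PG11's new parallel machinery (parallel
append, parallel hash, partitionwise joins) are where the hint's behavior
diverges from earlier versions.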

Change Summary

Incremental Difference

--- a/core.c
+++ b/core.c
@@ -14,11 +14,8 @@
1414 *
1515 * static functions:
1616 * set_plain_rel_pathlist()
17- * set_append_rel_pathlist()
1817 * add_paths_to_append_rel()
19- * generate_mergeappend_paths()
20- * get_cheapest_parameterized_child_path()
21- * accumulate_append_subpath()
18+ * try_partitionwise_join()
2219 *
2320 * public functions:
2421 * standard_join_search(): This funcion is not static. The reason for
@@ -48,6 +45,9 @@
4845 *-------------------------------------------------------------------------
4946 */
5047
48+static void populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
49+ RelOptInfo *rel2, RelOptInfo *joinrel,
50+ SpecialJoinInfo *sjinfo, List *restrictlist);
5151
5252 /*
5353 * set_plain_rel_pathlist
@@ -132,483 +132,22 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
132132 if (IS_DUMMY_REL(childrel))
133133 continue;
134134
135+ /* Bubble up childrel's partitioned children. */
136+ if (rel->part_scheme)
137+ rel->partitioned_child_rels =
138+ list_concat(rel->partitioned_child_rels,
139+ list_copy(childrel->partitioned_child_rels));
140+
135141 /*
136142 * Child is live, so add it to the live_childrels list for use below.
137143 */
138144 live_childrels = lappend(live_childrels, childrel);
139145 }
140146
141- /* Add paths to the "append" relation. */
147+ /* Add paths to the append relation. */
142148 add_paths_to_append_rel(root, rel, live_childrels);
143149 }
144150
145-/*
146- * add_paths_to_append_rel
147- * Generate paths for given "append" relation given the set of non-dummy
148- * child rels.
149- *
150- * The function collects all parameterizations and orderings supported by the
151- * non-dummy children. For every such parameterization or ordering, it creates
152- * an append path collecting one path from each non-dummy child with given
153- * parameterization or ordering. Similarly it collects partial paths from
154- * non-dummy children to create partial append paths.
155- */
156-static void
157-add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
158- List *live_childrels)
159-{
160- List *subpaths = NIL;
161- bool subpaths_valid = true;
162- List *partial_subpaths = NIL;
163- bool partial_subpaths_valid = true;
164- List *all_child_pathkeys = NIL;
165- List *all_child_outers = NIL;
166- ListCell *l;
167- List *partitioned_rels = NIL;
168- RangeTblEntry *rte;
169- bool build_partitioned_rels = false;
170-
171- /*
172- * A plain relation will already have a PartitionedChildRelInfo if it is
173- * partitioned. For a subquery RTE, no PartitionedChildRelInfo exists; we
174- * collect all partitioned_rels associated with any child. (This assumes
175- * that we don't need to look through multiple levels of subquery RTEs; if
176- * we ever do, we could create a PartitionedChildRelInfo with the
177- * accumulated list of partitioned_rels which would then be found when
178- * populated our parent rel with paths. For the present, that appears to
179- * be unnecessary.)
180- */
181- rte = planner_rt_fetch(rel->relid, root);
182- switch (rte->rtekind)
183- {
184- case RTE_RELATION:
185- if (rte->relkind == RELKIND_PARTITIONED_TABLE)
186- {
187- partitioned_rels =
188- get_partitioned_child_rels(root, rel->relid);
189- Assert(list_length(partitioned_rels) >= 1);
190- }
191- break;
192- case RTE_SUBQUERY:
193- build_partitioned_rels = true;
194- break;
195- default:
196- elog(ERROR, "unexpcted rtekind: %d", (int) rte->rtekind);
197- }
198-
199- /*
200- * For every non-dummy child, remember the cheapest path. Also, identify
201- * all pathkeys (orderings) and parameterizations (required_outer sets)
202- * available for the non-dummy member relations.
203- */
204- foreach(l, live_childrels)
205- {
206- RelOptInfo *childrel = lfirst(l);
207- ListCell *lcp;
208-
209- /*
210- * If we need to build partitioned_rels, accumulate the partitioned
211- * rels for this child.
212- */
213- if (build_partitioned_rels)
214- {
215- List *cprels;
216-
217- cprels = get_partitioned_child_rels(root, childrel->relid);
218- partitioned_rels = list_concat(partitioned_rels,
219- list_copy(cprels));
220- }
221-
222- /*
223- * If child has an unparameterized cheapest-total path, add that to
224- * the unparameterized Append path we are constructing for the parent.
225- * If not, there's no workable unparameterized path.
226- */
227- if (childrel->cheapest_total_path->param_info == NULL)
228- subpaths = accumulate_append_subpath(subpaths,
229- childrel->cheapest_total_path);
230- else
231- subpaths_valid = false;
232-
233- /* Same idea, but for a partial plan. */
234- if (childrel->partial_pathlist != NIL)
235- partial_subpaths = accumulate_append_subpath(partial_subpaths,
236- linitial(childrel->partial_pathlist));
237- else
238- partial_subpaths_valid = false;
239-
240- /*
241- * Collect lists of all the available path orderings and
242- * parameterizations for all the children. We use these as a
243- * heuristic to indicate which sort orderings and parameterizations we
244- * should build Append and MergeAppend paths for.
245- */
246- foreach(lcp, childrel->pathlist)
247- {
248- Path *childpath = (Path *) lfirst(lcp);
249- List *childkeys = childpath->pathkeys;
250- Relids childouter = PATH_REQ_OUTER(childpath);
251-
252- /* Unsorted paths don't contribute to pathkey list */
253- if (childkeys != NIL)
254- {
255- ListCell *lpk;
256- bool found = false;
257-
258- /* Have we already seen this ordering? */
259- foreach(lpk, all_child_pathkeys)
260- {
261- List *existing_pathkeys = (List *) lfirst(lpk);
262-
263- if (compare_pathkeys(existing_pathkeys,
264- childkeys) == PATHKEYS_EQUAL)
265- {
266- found = true;
267- break;
268- }
269- }
270- if (!found)
271- {
272- /* No, so add it to all_child_pathkeys */
273- all_child_pathkeys = lappend(all_child_pathkeys,
274- childkeys);
275- }
276- }
277-
278- /* Unparameterized paths don't contribute to param-set list */
279- if (childouter)
280- {
281- ListCell *lco;
282- bool found = false;
283-
284- /* Have we already seen this param set? */
285- foreach(lco, all_child_outers)
286- {
287- Relids existing_outers = (Relids) lfirst(lco);
288-
289- if (bms_equal(existing_outers, childouter))
290- {
291- found = true;
292- break;
293- }
294- }
295- if (!found)
296- {
297- /* No, so add it to all_child_outers */
298- all_child_outers = lappend(all_child_outers,
299- childouter);
300- }
301- }
302- }
303- }
304-
305- /*
306- * If we found unparameterized paths for all children, build an unordered,
307- * unparameterized Append path for the rel. (Note: this is correct even
308- * if we have zero or one live subpath due to constraint exclusion.)
309- */
310- if (subpaths_valid)
311- add_path(rel, (Path *) create_append_path(rel, subpaths, NULL, 0,
312- partitioned_rels));
313-
314- /*
315- * Consider an append of partial unordered, unparameterized partial paths.
316- */
317- if (partial_subpaths_valid)
318- {
319- AppendPath *appendpath;
320- ListCell *lc;
321- int parallel_workers = 0;
322-
323- /*
324- * Decide on the number of workers to request for this append path.
325- * For now, we just use the maximum value from among the members. It
326- * might be useful to use a higher number if the Append node were
327- * smart enough to spread out the workers, but it currently isn't.
328- */
329- foreach(lc, partial_subpaths)
330- {
331- Path *path = lfirst(lc);
332-
333- parallel_workers = Max(parallel_workers, path->parallel_workers);
334- }
335- Assert(parallel_workers > 0);
336-
337- /* Generate a partial append path. */
338- appendpath = create_append_path(rel, partial_subpaths, NULL,
339- parallel_workers, partitioned_rels);
340- add_partial_path(rel, (Path *) appendpath);
341- }
342-
343- /*
344- * Also build unparameterized MergeAppend paths based on the collected
345- * list of child pathkeys.
346- */
347- if (subpaths_valid)
348- generate_mergeappend_paths(root, rel, live_childrels,
349- all_child_pathkeys,
350- partitioned_rels);
351-
352- /*
353- * Build Append paths for each parameterization seen among the child rels.
354- * (This may look pretty expensive, but in most cases of practical
355- * interest, the child rels will expose mostly the same parameterizations,
356- * so that not that many cases actually get considered here.)
357- *
358- * The Append node itself cannot enforce quals, so all qual checking must
359- * be done in the child paths. This means that to have a parameterized
360- * Append path, we must have the exact same parameterization for each
361- * child path; otherwise some children might be failing to check the
362- * moved-down quals. To make them match up, we can try to increase the
363- * parameterization of lesser-parameterized paths.
364- */
365- foreach(l, all_child_outers)
366- {
367- Relids required_outer = (Relids) lfirst(l);
368- ListCell *lcr;
369-
370- /* Select the child paths for an Append with this parameterization */
371- subpaths = NIL;
372- subpaths_valid = true;
373- foreach(lcr, live_childrels)
374- {
375- RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
376- Path *subpath;
377-
378- subpath = get_cheapest_parameterized_child_path(root,
379- childrel,
380- required_outer);
381- if (subpath == NULL)
382- {
383- /* failed to make a suitable path for this child */
384- subpaths_valid = false;
385- break;
386- }
387- subpaths = accumulate_append_subpath(subpaths, subpath);
388- }
389-
390- if (subpaths_valid)
391- add_path(rel, (Path *)
392- create_append_path(rel, subpaths, required_outer, 0,
393- partitioned_rels));
394- }
395-}
396-
397-
398-/*
399- * generate_mergeappend_paths
400- * Generate MergeAppend paths for an append relation
401- *
402- * Generate a path for each ordering (pathkey list) appearing in
403- * all_child_pathkeys.
404- *
405- * We consider both cheapest-startup and cheapest-total cases, ie, for each
406- * interesting ordering, collect all the cheapest startup subpaths and all the
407- * cheapest total paths, and build a MergeAppend path for each case.
408- *
409- * We don't currently generate any parameterized MergeAppend paths. While
410- * it would not take much more code here to do so, it's very unclear that it
411- * is worth the planning cycles to investigate such paths: there's little
412- * use for an ordered path on the inside of a nestloop. In fact, it's likely
413- * that the current coding of add_path would reject such paths out of hand,
414- * because add_path gives no credit for sort ordering of parameterized paths,
415- * and a parameterized MergeAppend is going to be more expensive than the
416- * corresponding parameterized Append path. If we ever try harder to support
417- * parameterized mergejoin plans, it might be worth adding support for
418- * parameterized MergeAppends to feed such joins. (See notes in
419- * optimizer/README for why that might not ever happen, though.)
420- */
421-static void
422-generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
423- List *live_childrels,
424- List *all_child_pathkeys,
425- List *partitioned_rels)
426-{
427- ListCell *lcp;
428-
429- foreach(lcp, all_child_pathkeys)
430- {
431- List *pathkeys = (List *) lfirst(lcp);
432- List *startup_subpaths = NIL;
433- List *total_subpaths = NIL;
434- bool startup_neq_total = false;
435- ListCell *lcr;
436-
437- /* Select the child paths for this ordering... */
438- foreach(lcr, live_childrels)
439- {
440- RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
441- Path *cheapest_startup,
442- *cheapest_total;
443-
444- /* Locate the right paths, if they are available. */
445- cheapest_startup =
446- get_cheapest_path_for_pathkeys(childrel->pathlist,
447- pathkeys,
448- NULL,
449- STARTUP_COST,
450- false);
451- cheapest_total =
452- get_cheapest_path_for_pathkeys(childrel->pathlist,
453- pathkeys,
454- NULL,
455- TOTAL_COST,
456- false);
457-
458- /*
459- * If we can't find any paths with the right order just use the
460- * cheapest-total path; we'll have to sort it later.
461- */
462- if (cheapest_startup == NULL || cheapest_total == NULL)
463- {
464- cheapest_startup = cheapest_total =
465- childrel->cheapest_total_path;
466- /* Assert we do have an unparameterized path for this child */
467- Assert(cheapest_total->param_info == NULL);
468- }
469-
470- /*
471- * Notice whether we actually have different paths for the
472- * "cheapest" and "total" cases; frequently there will be no point
473- * in two create_merge_append_path() calls.
474- */
475- if (cheapest_startup != cheapest_total)
476- startup_neq_total = true;
477-
478- startup_subpaths =
479- accumulate_append_subpath(startup_subpaths, cheapest_startup);
480- total_subpaths =
481- accumulate_append_subpath(total_subpaths, cheapest_total);
482- }
483-
484- /* ... and build the MergeAppend paths */
485- add_path(rel, (Path *) create_merge_append_path(root,
486- rel,
487- startup_subpaths,
488- pathkeys,
489- NULL,
490- partitioned_rels));
491- if (startup_neq_total)
492- add_path(rel, (Path *) create_merge_append_path(root,
493- rel,
494- total_subpaths,
495- pathkeys,
496- NULL,
497- partitioned_rels));
498- }
499-}
500-
501-
502-/*
503- * get_cheapest_parameterized_child_path
504- * Get cheapest path for this relation that has exactly the requested
505- * parameterization.
506- *
507- * Returns NULL if unable to create such a path.
508- */
509-static Path *
510-get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
511- Relids required_outer)
512-{
513- Path *cheapest;
514- ListCell *lc;
515-
516- /*
517- * Look up the cheapest existing path with no more than the needed
518- * parameterization. If it has exactly the needed parameterization, we're
519- * done.
520- */
521- cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
522- NIL,
523- required_outer,
524- TOTAL_COST,
525- false);
526- Assert(cheapest != NULL);
527- if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
528- return cheapest;
529-
530- /*
531- * Otherwise, we can "reparameterize" an existing path to match the given
532- * parameterization, which effectively means pushing down additional
533- * joinquals to be checked within the path's scan. However, some existing
534- * paths might check the available joinquals already while others don't;
535- * therefore, it's not clear which existing path will be cheapest after
536- * reparameterization. We have to go through them all and find out.
537- */
538- cheapest = NULL;
539- foreach(lc, rel->pathlist)
540- {
541- Path *path = (Path *) lfirst(lc);
542-
543- /* Can't use it if it needs more than requested parameterization */
544- if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
545- continue;
546-
547- /*
548- * Reparameterization can only increase the path's cost, so if it's
549- * already more expensive than the current cheapest, forget it.
550- */
551- if (cheapest != NULL &&
552- compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
553- continue;
554-
555- /* Reparameterize if needed, then recheck cost */
556- if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
557- {
558- path = reparameterize_path(root, path, required_outer, 1.0);
559- if (path == NULL)
560- continue; /* failed to reparameterize this one */
561- Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
562-
563- if (cheapest != NULL &&
564- compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
565- continue;
566- }
567-
568- /* We have a new best path */
569- cheapest = path;
570- }
571-
572- /* Return the best path, or NULL if we found no suitable candidate */
573- return cheapest;
574-}
575-
576-
577-/*
578- * accumulate_append_subpath
579- * Add a subpath to the list being built for an Append or MergeAppend
580- *
581- * It's possible that the child is itself an Append or MergeAppend path, in
582- * which case we can "cut out the middleman" and just add its child paths to
583- * our own list. (We don't try to do this earlier because we need to apply
584- * both levels of transformation to the quals.)
585- *
586- * Note that if we omit a child MergeAppend in this way, we are effectively
587- * omitting a sort step, which seems fine: if the parent is to be an Append,
588- * its result would be unsorted anyway, while if the parent is to be a
589- * MergeAppend, there's no point in a separate sort on a child.
590- */
591-static List *
592-accumulate_append_subpath(List *subpaths, Path *path)
593-{
594- if (IsA(path, AppendPath))
595- {
596- AppendPath *apath = (AppendPath *) path;
597-
598- /* list_copy is important here to avoid sharing list substructure */
599- return list_concat(subpaths, list_copy(apath->subpaths));
600- }
601- else if (IsA(path, MergeAppendPath))
602- {
603- MergeAppendPath *mpath = (MergeAppendPath *) path;
604-
605- /* list_copy is important here to avoid sharing list substructure */
606- return list_concat(subpaths, list_copy(mpath->subpaths));
607- }
608- else
609- return lappend(subpaths, path);
610-}
611-
612151
613152 /*
614153 * standard_join_search
@@ -678,18 +217,28 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
678217 join_search_one_level(root, lev);
679218
680219 /*
681- * Run generate_gather_paths() for each just-processed joinrel. We
682- * could not do this earlier because both regular and partial paths
683- * can get added to a particular joinrel at multiple times within
684- * join_search_one_level. After that, we're done creating paths for
685- * the joinrel, so run set_cheapest().
220+ * Run generate_partitionwise_join_paths() and generate_gather_paths()
221+ * for each just-processed joinrel. We could not do this earlier
222+ * because both regular and partial paths can get added to a
223+ * particular joinrel at multiple times within join_search_one_level.
224+ *
225+ * After that, we're done creating paths for the joinrel, so run
226+ * set_cheapest().
686227 */
687228 foreach(lc, root->join_rel_level[lev])
688229 {
689230 rel = (RelOptInfo *) lfirst(lc);
690231
691- /* Create GatherPaths for any useful partial paths for rel */
692- generate_gather_paths(root, rel);
232+ /* Create paths for partitionwise joins. */
233+ generate_partitionwise_join_paths(root, rel);
234+
235+ /*
236+ * Except for the topmost scan/join rel, consider gathering
237+ * partial paths. We'll do the same for the topmost scan/join rel
238+ * once we know the final targetlist (see grouping_planner).
239+ */
240+ if (lev < levels_needed)
241+ generate_gather_paths(root, rel, false);
693242
694243 /* Find and save the cheapest paths for this rel */
695244 set_cheapest(rel);
@@ -723,7 +272,8 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
723272 {
724273 int parallel_workers;
725274
726- parallel_workers = compute_parallel_worker(rel, rel->pages, -1);
275+ parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
276+ max_parallel_workers_per_gather);
727277
728278 /* If any limit was set to zero, the user doesn't want a parallel scan. */
729279 if (parallel_workers <= 0)
@@ -1022,7 +572,7 @@ make_rels_by_clauseless_joins(PlannerInfo *root,
1022572 *
1023573 * On success, *sjinfo_p is set to NULL if this is to be a plain inner join,
1024574 * else it's set to point to the associated SpecialJoinInfo node. Also,
1025- * *reversed_p is set TRUE if the given relations need to be swapped to
575+ * *reversed_p is set true if the given relations need to be swapped to
1026576 * match the SpecialJoinInfo node.
1027577 */
1028578 static bool
@@ -1400,6 +950,50 @@ is_dummy_rel(RelOptInfo *rel)
1400950 return IS_DUMMY_REL(rel);
1401951 }
1402952
953+/*
954+ * Mark a relation as proven empty.
955+ *
956+ * During GEQO planning, this can get invoked more than once on the same
957+ * baserel struct, so it's worth checking to see if the rel is already marked
958+ * dummy.
959+ *
960+ * Also, when called during GEQO join planning, we are in a short-lived
961+ * memory context. We must make sure that the dummy path attached to a
962+ * baserel survives the GEQO cycle, else the baserel is trashed for future
963+ * GEQO cycles. On the other hand, when we are marking a joinrel during GEQO,
964+ * we don't want the dummy path to clutter the main planning context. Upshot
965+ * is that the best solution is to explicitly make the dummy path in the same
966+ * context the given RelOptInfo is in.
967+ */
968+void
969+mark_dummy_rel(RelOptInfo *rel)
970+{
971+ MemoryContext oldcontext;
972+
973+ /* Already marked? */
974+ if (is_dummy_rel(rel))
975+ return;
976+
977+ /* No, so choose correct context to make the dummy path in */
978+ oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
979+
980+ /* Set dummy size estimate */
981+ rel->rows = 0;
982+
983+ /* Evict any previously chosen paths */
984+ rel->pathlist = NIL;
985+ rel->partial_pathlist = NIL;
986+
987+ /* Set up the dummy path */
988+ add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL, NULL,
989+ 0, false, NIL, -1));
990+
991+ /* Set or update cheapest_total_path and related fields */
992+ set_cheapest(rel);
993+
994+ MemoryContextSwitchTo(oldcontext);
995+}
996+
1403997
1404998 /*
1405999 * restriction_is_constant_false --- is a restrictlist just FALSE?
@@ -1410,10 +1004,13 @@ is_dummy_rel(RelOptInfo *rel)
14101004 * decide there's no match for an outer row, which is pretty stupid. So,
14111005 * we need to detect the case.
14121006 *
1413- * If only_pushed_down is TRUE, then consider only pushed-down quals.
1007+ * If only_pushed_down is true, then consider only quals that are pushed-down
1008+ * from the point of view of the joinrel.
14141009 */
14151010 static bool
1416-restriction_is_constant_false(List *restrictlist, bool only_pushed_down)
1011+restriction_is_constant_false(List *restrictlist,
1012+ RelOptInfo *joinrel,
1013+ bool only_pushed_down)
14171014 {
14181015 ListCell *lc;
14191016
@@ -1427,7 +1024,7 @@ restriction_is_constant_false(List *restrictlist, bool only_pushed_down)
14271024 {
14281025 RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
14291026
1430- if (only_pushed_down && !rinfo->is_pushed_down)
1027+ if (only_pushed_down && !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
14311028 continue;
14321029
14331030 if (rinfo->clause && IsA(rinfo->clause, Const))
@@ -1443,3 +1040,127 @@ restriction_is_constant_false(List *restrictlist, bool only_pushed_down)
14431040 }
14441041 return false;
14451042 }
1043+
1044+/*
1045+ * Assess whether join between given two partitioned relations can be broken
1046+ * down into joins between matching partitions; a technique called
1047+ * "partitionwise join"
1048+ *
1049+ * Partitionwise join is possible when a. Joining relations have same
1050+ * partitioning scheme b. There exists an equi-join between the partition keys
1051+ * of the two relations.
1052+ *
1053+ * Partitionwise join is planned as follows (details: optimizer/README.)
1054+ *
1055+ * 1. Create the RelOptInfos for joins between matching partitions i.e
1056+ * child-joins and add paths to them.
1057+ *
1058+ * 2. Construct Append or MergeAppend paths across the set of child joins.
1059+ * This second phase is implemented by generate_partitionwise_join_paths().
1060+ *
1061+ * The RelOptInfo, SpecialJoinInfo and restrictlist for each child join are
1062+ * obtained by translating the respective parent join structures.
1063+ */
1064+static void
1065+try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
1066+ RelOptInfo *joinrel, SpecialJoinInfo *parent_sjinfo,
1067+ List *parent_restrictlist)
1068+{
1069+ int nparts;
1070+ int cnt_parts;
1071+
1072+ /* Guard against stack overflow due to overly deep partition hierarchy. */
1073+ check_stack_depth();
1074+
1075+ /* Nothing to do, if the join relation is not partitioned. */
1076+ if (!IS_PARTITIONED_REL(joinrel))
1077+ return;
1078+
1079+ /*
1080+ * Since this join relation is partitioned, all the base relations
1081+ * participating in this join must be partitioned and so are all the
1082+ * intermediate join relations.
1083+ */
1084+ Assert(IS_PARTITIONED_REL(rel1) && IS_PARTITIONED_REL(rel2));
1085+ Assert(REL_HAS_ALL_PART_PROPS(rel1) && REL_HAS_ALL_PART_PROPS(rel2));
1086+
1087+ /*
1088+ * The partition scheme of the join relation should match that of the
1089+ * joining relations.
1090+ */
1091+ Assert(joinrel->part_scheme == rel1->part_scheme &&
1092+ joinrel->part_scheme == rel2->part_scheme);
1093+
1094+ /*
1095+ * Since we allow partitionwise join only when the partition bounds of the
1096+ * joining relations exactly match, the partition bounds of the join
1097+ * should match those of the joining relations.
1098+ */
1099+ Assert(partition_bounds_equal(joinrel->part_scheme->partnatts,
1100+ joinrel->part_scheme->parttyplen,
1101+ joinrel->part_scheme->parttypbyval,
1102+ joinrel->boundinfo, rel1->boundinfo));
1103+ Assert(partition_bounds_equal(joinrel->part_scheme->partnatts,
1104+ joinrel->part_scheme->parttyplen,
1105+ joinrel->part_scheme->parttypbyval,
1106+ joinrel->boundinfo, rel2->boundinfo));
1107+
1108+ nparts = joinrel->nparts;
1109+
1110+ /*
1111+ * Create child-join relations for this partitioned join, if those don't
1112+ * exist. Add paths to child-joins for a pair of child relations
1113+ * corresponding to the given pair of parent relations.
1114+ */
1115+ for (cnt_parts = 0; cnt_parts < nparts; cnt_parts++)
1116+ {
1117+ RelOptInfo *child_rel1 = rel1->part_rels[cnt_parts];
1118+ RelOptInfo *child_rel2 = rel2->part_rels[cnt_parts];
1119+ SpecialJoinInfo *child_sjinfo;
1120+ List *child_restrictlist;
1121+ RelOptInfo *child_joinrel;
1122+ Relids child_joinrelids;
1123+ AppendRelInfo **appinfos;
1124+ int nappinfos;
1125+
1126+ /* We should never try to join two overlapping sets of rels. */
1127+ Assert(!bms_overlap(child_rel1->relids, child_rel2->relids));
1128+ child_joinrelids = bms_union(child_rel1->relids, child_rel2->relids);
1129+ appinfos = find_appinfos_by_relids(root, child_joinrelids, &nappinfos);
1130+
1131+ /*
1132+ * Construct SpecialJoinInfo from parent join relations's
1133+ * SpecialJoinInfo.
1134+ */
1135+ child_sjinfo = build_child_join_sjinfo(root, parent_sjinfo,
1136+ child_rel1->relids,
1137+ child_rel2->relids);
1138+
1139+ /*
1140+ * Construct restrictions applicable to the child join from those
1141+ * applicable to the parent join.
1142+ */
1143+ child_restrictlist =
1144+ (List *) adjust_appendrel_attrs(root,
1145+ (Node *) parent_restrictlist,
1146+ nappinfos, appinfos);
1147+ pfree(appinfos);
1148+
1149+ child_joinrel = joinrel->part_rels[cnt_parts];
1150+ if (!child_joinrel)
1151+ {
1152+ child_joinrel = build_child_join_rel(root, child_rel1, child_rel2,
1153+ joinrel, child_restrictlist,
1154+ child_sjinfo,
1155+ child_sjinfo->jointype);
1156+ joinrel->part_rels[cnt_parts] = child_joinrel;
1157+ }
1158+
1159+ Assert(bms_equal(child_joinrel->relids, child_joinrelids));
1160+
1161+ populate_joinrel_with_paths(root, child_rel1, child_rel2,
1162+ child_joinrel, child_sjinfo,
1163+ child_restrictlist);
1164+ }
1165+}
1166+
--- a/expected/init.out
+++ b/expected/init.out
@@ -159,44 +159,53 @@ SELECT name, setting, category
159159 OR name = 'client_min_messages'
160160 ORDER BY category, name;
161161 SELECT * FROM settings;
162- name | setting | category
163-------------------------------+-----------+---------------------------------------------
164- geqo | on | Query Tuning / Genetic Query Optimizer
165- geqo_effort | 5 | Query Tuning / Genetic Query Optimizer
166- geqo_generations | 0 | Query Tuning / Genetic Query Optimizer
167- geqo_pool_size | 0 | Query Tuning / Genetic Query Optimizer
168- geqo_seed | 0 | Query Tuning / Genetic Query Optimizer
169- geqo_selection_bias | 2 | Query Tuning / Genetic Query Optimizer
170- geqo_threshold | 12 | Query Tuning / Genetic Query Optimizer
171- constraint_exclusion | partition | Query Tuning / Other Planner Options
172- cursor_tuple_fraction | 0.1 | Query Tuning / Other Planner Options
173- default_statistics_target | 100 | Query Tuning / Other Planner Options
174- force_parallel_mode | off | Query Tuning / Other Planner Options
175- from_collapse_limit | 8 | Query Tuning / Other Planner Options
176- join_collapse_limit | 8 | Query Tuning / Other Planner Options
177- cpu_index_tuple_cost | 0.005 | Query Tuning / Planner Cost Constants
178- cpu_operator_cost | 0.0025 | Query Tuning / Planner Cost Constants
179- cpu_tuple_cost | 0.01 | Query Tuning / Planner Cost Constants
180- effective_cache_size | 16384 | Query Tuning / Planner Cost Constants
181- min_parallel_index_scan_size | 64 | Query Tuning / Planner Cost Constants
182- min_parallel_table_scan_size | 1024 | Query Tuning / Planner Cost Constants
183- parallel_setup_cost | 1000 | Query Tuning / Planner Cost Constants
184- parallel_tuple_cost | 0.1 | Query Tuning / Planner Cost Constants
185- random_page_cost | 4 | Query Tuning / Planner Cost Constants
186- seq_page_cost | 1 | Query Tuning / Planner Cost Constants
187- enable_bitmapscan | on | Query Tuning / Planner Method Configuration
188- enable_gathermerge | on | Query Tuning / Planner Method Configuration
189- enable_hashagg | on | Query Tuning / Planner Method Configuration
190- enable_hashjoin | on | Query Tuning / Planner Method Configuration
191- enable_indexonlyscan | on | Query Tuning / Planner Method Configuration
192- enable_indexscan | on | Query Tuning / Planner Method Configuration
193- enable_material | on | Query Tuning / Planner Method Configuration
194- enable_mergejoin | on | Query Tuning / Planner Method Configuration
195- enable_nestloop | on | Query Tuning / Planner Method Configuration
196- enable_seqscan | on | Query Tuning / Planner Method Configuration
197- enable_sort | on | Query Tuning / Planner Method Configuration
198- enable_tidscan | on | Query Tuning / Planner Method Configuration
199- client_min_messages | notice | Reporting and Logging / When to Log
200-(36 rows)
162+ name | setting | category
163+--------------------------------+-----------+---------------------------------------------
164+ geqo | on | Query Tuning / Genetic Query Optimizer
165+ geqo_effort | 5 | Query Tuning / Genetic Query Optimizer
166+ geqo_generations | 0 | Query Tuning / Genetic Query Optimizer
167+ geqo_pool_size | 0 | Query Tuning / Genetic Query Optimizer
168+ geqo_seed | 0 | Query Tuning / Genetic Query Optimizer
169+ geqo_selection_bias | 2 | Query Tuning / Genetic Query Optimizer
170+ geqo_threshold | 12 | Query Tuning / Genetic Query Optimizer
171+ constraint_exclusion | partition | Query Tuning / Other Planner Options
172+ cursor_tuple_fraction | 0.1 | Query Tuning / Other Planner Options
173+ default_statistics_target | 100 | Query Tuning / Other Planner Options
174+ force_parallel_mode | off | Query Tuning / Other Planner Options
175+ from_collapse_limit | 8 | Query Tuning / Other Planner Options
176+ jit | on | Query Tuning / Other Planner Options
177+ join_collapse_limit | 8 | Query Tuning / Other Planner Options
178+ cpu_index_tuple_cost | 0.005 | Query Tuning / Planner Cost Constants
179+ cpu_operator_cost | 0.0025 | Query Tuning / Planner Cost Constants
180+ cpu_tuple_cost | 0.01 | Query Tuning / Planner Cost Constants
181+ effective_cache_size | 16384 | Query Tuning / Planner Cost Constants
182+ jit_above_cost | 100000 | Query Tuning / Planner Cost Constants
183+ jit_inline_above_cost | 500000 | Query Tuning / Planner Cost Constants
184+ jit_optimize_above_cost | 500000 | Query Tuning / Planner Cost Constants
185+ min_parallel_index_scan_size | 64 | Query Tuning / Planner Cost Constants
186+ min_parallel_table_scan_size | 1024 | Query Tuning / Planner Cost Constants
187+ parallel_setup_cost | 1000 | Query Tuning / Planner Cost Constants
188+ parallel_tuple_cost | 0.1 | Query Tuning / Planner Cost Constants
189+ random_page_cost | 4 | Query Tuning / Planner Cost Constants
190+ seq_page_cost | 1 | Query Tuning / Planner Cost Constants
191+ enable_bitmapscan | on | Query Tuning / Planner Method Configuration
192+ enable_gathermerge | on | Query Tuning / Planner Method Configuration
193+ enable_hashagg | on | Query Tuning / Planner Method Configuration
194+ enable_hashjoin | on | Query Tuning / Planner Method Configuration
195+ enable_indexonlyscan | on | Query Tuning / Planner Method Configuration
196+ enable_indexscan | on | Query Tuning / Planner Method Configuration
197+ enable_material | on | Query Tuning / Planner Method Configuration
198+ enable_mergejoin | on | Query Tuning / Planner Method Configuration
199+ enable_nestloop | on | Query Tuning / Planner Method Configuration
200+ enable_parallel_append | on | Query Tuning / Planner Method Configuration
201+ enable_parallel_hash | on | Query Tuning / Planner Method Configuration
202+ enable_partition_pruning | on | Query Tuning / Planner Method Configuration
203+ enable_partitionwise_aggregate | off | Query Tuning / Planner Method Configuration
204+ enable_partitionwise_join | off | Query Tuning / Planner Method Configuration
205+ enable_seqscan | on | Query Tuning / Planner Method Configuration
206+ enable_sort | on | Query Tuning / Planner Method Configuration
207+ enable_tidscan | on | Query Tuning / Planner Method Configuration
208+ client_min_messages | notice | Reporting and Logging / When to Log
209+(45 rows)
201210
202211 ANALYZE;
--- a/expected/ut-A.out
+++ b/expected/ut-A.out
@@ -1786,31 +1786,31 @@ SHOW pg_hint_plan.debug_print;
17861786 ---- No. A-8-4 original GUC parameter pg_hint_plan.parse_messages
17871787 ----
17881788 SET client_min_messages TO debug5;
1789-DEBUG: CommitTransaction(1) name: unnamed; blockState: STARTED; state: INPROGR, xid/subid/cid: 0/1/0
1789+DEBUG: CommitTransaction(1) name: unnamed; blockState: STARTED; state: INPROGRESS, xid/subid/cid: 0/1/0
17901790 -- No. A-8-4-1
17911791 SET pg_hint_plan.parse_messages TO debug5;
1792-DEBUG: StartTransaction(1) name: unnamed; blockState: DEFAULT; state: INPROGR, xid/subid/cid: 0/1/0
1793-DEBUG: CommitTransaction(1) name: unnamed; blockState: STARTED; state: INPROGR, xid/subid/cid: 0/1/0
1792+DEBUG: StartTransaction(1) name: unnamed; blockState: DEFAULT; state: INPROGRESS, xid/subid/cid: 0/1/0
1793+DEBUG: CommitTransaction(1) name: unnamed; blockState: STARTED; state: INPROGRESS, xid/subid/cid: 0/1/0
17941794 SHOW pg_hint_plan.parse_messages;
1795-DEBUG: StartTransaction(1) name: unnamed; blockState: DEFAULT; state: INPROGR, xid/subid/cid: 0/1/0
1796-DEBUG: CommitTransaction(1) name: unnamed; blockState: STARTED; state: INPROGR, xid/subid/cid: 0/1/0
1795+DEBUG: StartTransaction(1) name: unnamed; blockState: DEFAULT; state: INPROGRESS, xid/subid/cid: 0/1/0
1796+DEBUG: CommitTransaction(1) name: unnamed; blockState: STARTED; state: INPROGRESS, xid/subid/cid: 0/1/0
17971797 pg_hint_plan.parse_messages
17981798 -----------------------------
17991799 debug5
18001800 (1 row)
18011801
18021802 /*+Set*/SELECT 1;
1803-DEBUG: StartTransaction(1) name: unnamed; blockState: DEFAULT; state: INPROGR, xid/subid/cid: 0/1/0
1803+DEBUG: StartTransaction(1) name: unnamed; blockState: DEFAULT; state: INPROGRESS, xid/subid/cid: 0/1/0
18041804 DEBUG: pg_hint_plan: hint syntax error at or near ""
18051805 DETAIL: Opening parenthesis is necessary.
1806-DEBUG: CommitTransaction(1) name: unnamed; blockState: STARTED; state: INPROGR, xid/subid/cid: 0/1/0
1806+DEBUG: CommitTransaction(1) name: unnamed; blockState: STARTED; state: INPROGRESS, xid/subid/cid: 0/1/0
18071807 ?column?
18081808 ----------
18091809 1
18101810 (1 row)
18111811
18121812 SET client_min_messages TO debug4;
1813-DEBUG: StartTransaction(1) name: unnamed; blockState: DEFAULT; state: INPROGR, xid/subid/cid: 0/1/0
1813+DEBUG: StartTransaction(1) name: unnamed; blockState: DEFAULT; state: INPROGRESS, xid/subid/cid: 0/1/0
18141814 /*+Set*/SELECT 1;
18151815 ?column?
18161816 ----------
@@ -3224,45 +3224,54 @@ NestLoop(t1 t1)
32243224 -- No. A-12-1-1
32253225 -- No. A-12-2-1
32263226 SELECT name, setting FROM settings;
3227- name | setting
3228-------------------------------+-----------
3229- geqo | on
3230- geqo_effort | 5
3231- geqo_generations | 0
3232- geqo_pool_size | 0
3233- geqo_seed | 0
3234- geqo_selection_bias | 2
3235- geqo_threshold | 12
3236- constraint_exclusion | partition
3237- cursor_tuple_fraction | 0.1
3238- default_statistics_target | 100
3239- force_parallel_mode | off
3240- from_collapse_limit | 8
3241- join_collapse_limit | 8
3242- cpu_index_tuple_cost | 0.005
3243- cpu_operator_cost | 0.0025
3244- cpu_tuple_cost | 0.01
3245- effective_cache_size | 16384
3246- min_parallel_index_scan_size | 64
3247- min_parallel_table_scan_size | 1024
3248- parallel_setup_cost | 1000
3249- parallel_tuple_cost | 0.1
3250- random_page_cost | 4
3251- seq_page_cost | 1
3252- enable_bitmapscan | on
3253- enable_gathermerge | on
3254- enable_hashagg | on
3255- enable_hashjoin | on
3256- enable_indexonlyscan | on
3257- enable_indexscan | on
3258- enable_material | on
3259- enable_mergejoin | on
3260- enable_nestloop | on
3261- enable_seqscan | on
3262- enable_sort | on
3263- enable_tidscan | on
3264- client_min_messages | log
3265-(36 rows)
3227+ name | setting
3228+--------------------------------+-----------
3229+ geqo | on
3230+ geqo_effort | 5
3231+ geqo_generations | 0
3232+ geqo_pool_size | 0
3233+ geqo_seed | 0
3234+ geqo_selection_bias | 2
3235+ geqo_threshold | 12
3236+ constraint_exclusion | partition
3237+ cursor_tuple_fraction | 0.1
3238+ default_statistics_target | 100
3239+ force_parallel_mode | off
3240+ from_collapse_limit | 8
3241+ jit | on
3242+ join_collapse_limit | 8
3243+ cpu_index_tuple_cost | 0.005
3244+ cpu_operator_cost | 0.0025
3245+ cpu_tuple_cost | 0.01
3246+ effective_cache_size | 16384
3247+ jit_above_cost | 100000
3248+ jit_inline_above_cost | 500000
3249+ jit_optimize_above_cost | 500000
3250+ min_parallel_index_scan_size | 64
3251+ min_parallel_table_scan_size | 1024
3252+ parallel_setup_cost | 1000
3253+ parallel_tuple_cost | 0.1
3254+ random_page_cost | 4
3255+ seq_page_cost | 1
3256+ enable_bitmapscan | on
3257+ enable_gathermerge | on
3258+ enable_hashagg | on
3259+ enable_hashjoin | on
3260+ enable_indexonlyscan | on
3261+ enable_indexscan | on
3262+ enable_material | on
3263+ enable_mergejoin | on
3264+ enable_nestloop | on
3265+ enable_parallel_append | on
3266+ enable_parallel_hash | on
3267+ enable_partition_pruning | on
3268+ enable_partitionwise_aggregate | off
3269+ enable_partitionwise_join | off
3270+ enable_seqscan | on
3271+ enable_sort | on
3272+ enable_tidscan | on
3273+ client_min_messages | log
3274+(45 rows)
32663275
32673276 SET pg_hint_plan.parse_messages TO error;
32683277 /*+Set(enable_seqscan off)Set(geqo_threshold 100)SeqScan(t1)MergeJoin(t1 t2)NestLoop(t1 t1)*/
@@ -3270,45 +3279,54 @@ EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
32703279 ERROR: pg_hint_plan: hint syntax error at or near "NestLoop(t1 t1)"
32713280 DETAIL: Relation name "t1" is duplicated.
32723281 SELECT name, setting FROM settings;
3273- name | setting
3274-------------------------------+-----------
3275- geqo | on
3276- geqo_effort | 5
3277- geqo_generations | 0
3278- geqo_pool_size | 0
3279- geqo_seed | 0
3280- geqo_selection_bias | 2
3281- geqo_threshold | 12
3282- constraint_exclusion | partition
3283- cursor_tuple_fraction | 0.1
3284- default_statistics_target | 100
3285- force_parallel_mode | off
3286- from_collapse_limit | 8
3287- join_collapse_limit | 8
3288- cpu_index_tuple_cost | 0.005
3289- cpu_operator_cost | 0.0025
3290- cpu_tuple_cost | 0.01
3291- effective_cache_size | 16384
3292- min_parallel_index_scan_size | 64
3293- min_parallel_table_scan_size | 1024
3294- parallel_setup_cost | 1000
3295- parallel_tuple_cost | 0.1
3296- random_page_cost | 4
3297- seq_page_cost | 1
3298- enable_bitmapscan | on
3299- enable_gathermerge | on
3300- enable_hashagg | on
3301- enable_hashjoin | on
3302- enable_indexonlyscan | on
3303- enable_indexscan | on
3304- enable_material | on
3305- enable_mergejoin | on
3306- enable_nestloop | on
3307- enable_seqscan | on
3308- enable_sort | on
3309- enable_tidscan | on
3310- client_min_messages | log
3311-(36 rows)
3282+ name | setting
3283+--------------------------------+-----------
3284+ geqo | on
3285+ geqo_effort | 5
3286+ geqo_generations | 0
3287+ geqo_pool_size | 0
3288+ geqo_seed | 0
3289+ geqo_selection_bias | 2
3290+ geqo_threshold | 12
3291+ constraint_exclusion | partition
3292+ cursor_tuple_fraction | 0.1
3293+ default_statistics_target | 100
3294+ force_parallel_mode | off
3295+ from_collapse_limit | 8
3296+ jit | on
3297+ join_collapse_limit | 8
3298+ cpu_index_tuple_cost | 0.005
3299+ cpu_operator_cost | 0.0025
3300+ cpu_tuple_cost | 0.01
3301+ effective_cache_size | 16384
3302+ jit_above_cost | 100000
3303+ jit_inline_above_cost | 500000
3304+ jit_optimize_above_cost | 500000
3305+ min_parallel_index_scan_size | 64
3306+ min_parallel_table_scan_size | 1024
3307+ parallel_setup_cost | 1000
3308+ parallel_tuple_cost | 0.1
3309+ random_page_cost | 4
3310+ seq_page_cost | 1
3311+ enable_bitmapscan | on
3312+ enable_gathermerge | on
3313+ enable_hashagg | on
3314+ enable_hashjoin | on
3315+ enable_indexonlyscan | on
3316+ enable_indexscan | on
3317+ enable_material | on
3318+ enable_mergejoin | on
3319+ enable_nestloop | on
3320+ enable_parallel_append | on
3321+ enable_parallel_hash | on
3322+ enable_partition_pruning | on
3323+ enable_partitionwise_aggregate | off
3324+ enable_partitionwise_join | off
3325+ enable_seqscan | on
3326+ enable_sort | on
3327+ enable_tidscan | on
3328+ client_min_messages | log
3329+(45 rows)
33123330
33133331 /*+Set(enable_seqscan off)Set(geqo_threshold 100)SeqScan(t1)MergeJoin(t1 t2)*/
33143332 EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
@@ -3335,45 +3353,54 @@ error hint:
33353353 -- No. A-12-1-2
33363354 -- No. A-12-2-2
33373355 SELECT name, setting FROM settings;
3338- name | setting
3339-------------------------------+-----------
3340- geqo | on
3341- geqo_effort | 5
3342- geqo_generations | 0
3343- geqo_pool_size | 0
3344- geqo_seed | 0
3345- geqo_selection_bias | 2
3346- geqo_threshold | 12
3347- constraint_exclusion | partition
3348- cursor_tuple_fraction | 0.1
3349- default_statistics_target | 100
3350- force_parallel_mode | off
3351- from_collapse_limit | 8
3352- join_collapse_limit | 8
3353- cpu_index_tuple_cost | 0.005
3354- cpu_operator_cost | 0.0025
3355- cpu_tuple_cost | 0.01
3356- effective_cache_size | 16384
3357- min_parallel_index_scan_size | 64
3358- min_parallel_table_scan_size | 1024
3359- parallel_setup_cost | 1000
3360- parallel_tuple_cost | 0.1
3361- random_page_cost | 4
3362- seq_page_cost | 1
3363- enable_bitmapscan | on
3364- enable_gathermerge | on
3365- enable_hashagg | on
3366- enable_hashjoin | on
3367- enable_indexonlyscan | on
3368- enable_indexscan | on
3369- enable_material | on
3370- enable_mergejoin | on
3371- enable_nestloop | on
3372- enable_seqscan | on
3373- enable_sort | on
3374- enable_tidscan | on
3375- client_min_messages | log
3376-(36 rows)
3356+ name | setting
3357+--------------------------------+-----------
3358+ geqo | on
3359+ geqo_effort | 5
3360+ geqo_generations | 0
3361+ geqo_pool_size | 0
3362+ geqo_seed | 0
3363+ geqo_selection_bias | 2
3364+ geqo_threshold | 12
3365+ constraint_exclusion | partition
3366+ cursor_tuple_fraction | 0.1
3367+ default_statistics_target | 100
3368+ force_parallel_mode | off
3369+ from_collapse_limit | 8
3370+ jit | on
3371+ join_collapse_limit | 8
3372+ cpu_index_tuple_cost | 0.005
3373+ cpu_operator_cost | 0.0025
3374+ cpu_tuple_cost | 0.01
3375+ effective_cache_size | 16384
3376+ jit_above_cost | 100000
3377+ jit_inline_above_cost | 500000
3378+ jit_optimize_above_cost | 500000
3379+ min_parallel_index_scan_size | 64
3380+ min_parallel_table_scan_size | 1024
3381+ parallel_setup_cost | 1000
3382+ parallel_tuple_cost | 0.1
3383+ random_page_cost | 4
3384+ seq_page_cost | 1
3385+ enable_bitmapscan | on
3386+ enable_gathermerge | on
3387+ enable_hashagg | on
3388+ enable_hashjoin | on
3389+ enable_indexonlyscan | on
3390+ enable_indexscan | on
3391+ enable_material | on
3392+ enable_mergejoin | on
3393+ enable_nestloop | on
3394+ enable_parallel_append | on
3395+ enable_parallel_hash | on
3396+ enable_partition_pruning | on
3397+ enable_partitionwise_aggregate | off
3398+ enable_partitionwise_join | off
3399+ enable_seqscan | on
3400+ enable_sort | on
3401+ enable_tidscan | on
3402+ client_min_messages | log
3403+(45 rows)
33773404
33783405 SET pg_hint_plan.parse_messages TO error;
33793406 /*+Set(enable_seqscan off)Set(geqo_threshold 100)SeqScan(t1)MergeJoin(t1 t2)NestLoop(t1 t1)*/
@@ -3381,45 +3408,54 @@ EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
33813408 ERROR: pg_hint_plan: hint syntax error at or near "NestLoop(t1 t1)"
33823409 DETAIL: Relation name "t1" is duplicated.
33833410 SELECT name, setting FROM settings;
3384- name | setting
3385-------------------------------+-----------
3386- geqo | on
3387- geqo_effort | 5
3388- geqo_generations | 0
3389- geqo_pool_size | 0
3390- geqo_seed | 0
3391- geqo_selection_bias | 2
3392- geqo_threshold | 12
3393- constraint_exclusion | partition
3394- cursor_tuple_fraction | 0.1
3395- default_statistics_target | 100
3396- force_parallel_mode | off
3397- from_collapse_limit | 8
3398- join_collapse_limit | 8
3399- cpu_index_tuple_cost | 0.005
3400- cpu_operator_cost | 0.0025
3401- cpu_tuple_cost | 0.01
3402- effective_cache_size | 16384
3403- min_parallel_index_scan_size | 64
3404- min_parallel_table_scan_size | 1024
3405- parallel_setup_cost | 1000
3406- parallel_tuple_cost | 0.1
3407- random_page_cost | 4
3408- seq_page_cost | 1
3409- enable_bitmapscan | on
3410- enable_gathermerge | on
3411- enable_hashagg | on
3412- enable_hashjoin | on
3413- enable_indexonlyscan | on
3414- enable_indexscan | on
3415- enable_material | on
3416- enable_mergejoin | on
3417- enable_nestloop | on
3418- enable_seqscan | on
3419- enable_sort | on
3420- enable_tidscan | on
3421- client_min_messages | log
3422-(36 rows)
3411+ name | setting
3412+--------------------------------+-----------
3413+ geqo | on
3414+ geqo_effort | 5
3415+ geqo_generations | 0
3416+ geqo_pool_size | 0
3417+ geqo_seed | 0
3418+ geqo_selection_bias | 2
3419+ geqo_threshold | 12
3420+ constraint_exclusion | partition
3421+ cursor_tuple_fraction | 0.1
3422+ default_statistics_target | 100
3423+ force_parallel_mode | off
3424+ from_collapse_limit | 8
3425+ jit | on
3426+ join_collapse_limit | 8
3427+ cpu_index_tuple_cost | 0.005
3428+ cpu_operator_cost | 0.0025
3429+ cpu_tuple_cost | 0.01
3430+ effective_cache_size | 16384
3431+ jit_above_cost | 100000
3432+ jit_inline_above_cost | 500000
3433+ jit_optimize_above_cost | 500000
3434+ min_parallel_index_scan_size | 64
3435+ min_parallel_table_scan_size | 1024
3436+ parallel_setup_cost | 1000
3437+ parallel_tuple_cost | 0.1
3438+ random_page_cost | 4
3439+ seq_page_cost | 1
3440+ enable_bitmapscan | on
3441+ enable_gathermerge | on
3442+ enable_hashagg | on
3443+ enable_hashjoin | on
3444+ enable_indexonlyscan | on
3445+ enable_indexscan | on
3446+ enable_material | on
3447+ enable_mergejoin | on
3448+ enable_nestloop | on
3449+ enable_parallel_append | on
3450+ enable_parallel_hash | on
3451+ enable_partition_pruning | on
3452+ enable_partitionwise_aggregate | off
3453+ enable_partitionwise_join | off
3454+ enable_seqscan | on
3455+ enable_sort | on
3456+ enable_tidscan | on
3457+ client_min_messages | log
3458+(45 rows)
34233459
34243460 EXPLAIN (COSTS false) EXECUTE p1;
34253461 QUERY PLAN
@@ -3435,45 +3471,54 @@ EXPLAIN (COSTS false) EXECUTE p1;
34353471 -- No. A-12-1-3
34363472 -- No. A-12-2-3
34373473 SELECT name, setting FROM settings;
3438- name | setting
3439-------------------------------+-----------
3440- geqo | on
3441- geqo_effort | 5
3442- geqo_generations | 0
3443- geqo_pool_size | 0
3444- geqo_seed | 0
3445- geqo_selection_bias | 2
3446- geqo_threshold | 12
3447- constraint_exclusion | partition
3448- cursor_tuple_fraction | 0.1
3449- default_statistics_target | 100
3450- force_parallel_mode | off
3451- from_collapse_limit | 8
3452- join_collapse_limit | 8
3453- cpu_index_tuple_cost | 0.005
3454- cpu_operator_cost | 0.0025
3455- cpu_tuple_cost | 0.01
3456- effective_cache_size | 16384
3457- min_parallel_index_scan_size | 64
3458- min_parallel_table_scan_size | 1024
3459- parallel_setup_cost | 1000
3460- parallel_tuple_cost | 0.1
3461- random_page_cost | 4
3462- seq_page_cost | 1
3463- enable_bitmapscan | on
3464- enable_gathermerge | on
3465- enable_hashagg | on
3466- enable_hashjoin | on
3467- enable_indexonlyscan | on
3468- enable_indexscan | on
3469- enable_material | on
3470- enable_mergejoin | on
3471- enable_nestloop | on
3472- enable_seqscan | on
3473- enable_sort | on
3474- enable_tidscan | on
3475- client_min_messages | log
3476-(36 rows)
3474+ name | setting
3475+--------------------------------+-----------
3476+ geqo | on
3477+ geqo_effort | 5
3478+ geqo_generations | 0
3479+ geqo_pool_size | 0
3480+ geqo_seed | 0
3481+ geqo_selection_bias | 2
3482+ geqo_threshold | 12
3483+ constraint_exclusion | partition
3484+ cursor_tuple_fraction | 0.1
3485+ default_statistics_target | 100
3486+ force_parallel_mode | off
3487+ from_collapse_limit | 8
3488+ jit | on
3489+ join_collapse_limit | 8
3490+ cpu_index_tuple_cost | 0.005
3491+ cpu_operator_cost | 0.0025
3492+ cpu_tuple_cost | 0.01
3493+ effective_cache_size | 16384
3494+ jit_above_cost | 100000
3495+ jit_inline_above_cost | 500000
3496+ jit_optimize_above_cost | 500000
3497+ min_parallel_index_scan_size | 64
3498+ min_parallel_table_scan_size | 1024
3499+ parallel_setup_cost | 1000
3500+ parallel_tuple_cost | 0.1
3501+ random_page_cost | 4
3502+ seq_page_cost | 1
3503+ enable_bitmapscan | on
3504+ enable_gathermerge | on
3505+ enable_hashagg | on
3506+ enable_hashjoin | on
3507+ enable_indexonlyscan | on
3508+ enable_indexscan | on
3509+ enable_material | on
3510+ enable_mergejoin | on
3511+ enable_nestloop | on
3512+ enable_parallel_append | on
3513+ enable_parallel_hash | on
3514+ enable_partition_pruning | on
3515+ enable_partitionwise_aggregate | off
3516+ enable_partitionwise_join | off
3517+ enable_seqscan | on
3518+ enable_sort | on
3519+ enable_tidscan | on
3520+ client_min_messages | log
3521+(45 rows)
34773522
34783523 SET pg_hint_plan.parse_messages TO error;
34793524 EXPLAIN (COSTS false) EXECUTE p2;
@@ -3512,88 +3557,106 @@ EXPLAIN (COSTS false) EXECUTE p1;
35123557 (6 rows)
35133558
35143559 SELECT name, setting FROM settings;
3515- name | setting
3516-------------------------------+-----------
3517- geqo | on
3518- geqo_effort | 5
3519- geqo_generations | 0
3520- geqo_pool_size | 0
3521- geqo_seed | 0
3522- geqo_selection_bias | 2
3523- geqo_threshold | 12
3524- constraint_exclusion | partition
3525- cursor_tuple_fraction | 0.1
3526- default_statistics_target | 100
3527- force_parallel_mode | off
3528- from_collapse_limit | 8
3529- join_collapse_limit | 8
3530- cpu_index_tuple_cost | 0.005
3531- cpu_operator_cost | 0.0025
3532- cpu_tuple_cost | 0.01
3533- effective_cache_size | 16384
3534- min_parallel_index_scan_size | 64
3535- min_parallel_table_scan_size | 1024
3536- parallel_setup_cost | 1000
3537- parallel_tuple_cost | 0.1
3538- random_page_cost | 4
3539- seq_page_cost | 1
3540- enable_bitmapscan | on
3541- enable_gathermerge | on
3542- enable_hashagg | on
3543- enable_hashjoin | on
3544- enable_indexonlyscan | on
3545- enable_indexscan | on
3546- enable_material | on
3547- enable_mergejoin | on
3548- enable_nestloop | on
3549- enable_seqscan | on
3550- enable_sort | on
3551- enable_tidscan | on
3552- client_min_messages | log
3553-(36 rows)
3560+ name | setting
3561+--------------------------------+-----------
3562+ geqo | on
3563+ geqo_effort | 5
3564+ geqo_generations | 0
3565+ geqo_pool_size | 0
3566+ geqo_seed | 0
3567+ geqo_selection_bias | 2
3568+ geqo_threshold | 12
3569+ constraint_exclusion | partition
3570+ cursor_tuple_fraction | 0.1
3571+ default_statistics_target | 100
3572+ force_parallel_mode | off
3573+ from_collapse_limit | 8
3574+ jit | on
3575+ join_collapse_limit | 8
3576+ cpu_index_tuple_cost | 0.005
3577+ cpu_operator_cost | 0.0025
3578+ cpu_tuple_cost | 0.01
3579+ effective_cache_size | 16384
3580+ jit_above_cost | 100000
3581+ jit_inline_above_cost | 500000
3582+ jit_optimize_above_cost | 500000
3583+ min_parallel_index_scan_size | 64
3584+ min_parallel_table_scan_size | 1024
3585+ parallel_setup_cost | 1000
3586+ parallel_tuple_cost | 0.1
3587+ random_page_cost | 4
3588+ seq_page_cost | 1
3589+ enable_bitmapscan | on
3590+ enable_gathermerge | on
3591+ enable_hashagg | on
3592+ enable_hashjoin | on
3593+ enable_indexonlyscan | on
3594+ enable_indexscan | on
3595+ enable_material | on
3596+ enable_mergejoin | on
3597+ enable_nestloop | on
3598+ enable_parallel_append | on
3599+ enable_parallel_hash | on
3600+ enable_partition_pruning | on
3601+ enable_partitionwise_aggregate | off
3602+ enable_partitionwise_join | off
3603+ enable_seqscan | on
3604+ enable_sort | on
3605+ enable_tidscan | on
3606+ client_min_messages | log
3607+(45 rows)
35543608
35553609 -- No. A-12-1-4
35563610 -- No. A-12-2-4
35573611 SELECT name, setting FROM settings;
3558- name | setting
3559-------------------------------+-----------
3560- geqo | on
3561- geqo_effort | 5
3562- geqo_generations | 0
3563- geqo_pool_size | 0
3564- geqo_seed | 0
3565- geqo_selection_bias | 2
3566- geqo_threshold | 12
3567- constraint_exclusion | partition
3568- cursor_tuple_fraction | 0.1
3569- default_statistics_target | 100
3570- force_parallel_mode | off
3571- from_collapse_limit | 8
3572- join_collapse_limit | 8
3573- cpu_index_tuple_cost | 0.005
3574- cpu_operator_cost | 0.0025
3575- cpu_tuple_cost | 0.01
3576- effective_cache_size | 16384
3577- min_parallel_index_scan_size | 64
3578- min_parallel_table_scan_size | 1024
3579- parallel_setup_cost | 1000
3580- parallel_tuple_cost | 0.1
3581- random_page_cost | 4
3582- seq_page_cost | 1
3583- enable_bitmapscan | on
3584- enable_gathermerge | on
3585- enable_hashagg | on
3586- enable_hashjoin | on
3587- enable_indexonlyscan | on
3588- enable_indexscan | on
3589- enable_material | on
3590- enable_mergejoin | on
3591- enable_nestloop | on
3592- enable_seqscan | on
3593- enable_sort | on
3594- enable_tidscan | on
3595- client_min_messages | log
3596-(36 rows)
3612+ name | setting
3613+--------------------------------+-----------
3614+ geqo | on
3615+ geqo_effort | 5
3616+ geqo_generations | 0
3617+ geqo_pool_size | 0
3618+ geqo_seed | 0
3619+ geqo_selection_bias | 2
3620+ geqo_threshold | 12
3621+ constraint_exclusion | partition
3622+ cursor_tuple_fraction | 0.1
3623+ default_statistics_target | 100
3624+ force_parallel_mode | off
3625+ from_collapse_limit | 8
3626+ jit | on
3627+ join_collapse_limit | 8
3628+ cpu_index_tuple_cost | 0.005
3629+ cpu_operator_cost | 0.0025
3630+ cpu_tuple_cost | 0.01
3631+ effective_cache_size | 16384
3632+ jit_above_cost | 100000
3633+ jit_inline_above_cost | 500000
3634+ jit_optimize_above_cost | 500000
3635+ min_parallel_index_scan_size | 64
3636+ min_parallel_table_scan_size | 1024
3637+ parallel_setup_cost | 1000
3638+ parallel_tuple_cost | 0.1
3639+ random_page_cost | 4
3640+ seq_page_cost | 1
3641+ enable_bitmapscan | on
3642+ enable_gathermerge | on
3643+ enable_hashagg | on
3644+ enable_hashjoin | on
3645+ enable_indexonlyscan | on
3646+ enable_indexscan | on
3647+ enable_material | on
3648+ enable_mergejoin | on
3649+ enable_nestloop | on
3650+ enable_parallel_append | on
3651+ enable_parallel_hash | on
3652+ enable_partition_pruning | on
3653+ enable_partitionwise_aggregate | off
3654+ enable_partitionwise_join | off
3655+ enable_seqscan | on
3656+ enable_sort | on
3657+ enable_tidscan | on
3658+ client_min_messages | log
3659+(45 rows)
35973660
35983661 SET pg_hint_plan.parse_messages TO error;
35993662 EXPLAIN (COSTS false) EXECUTE p2;
@@ -3610,45 +3673,54 @@ EXPLAIN (COSTS false) EXECUTE p1;
36103673 (6 rows)
36113674
36123675 SELECT name, setting FROM settings;
3613- name | setting
3614-------------------------------+-----------
3615- geqo | on
3616- geqo_effort | 5
3617- geqo_generations | 0
3618- geqo_pool_size | 0
3619- geqo_seed | 0
3620- geqo_selection_bias | 2
3621- geqo_threshold | 12
3622- constraint_exclusion | partition
3623- cursor_tuple_fraction | 0.1
3624- default_statistics_target | 100
3625- force_parallel_mode | off
3626- from_collapse_limit | 8
3627- join_collapse_limit | 8
3628- cpu_index_tuple_cost | 0.005
3629- cpu_operator_cost | 0.0025
3630- cpu_tuple_cost | 0.01
3631- effective_cache_size | 16384
3632- min_parallel_index_scan_size | 64
3633- min_parallel_table_scan_size | 1024
3634- parallel_setup_cost | 1000
3635- parallel_tuple_cost | 0.1
3636- random_page_cost | 4
3637- seq_page_cost | 1
3638- enable_bitmapscan | on
3639- enable_gathermerge | on
3640- enable_hashagg | on
3641- enable_hashjoin | on
3642- enable_indexonlyscan | on
3643- enable_indexscan | on
3644- enable_material | on
3645- enable_mergejoin | on
3646- enable_nestloop | on
3647- enable_seqscan | on
3648- enable_sort | on
3649- enable_tidscan | on
3650- client_min_messages | log
3651-(36 rows)
3676+ name | setting
3677+--------------------------------+-----------
3678+ geqo | on
3679+ geqo_effort | 5
3680+ geqo_generations | 0
3681+ geqo_pool_size | 0
3682+ geqo_seed | 0
3683+ geqo_selection_bias | 2
3684+ geqo_threshold | 12
3685+ constraint_exclusion | partition
3686+ cursor_tuple_fraction | 0.1
3687+ default_statistics_target | 100
3688+ force_parallel_mode | off
3689+ from_collapse_limit | 8
3690+ jit | on
3691+ join_collapse_limit | 8
3692+ cpu_index_tuple_cost | 0.005
3693+ cpu_operator_cost | 0.0025
3694+ cpu_tuple_cost | 0.01
3695+ effective_cache_size | 16384
3696+ jit_above_cost | 100000
3697+ jit_inline_above_cost | 500000
3698+ jit_optimize_above_cost | 500000
3699+ min_parallel_index_scan_size | 64
3700+ min_parallel_table_scan_size | 1024
3701+ parallel_setup_cost | 1000
3702+ parallel_tuple_cost | 0.1
3703+ random_page_cost | 4
3704+ seq_page_cost | 1
3705+ enable_bitmapscan | on
3706+ enable_gathermerge | on
3707+ enable_hashagg | on
3708+ enable_hashjoin | on
3709+ enable_indexonlyscan | on
3710+ enable_indexscan | on
3711+ enable_material | on
3712+ enable_mergejoin | on
3713+ enable_nestloop | on
3714+ enable_parallel_append | on
3715+ enable_parallel_hash | on
3716+ enable_partition_pruning | on
3717+ enable_partitionwise_aggregate | off
3718+ enable_partitionwise_join | off
3719+ enable_seqscan | on
3720+ enable_sort | on
3721+ enable_tidscan | on
3722+ client_min_messages | log
3723+(45 rows)
36523724
36533725 DEALLOCATE p1;
36543726 SET pg_hint_plan.parse_messages TO LOG;
@@ -3680,45 +3752,54 @@ EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
36803752 (5 rows)
36813753
36823754 SELECT name, setting FROM settings;
3683- name | setting
3684-------------------------------+-----------
3685- geqo | on
3686- geqo_effort | 5
3687- geqo_generations | 0
3688- geqo_pool_size | 0
3689- geqo_seed | 0
3690- geqo_selection_bias | 2
3691- geqo_threshold | 12
3692- constraint_exclusion | partition
3693- cursor_tuple_fraction | 0.1
3694- default_statistics_target | 100
3695- force_parallel_mode | off
3696- from_collapse_limit | 8
3697- join_collapse_limit | 8
3698- cpu_index_tuple_cost | 0.005
3699- cpu_operator_cost | 0.0025
3700- cpu_tuple_cost | 0.01
3701- effective_cache_size | 16384
3702- min_parallel_index_scan_size | 64
3703- min_parallel_table_scan_size | 1024
3704- parallel_setup_cost | 1000
3705- parallel_tuple_cost | 0.1
3706- random_page_cost | 4
3707- seq_page_cost | 1
3708- enable_bitmapscan | on
3709- enable_gathermerge | on
3710- enable_hashagg | on
3711- enable_hashjoin | on
3712- enable_indexonlyscan | on
3713- enable_indexscan | off
3714- enable_material | on
3715- enable_mergejoin | off
3716- enable_nestloop | on
3717- enable_seqscan | on
3718- enable_sort | on
3719- enable_tidscan | on
3720- client_min_messages | log
3721-(36 rows)
3755+ name | setting
3756+--------------------------------+-----------
3757+ geqo | on
3758+ geqo_effort | 5
3759+ geqo_generations | 0
3760+ geqo_pool_size | 0
3761+ geqo_seed | 0
3762+ geqo_selection_bias | 2
3763+ geqo_threshold | 12
3764+ constraint_exclusion | partition
3765+ cursor_tuple_fraction | 0.1
3766+ default_statistics_target | 100
3767+ force_parallel_mode | off
3768+ from_collapse_limit | 8
3769+ jit | on
3770+ join_collapse_limit | 8
3771+ cpu_index_tuple_cost | 0.005
3772+ cpu_operator_cost | 0.0025
3773+ cpu_tuple_cost | 0.01
3774+ effective_cache_size | 16384
3775+ jit_above_cost | 100000
3776+ jit_inline_above_cost | 500000
3777+ jit_optimize_above_cost | 500000
3778+ min_parallel_index_scan_size | 64
3779+ min_parallel_table_scan_size | 1024
3780+ parallel_setup_cost | 1000
3781+ parallel_tuple_cost | 0.1
3782+ random_page_cost | 4
3783+ seq_page_cost | 1
3784+ enable_bitmapscan | on
3785+ enable_gathermerge | on
3786+ enable_hashagg | on
3787+ enable_hashjoin | on
3788+ enable_indexonlyscan | on
3789+ enable_indexscan | off
3790+ enable_material | on
3791+ enable_mergejoin | off
3792+ enable_nestloop | on
3793+ enable_parallel_append | on
3794+ enable_parallel_hash | on
3795+ enable_partition_pruning | on
3796+ enable_partitionwise_aggregate | off
3797+ enable_partitionwise_join | off
3798+ enable_seqscan | on
3799+ enable_sort | on
3800+ enable_tidscan | on
3801+ client_min_messages | log
3802+(45 rows)
37223803
37233804 /*+Set(enable_indexscan on)Set(geqo_threshold 100)IndexScan(t2)MergeJoin(t1 t2)Leading(t2 t1)*/
37243805 EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
@@ -3742,45 +3823,54 @@ error hint:
37423823 (4 rows)
37433824
37443825 SELECT name, setting FROM settings;
3745- name | setting
3746-------------------------------+-----------
3747- geqo | on
3748- geqo_effort | 5
3749- geqo_generations | 0
3750- geqo_pool_size | 0
3751- geqo_seed | 0
3752- geqo_selection_bias | 2
3753- geqo_threshold | 12
3754- constraint_exclusion | partition
3755- cursor_tuple_fraction | 0.1
3756- default_statistics_target | 100
3757- force_parallel_mode | off
3758- from_collapse_limit | 8
3759- join_collapse_limit | 8
3760- cpu_index_tuple_cost | 0.005
3761- cpu_operator_cost | 0.0025
3762- cpu_tuple_cost | 0.01
3763- effective_cache_size | 16384
3764- min_parallel_index_scan_size | 64
3765- min_parallel_table_scan_size | 1024
3766- parallel_setup_cost | 1000
3767- parallel_tuple_cost | 0.1
3768- random_page_cost | 4
3769- seq_page_cost | 1
3770- enable_bitmapscan | on
3771- enable_gathermerge | on
3772- enable_hashagg | on
3773- enable_hashjoin | on
3774- enable_indexonlyscan | on
3775- enable_indexscan | off
3776- enable_material | on
3777- enable_mergejoin | off
3778- enable_nestloop | on
3779- enable_seqscan | on
3780- enable_sort | on
3781- enable_tidscan | on
3782- client_min_messages | log
3783-(36 rows)
3826+ name | setting
3827+--------------------------------+-----------
3828+ geqo | on
3829+ geqo_effort | 5
3830+ geqo_generations | 0
3831+ geqo_pool_size | 0
3832+ geqo_seed | 0
3833+ geqo_selection_bias | 2
3834+ geqo_threshold | 12
3835+ constraint_exclusion | partition
3836+ cursor_tuple_fraction | 0.1
3837+ default_statistics_target | 100
3838+ force_parallel_mode | off
3839+ from_collapse_limit | 8
3840+ jit | on
3841+ join_collapse_limit | 8
3842+ cpu_index_tuple_cost | 0.005
3843+ cpu_operator_cost | 0.0025
3844+ cpu_tuple_cost | 0.01
3845+ effective_cache_size | 16384
3846+ jit_above_cost | 100000
3847+ jit_inline_above_cost | 500000
3848+ jit_optimize_above_cost | 500000
3849+ min_parallel_index_scan_size | 64
3850+ min_parallel_table_scan_size | 1024
3851+ parallel_setup_cost | 1000
3852+ parallel_tuple_cost | 0.1
3853+ random_page_cost | 4
3854+ seq_page_cost | 1
3855+ enable_bitmapscan | on
3856+ enable_gathermerge | on
3857+ enable_hashagg | on
3858+ enable_hashjoin | on
3859+ enable_indexonlyscan | on
3860+ enable_indexscan | off
3861+ enable_material | on
3862+ enable_mergejoin | off
3863+ enable_nestloop | on
3864+ enable_parallel_append | on
3865+ enable_parallel_hash | on
3866+ enable_partition_pruning | on
3867+ enable_partitionwise_aggregate | off
3868+ enable_partitionwise_join | off
3869+ enable_seqscan | on
3870+ enable_sort | on
3871+ enable_tidscan | on
3872+ client_min_messages | log
3873+(45 rows)
37843874
37853875 EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
37863876 QUERY PLAN
@@ -3806,45 +3896,54 @@ EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
38063896 (5 rows)
38073897
38083898 SELECT name, setting FROM settings;
3809- name | setting
3810-------------------------------+-----------
3811- geqo | on
3812- geqo_effort | 5
3813- geqo_generations | 0
3814- geqo_pool_size | 0
3815- geqo_seed | 0
3816- geqo_selection_bias | 2
3817- geqo_threshold | 12
3818- constraint_exclusion | partition
3819- cursor_tuple_fraction | 0.1
3820- default_statistics_target | 100
3821- force_parallel_mode | off
3822- from_collapse_limit | 8
3823- join_collapse_limit | 8
3824- cpu_index_tuple_cost | 0.005
3825- cpu_operator_cost | 0.0025
3826- cpu_tuple_cost | 0.01
3827- effective_cache_size | 16384
3828- min_parallel_index_scan_size | 64
3829- min_parallel_table_scan_size | 1024
3830- parallel_setup_cost | 1000
3831- parallel_tuple_cost | 0.1
3832- random_page_cost | 4
3833- seq_page_cost | 1
3834- enable_bitmapscan | on
3835- enable_gathermerge | on
3836- enable_hashagg | on
3837- enable_hashjoin | on
3838- enable_indexonlyscan | on
3839- enable_indexscan | off
3840- enable_material | on
3841- enable_mergejoin | off
3842- enable_nestloop | on
3843- enable_seqscan | on
3844- enable_sort | on
3845- enable_tidscan | on
3846- client_min_messages | log
3847-(36 rows)
3899+ name | setting
3900+--------------------------------+-----------
3901+ geqo | on
3902+ geqo_effort | 5
3903+ geqo_generations | 0
3904+ geqo_pool_size | 0
3905+ geqo_seed | 0
3906+ geqo_selection_bias | 2
3907+ geqo_threshold | 12
3908+ constraint_exclusion | partition
3909+ cursor_tuple_fraction | 0.1
3910+ default_statistics_target | 100
3911+ force_parallel_mode | off
3912+ from_collapse_limit | 8
3913+ jit | on
3914+ join_collapse_limit | 8
3915+ cpu_index_tuple_cost | 0.005
3916+ cpu_operator_cost | 0.0025
3917+ cpu_tuple_cost | 0.01
3918+ effective_cache_size | 16384
3919+ jit_above_cost | 100000
3920+ jit_inline_above_cost | 500000
3921+ jit_optimize_above_cost | 500000
3922+ min_parallel_index_scan_size | 64
3923+ min_parallel_table_scan_size | 1024
3924+ parallel_setup_cost | 1000
3925+ parallel_tuple_cost | 0.1
3926+ random_page_cost | 4
3927+ seq_page_cost | 1
3928+ enable_bitmapscan | on
3929+ enable_gathermerge | on
3930+ enable_hashagg | on
3931+ enable_hashjoin | on
3932+ enable_indexonlyscan | on
3933+ enable_indexscan | off
3934+ enable_material | on
3935+ enable_mergejoin | off
3936+ enable_nestloop | on
3937+ enable_parallel_append | on
3938+ enable_parallel_hash | on
3939+ enable_partition_pruning | on
3940+ enable_partitionwise_aggregate | off
3941+ enable_partitionwise_join | off
3942+ enable_seqscan | on
3943+ enable_sort | on
3944+ enable_tidscan | on
3945+ client_min_messages | log
3946+(45 rows)
38483947
38493948 BEGIN;
38503949 /*+Set(enable_indexscan on)Set(geqo_threshold 100)IndexScan(t2)MergeJoin(t1 t2)Leading(t2 t1)*/
@@ -3871,45 +3970,54 @@ error hint:
38713970 COMMIT;
38723971 BEGIN;
38733972 SELECT name, setting FROM settings;
3874- name | setting
3875-------------------------------+-----------
3876- geqo | on
3877- geqo_effort | 5
3878- geqo_generations | 0
3879- geqo_pool_size | 0
3880- geqo_seed | 0
3881- geqo_selection_bias | 2
3882- geqo_threshold | 12
3883- constraint_exclusion | partition
3884- cursor_tuple_fraction | 0.1
3885- default_statistics_target | 100
3886- force_parallel_mode | off
3887- from_collapse_limit | 8
3888- join_collapse_limit | 8
3889- cpu_index_tuple_cost | 0.005
3890- cpu_operator_cost | 0.0025
3891- cpu_tuple_cost | 0.01
3892- effective_cache_size | 16384
3893- min_parallel_index_scan_size | 64
3894- min_parallel_table_scan_size | 1024
3895- parallel_setup_cost | 1000
3896- parallel_tuple_cost | 0.1
3897- random_page_cost | 4
3898- seq_page_cost | 1
3899- enable_bitmapscan | on
3900- enable_gathermerge | on
3901- enable_hashagg | on
3902- enable_hashjoin | on
3903- enable_indexonlyscan | on
3904- enable_indexscan | off
3905- enable_material | on
3906- enable_mergejoin | off
3907- enable_nestloop | on
3908- enable_seqscan | on
3909- enable_sort | on
3910- enable_tidscan | on
3911- client_min_messages | log
3912-(36 rows)
3973+ name | setting
3974+--------------------------------+-----------
3975+ geqo | on
3976+ geqo_effort | 5
3977+ geqo_generations | 0
3978+ geqo_pool_size | 0
3979+ geqo_seed | 0
3980+ geqo_selection_bias | 2
3981+ geqo_threshold | 12
3982+ constraint_exclusion | partition
3983+ cursor_tuple_fraction | 0.1
3984+ default_statistics_target | 100
3985+ force_parallel_mode | off
3986+ from_collapse_limit | 8
3987+ jit | on
3988+ join_collapse_limit | 8
3989+ cpu_index_tuple_cost | 0.005
3990+ cpu_operator_cost | 0.0025
3991+ cpu_tuple_cost | 0.01
3992+ effective_cache_size | 16384
3993+ jit_above_cost | 100000
3994+ jit_inline_above_cost | 500000
3995+ jit_optimize_above_cost | 500000
3996+ min_parallel_index_scan_size | 64
3997+ min_parallel_table_scan_size | 1024
3998+ parallel_setup_cost | 1000
3999+ parallel_tuple_cost | 0.1
4000+ random_page_cost | 4
4001+ seq_page_cost | 1
4002+ enable_bitmapscan | on
4003+ enable_gathermerge | on
4004+ enable_hashagg | on
4005+ enable_hashjoin | on
4006+ enable_indexonlyscan | on
4007+ enable_indexscan | off
4008+ enable_material | on
4009+ enable_mergejoin | off
4010+ enable_nestloop | on
4011+ enable_parallel_append | on
4012+ enable_parallel_hash | on
4013+ enable_partition_pruning | on
4014+ enable_partitionwise_aggregate | off
4015+ enable_partitionwise_join | off
4016+ enable_seqscan | on
4017+ enable_sort | on
4018+ enable_tidscan | on
4019+ client_min_messages | log
4020+(45 rows)
39134021
39144022 EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
39154023 QUERY PLAN
@@ -3936,45 +4044,54 @@ EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
39364044 (5 rows)
39374045
39384046 SELECT name, setting FROM settings;
3939- name | setting
3940-------------------------------+-----------
3941- geqo | on
3942- geqo_effort | 5
3943- geqo_generations | 0
3944- geqo_pool_size | 0
3945- geqo_seed | 0
3946- geqo_selection_bias | 2
3947- geqo_threshold | 12
3948- constraint_exclusion | partition
3949- cursor_tuple_fraction | 0.1
3950- default_statistics_target | 100
3951- force_parallel_mode | off
3952- from_collapse_limit | 8
3953- join_collapse_limit | 8
3954- cpu_index_tuple_cost | 0.005
3955- cpu_operator_cost | 0.0025
3956- cpu_tuple_cost | 0.01
3957- effective_cache_size | 16384
3958- min_parallel_index_scan_size | 64
3959- min_parallel_table_scan_size | 1024
3960- parallel_setup_cost | 1000
3961- parallel_tuple_cost | 0.1
3962- random_page_cost | 4
3963- seq_page_cost | 1
3964- enable_bitmapscan | on
3965- enable_gathermerge | on
3966- enable_hashagg | on
3967- enable_hashjoin | on
3968- enable_indexonlyscan | on
3969- enable_indexscan | off
3970- enable_material | on
3971- enable_mergejoin | off
3972- enable_nestloop | on
3973- enable_seqscan | on
3974- enable_sort | on
3975- enable_tidscan | on
3976- client_min_messages | log
3977-(36 rows)
4047+ name | setting
4048+--------------------------------+-----------
4049+ geqo | on
4050+ geqo_effort | 5
4051+ geqo_generations | 0
4052+ geqo_pool_size | 0
4053+ geqo_seed | 0
4054+ geqo_selection_bias | 2
4055+ geqo_threshold | 12
4056+ constraint_exclusion | partition
4057+ cursor_tuple_fraction | 0.1
4058+ default_statistics_target | 100
4059+ force_parallel_mode | off
4060+ from_collapse_limit | 8
4061+ jit | on
4062+ join_collapse_limit | 8
4063+ cpu_index_tuple_cost | 0.005
4064+ cpu_operator_cost | 0.0025
4065+ cpu_tuple_cost | 0.01
4066+ effective_cache_size | 16384
4067+ jit_above_cost | 100000
4068+ jit_inline_above_cost | 500000
4069+ jit_optimize_above_cost | 500000
4070+ min_parallel_index_scan_size | 64
4071+ min_parallel_table_scan_size | 1024
4072+ parallel_setup_cost | 1000
4073+ parallel_tuple_cost | 0.1
4074+ random_page_cost | 4
4075+ seq_page_cost | 1
4076+ enable_bitmapscan | on
4077+ enable_gathermerge | on
4078+ enable_hashagg | on
4079+ enable_hashjoin | on
4080+ enable_indexonlyscan | on
4081+ enable_indexscan | off
4082+ enable_material | on
4083+ enable_mergejoin | off
4084+ enable_nestloop | on
4085+ enable_parallel_append | on
4086+ enable_parallel_hash | on
4087+ enable_partition_pruning | on
4088+ enable_partitionwise_aggregate | off
4089+ enable_partitionwise_join | off
4090+ enable_seqscan | on
4091+ enable_sort | on
4092+ enable_tidscan | on
4093+ client_min_messages | log
4094+(45 rows)
39784095
39794096 /*+Set(enable_indexscan on)Set(geqo_threshold 100)IndexScan(t2)MergeJoin(t1 t2)Leading(t2 t1)*/
39804097 EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
@@ -4002,45 +4119,54 @@ SET enable_indexscan TO off;
40024119 SET enable_mergejoin TO off;
40034120 LOAD 'pg_hint_plan';
40044121 SELECT name, setting FROM settings;
4005- name | setting
4006-------------------------------+-----------
4007- geqo | on
4008- geqo_effort | 5
4009- geqo_generations | 0
4010- geqo_pool_size | 0
4011- geqo_seed | 0
4012- geqo_selection_bias | 2
4013- geqo_threshold | 12
4014- constraint_exclusion | partition
4015- cursor_tuple_fraction | 0.1
4016- default_statistics_target | 100
4017- force_parallel_mode | off
4018- from_collapse_limit | 8
4019- join_collapse_limit | 8
4020- cpu_index_tuple_cost | 0.005
4021- cpu_operator_cost | 0.0025
4022- cpu_tuple_cost | 0.01
4023- effective_cache_size | 16384
4024- min_parallel_index_scan_size | 64
4025- min_parallel_table_scan_size | 1024
4026- parallel_setup_cost | 1000
4027- parallel_tuple_cost | 0.1
4028- random_page_cost | 4
4029- seq_page_cost | 1
4030- enable_bitmapscan | on
4031- enable_gathermerge | on
4032- enable_hashagg | on
4033- enable_hashjoin | on
4034- enable_indexonlyscan | on
4035- enable_indexscan | off
4036- enable_material | on
4037- enable_mergejoin | off
4038- enable_nestloop | on
4039- enable_seqscan | on
4040- enable_sort | on
4041- enable_tidscan | on
4042- client_min_messages | notice
4043-(36 rows)
4122+ name | setting
4123+--------------------------------+-----------
4124+ geqo | on
4125+ geqo_effort | 5
4126+ geqo_generations | 0
4127+ geqo_pool_size | 0
4128+ geqo_seed | 0
4129+ geqo_selection_bias | 2
4130+ geqo_threshold | 12
4131+ constraint_exclusion | partition
4132+ cursor_tuple_fraction | 0.1
4133+ default_statistics_target | 100
4134+ force_parallel_mode | off
4135+ from_collapse_limit | 8
4136+ jit | on
4137+ join_collapse_limit | 8
4138+ cpu_index_tuple_cost | 0.005
4139+ cpu_operator_cost | 0.0025
4140+ cpu_tuple_cost | 0.01
4141+ effective_cache_size | 16384
4142+ jit_above_cost | 100000
4143+ jit_inline_above_cost | 500000
4144+ jit_optimize_above_cost | 500000
4145+ min_parallel_index_scan_size | 64
4146+ min_parallel_table_scan_size | 1024
4147+ parallel_setup_cost | 1000
4148+ parallel_tuple_cost | 0.1
4149+ random_page_cost | 4
4150+ seq_page_cost | 1
4151+ enable_bitmapscan | on
4152+ enable_gathermerge | on
4153+ enable_hashagg | on
4154+ enable_hashjoin | on
4155+ enable_indexonlyscan | on
4156+ enable_indexscan | off
4157+ enable_material | on
4158+ enable_mergejoin | off
4159+ enable_nestloop | on
4160+ enable_parallel_append | on
4161+ enable_parallel_hash | on
4162+ enable_partition_pruning | on
4163+ enable_partitionwise_aggregate | off
4164+ enable_partitionwise_join | off
4165+ enable_seqscan | on
4166+ enable_sort | on
4167+ enable_tidscan | on
4168+ client_min_messages | notice
4169+(45 rows)
40444170
40454171 EXPLAIN (COSTS false) SELECT * FROM s1.t1, s1.t2 WHERE t1.c1 = t2.c1;
40464172 QUERY PLAN
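Note on the settings dumps above: the row count grows from 36 to 45 because PG11 adds nine planner GUCs that the regression output now lists (jit, jit_above_cost, jit_inline_above_cost, jit_optimize_above_cost, enable_parallel_append, enable_parallel_hash, enable_partition_pruning, enable_partitionwise_aggregate, enable_partitionwise_join). As a rough sketch only — the real "settings" view is defined in the test setup, not in this diff — a view of the following shape over pg_settings would pick up those new GUCs automatically:

    -- hypothetical sketch of a planner-GUC view; column order and the
    -- exact WHERE clause of the real test view may differ
    CREATE VIEW settings AS
      SELECT name, setting
        FROM pg_settings
       WHERE category LIKE 'Query Tuning%'
          OR name = 'client_min_messages';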
--- a/expected/ut-fdw.out~
+++ /dev/null
@@ -1,144 +0,0 @@
1-LOAD 'pg_hint_plan';
2-SET search_path TO public;
3-SET pg_hint_plan.debug_print TO on;
4-SET client_min_messages TO LOG;
5-SET pg_hint_plan.enable_hint TO on;
6-CREATE EXTENSION file_fdw;
7-CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw;
8-CREATE USER MAPPING FOR PUBLIC SERVER file_server;
9-CREATE FOREIGN TABLE ft1 (id int, val int) SERVER file_server OPTIONS (format 'csv', filename '/home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv');
10--- foreign table test
11-SELECT * FROM ft1;
12- id | val
13-----+-----
14- 1 | 1
15- 2 | 2
16- 3 | 3
17- 4 | 4
18- 5 | 5
19- 6 | 6
20- 7 | 7
21- 8 | 8
22- 9 | 9
23- 10 | 10
24-(10 rows)
25-
26-\t
27-EXPLAIN (COSTS false) SELECT * FROM s1.t1, ft1 ft_1, ft1 ft_2 WHERE t1.c1 = ft_1.id AND t1.c1 = ft_2.id;
28- Nested Loop
29- Join Filter: (t1.c1 = ft_2.id)
30- -> Nested Loop
31- -> Foreign Scan on ft1 ft_1
32- Foreign File: /home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv
33- -> Index Scan using t1_i1 on t1
34- Index Cond: (c1 = ft_1.id)
35- -> Foreign Scan on ft1 ft_2
36- Foreign File: /home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv
37-
38-----
39----- No. S-1-5 object type for the hint
40-----
41--- No. S-1-5-6
42-/*+SeqScan(t1)SeqScan(ft_1)SeqScan(ft_2)*/
43-EXPLAIN (COSTS false) SELECT * FROM s1.t1, ft1 ft_1, ft1 ft_2 WHERE t1.c1 = ft_1.id AND t1.c1 = ft_2.id;
44-LOG: pg_hint_plan:
45-used hint:
46-SeqScan(t1)
47-not used hint:
48-SeqScan(ft_1)
49-SeqScan(ft_2)
50-duplication hint:
51-error hint:
52-
53- Nested Loop
54- Join Filter: (t1.c1 = ft_2.id)
55- -> Hash Join
56- Hash Cond: (t1.c1 = ft_1.id)
57- -> Seq Scan on t1
58- -> Hash
59- -> Foreign Scan on ft1 ft_1
60- Foreign File: /home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv
61- -> Foreign Scan on ft1 ft_2
62- Foreign File: /home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv
63-
64-----
65----- No. J-1-6 object type for the hint
66-----
67--- No. J-1-6-6
68-/*+MergeJoin(ft_1 ft_2)Leading(ft_1 ft_2 t1)*/
69-EXPLAIN (COSTS false) SELECT * FROM s1.t1, ft1 ft_1, ft1 ft_2 WHERE t1.c1 = ft_1.id AND t1.c1 = ft_2.id;
70-LOG: pg_hint_plan:
71-used hint:
72-MergeJoin(ft_1 ft_2)
73-Leading(ft_1 ft_2 t1)
74-not used hint:
75-duplication hint:
76-error hint:
77-
78- Nested Loop
79- -> Merge Join
80- Merge Cond: (ft_1.id = ft_2.id)
81- -> Sort
82- Sort Key: ft_1.id
83- -> Foreign Scan on ft1 ft_1
84- Foreign File: /home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv
85- -> Sort
86- Sort Key: ft_2.id
87- -> Foreign Scan on ft1 ft_2
88- Foreign File: /home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv
89- -> Index Scan using t1_i1 on t1
90- Index Cond: (c1 = ft_1.id)
91-
92-----
93----- No. L-1-6 object type for the hint
94-----
95--- No. L-1-6-6
96-/*+Leading(ft_1 ft_2 t1)*/
97-EXPLAIN (COSTS false) SELECT * FROM s1.t1, ft1 ft_1, ft1 ft_2 WHERE t1.c1 = ft_1.id AND t1.c1 = ft_2.id;
98-LOG: pg_hint_plan:
99-used hint:
100-Leading(ft_1 ft_2 t1)
101-not used hint:
102-duplication hint:
103-error hint:
104-
105- Nested Loop
106- -> Nested Loop
107- Join Filter: (ft_1.id = ft_2.id)
108- -> Foreign Scan on ft1 ft_1
109- Foreign File: /home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv
110- -> Foreign Scan on ft1 ft_2
111- Foreign File: /home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv
112- -> Index Scan using t1_i1 on t1
113- Index Cond: (c1 = ft_1.id)
114-
115-----
116----- No. R-1-6 object type for the hint
117-----
118--- No. R-1-6-6
119-\o results/ut-fdw.tmpout
120-/*+Rows(ft_1 ft_2 #1)Leading(ft_1 ft_2 t1)*/
121-EXPLAIN SELECT * FROM s1.t1, ft1 ft_1, ft1 ft_2 WHERE t1.c1 = ft_1.id AND t1.c1 = ft_2.id;
122-LOG: pg_hint_plan:
123-used hint:
124-Leading(ft_1 ft_2 t1)
125-Rows(ft_1 ft_2 #1)
126-not used hint:
127-duplication hint:
128-error hint:
129-
130-\o
131-\! sql/maskout.sh results/ut-fdw.tmpout
132- Nested Loop (cost=xxx rows=1 width=xxx)
133- -> Nested Loop (cost=xxx rows=1 width=xxx)
134- Join Filter: (ft_1.id = ft_2.id)
135- -> Foreign Scan on ft1 ft_1 (cost=xxx rows=1 width=xxx)
136- Foreign File: /home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv
137- Foreign File Size: 42
138- -> Foreign Scan on ft1 ft_2 (cost=xxx rows=1 width=xxx)
139- Foreign File: /home/horiguti/work/pg_hint_plan/pg_hint_plan/data/data.csv
140- Foreign File Size: 42
141- -> Index Scan using t1_i1 on t1 (cost=xxx rows=1 width=xxx)
142- Index Cond: (c1 = ft_1.id)
143-
144-\! rm results/ut-fdw.tmpout
--- a/make_join_rel.c
+++ b/make_join_rel.c
@@ -15,9 +15,9 @@
1515 */
1616
1717 static void populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
18- RelOptInfo *rel2, RelOptInfo *joinrel,
19- SpecialJoinInfo *sjinfo,
20- List *restrictlist);
18+ RelOptInfo *rel2, RelOptInfo *joinrel,
19+ SpecialJoinInfo *sjinfo, List *restrictlist);
20+
2121 /*
2222 * adjust_rows: tweak estimated row numbers according to the hint.
2323 */
@@ -255,7 +255,7 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
255255 {
256256 case JOIN_INNER:
257257 if (is_dummy_rel(rel1) || is_dummy_rel(rel2) ||
258- restriction_is_constant_false(restrictlist, false))
258+ restriction_is_constant_false(restrictlist, joinrel, false))
259259 {
260260 mark_dummy_rel(joinrel);
261261 break;
@@ -269,12 +269,12 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
269269 break;
270270 case JOIN_LEFT:
271271 if (is_dummy_rel(rel1) ||
272- restriction_is_constant_false(restrictlist, true))
272+ restriction_is_constant_false(restrictlist, joinrel, true))
273273 {
274274 mark_dummy_rel(joinrel);
275275 break;
276276 }
277- if (restriction_is_constant_false(restrictlist, false) &&
277+ if (restriction_is_constant_false(restrictlist, joinrel, false) &&
278278 bms_is_subset(rel2->relids, sjinfo->syn_righthand))
279279 mark_dummy_rel(rel2);
280280 add_paths_to_joinrel(root, joinrel, rel1, rel2,
@@ -286,7 +286,7 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
286286 break;
287287 case JOIN_FULL:
288288 if ((is_dummy_rel(rel1) && is_dummy_rel(rel2)) ||
289- restriction_is_constant_false(restrictlist, true))
289+ restriction_is_constant_false(restrictlist, joinrel, true))
290290 {
291291 mark_dummy_rel(joinrel);
292292 break;
@@ -322,7 +322,7 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
322322 bms_is_subset(sjinfo->min_righthand, rel2->relids))
323323 {
324324 if (is_dummy_rel(rel1) || is_dummy_rel(rel2) ||
325- restriction_is_constant_false(restrictlist, false))
325+ restriction_is_constant_false(restrictlist, joinrel, false))
326326 {
327327 mark_dummy_rel(joinrel);
328328 break;
@@ -345,7 +345,7 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
345345 sjinfo) != NULL)
346346 {
347347 if (is_dummy_rel(rel1) || is_dummy_rel(rel2) ||
348- restriction_is_constant_false(restrictlist, false))
348+ restriction_is_constant_false(restrictlist, joinrel, false))
349349 {
350350 mark_dummy_rel(joinrel);
351351 break;
@@ -360,12 +360,12 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
360360 break;
361361 case JOIN_ANTI:
362362 if (is_dummy_rel(rel1) ||
363- restriction_is_constant_false(restrictlist, true))
363+ restriction_is_constant_false(restrictlist, joinrel, true))
364364 {
365365 mark_dummy_rel(joinrel);
366366 break;
367367 }
368- if (restriction_is_constant_false(restrictlist, false) &&
368+ if (restriction_is_constant_false(restrictlist, joinrel, false) &&
369369 bms_is_subset(rel2->relids, sjinfo->syn_righthand))
370370 mark_dummy_rel(rel2);
371371 add_paths_to_joinrel(root, joinrel, rel1, rel2,
@@ -377,4 +377,7 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
377377 elog(ERROR, "unrecognized join type: %d", (int) sjinfo->jointype);
378378 break;
379379 }
380+
381+ /* Apply partitionwise join technique, if possible. */
382+ try_partitionwise_join(root, rel1, rel2, joinrel, sjinfo, restrictlist);
380383 }
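The make_join_rel.c hunk above tracks two PG11 planner changes: restriction_is_constant_false() now takes the joinrel as an extra argument, and populate_joinrel_with_paths() finishes by calling try_partitionwise_join(), so joins between matching partitions are planned — and can be hinted — per partition. A hedged usage sketch; pt1 and pt2 are illustrative partitioned tables, not objects from this test suite:

    -- pt1/pt2 are assumed partitioned tables joined on their partition key
    SET enable_partitionwise_join TO on;
    /*+MergeJoin(pt1 pt2)*/
    EXPLAIN (COSTS false)
      SELECT * FROM pt1 JOIN pt2 ON pt1.id = pt2.id;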
--- a/output/ut-W.source
+++ b/output/ut-W.source
@@ -87,6 +87,7 @@ SET parallel_setup_cost to 0;
8787 SET parallel_tuple_cost to 0;
8888 SET min_parallel_table_scan_size to 0;
8989 SET min_parallel_index_scan_size to 0;
90+SET enable_parallel_append to false;
9091 /*+Parallel(p1 8)*/
9192 EXPLAIN (COSTS false) SELECT * FROM p1;
9293 LOG: pg_hint_plan:
@@ -112,10 +113,37 @@ error hint:
112113 -> Parallel Seq Scan on p1_c3_c2
113114 (12 rows)
114115
116+SET enable_parallel_append to true;
117+/*+Parallel(p1 8)*/
118+EXPLAIN (COSTS false) SELECT * FROM p1;
119+LOG: pg_hint_plan:
120+used hint:
121+Parallel(p1 8 soft)
122+not used hint:
123+duplication hint:
124+error hint:
125+
126+ QUERY PLAN
127+-------------------------------------------
128+ Gather
129+ Workers Planned: 2
130+ -> Parallel Append
131+ -> Seq Scan on p1
132+ -> Seq Scan on p1_c1
133+ -> Seq Scan on p1_c3
134+ -> Parallel Seq Scan on p1_c2
135+ -> Parallel Seq Scan on p1_c4
136+ -> Parallel Seq Scan on p1_c1_c1
137+ -> Parallel Seq Scan on p1_c1_c2
138+ -> Parallel Seq Scan on p1_c3_c1
139+ -> Parallel Seq Scan on p1_c3_c2
140+(12 rows)
141+
115142 SET parallel_setup_cost to DEFAULT;
116143 SET parallel_tuple_cost to DEFAULT;
117144 SET min_parallel_table_scan_size to DEFAULT;
118145 SET min_parallel_index_scan_size to DEFAULT;
146+SET enable_parallel_append to false;
119147 /*+Parallel(p1 8 hard)*/
120148 EXPLAIN (COSTS false) SELECT * FROM p1;
121149 LOG: pg_hint_plan:
@@ -141,12 +169,12 @@ error hint:
141169 -> Parallel Seq Scan on p1_c3_c2
142170 (12 rows)
143171
144--- hinting on children makes the whole inheritance parallel
145-/*+Parallel(p1_c1 8 hard)*/
172+SET enable_parallel_append to true;
173+/*+Parallel(p1 8 hard)*/
146174 EXPLAIN (COSTS false) SELECT * FROM p1;
147175 LOG: pg_hint_plan:
148176 used hint:
149-Parallel(p1_c1 8 hard)
177+Parallel(p1 8 hard)
150178 not used hint:
151179 duplication hint:
152180 error hint:
@@ -155,11 +183,11 @@ error hint:
155183 -------------------------------------------
156184 Gather
157185 Workers Planned: 8
158- -> Append
159- -> Parallel Seq Scan on p1
160- -> Parallel Seq Scan on p1_c1
186+ -> Parallel Append
187+ -> Seq Scan on p1
188+ -> Seq Scan on p1_c1
189+ -> Seq Scan on p1_c3
161190 -> Parallel Seq Scan on p1_c2
162- -> Parallel Seq Scan on p1_c3
163191 -> Parallel Seq Scan on p1_c4
164192 -> Parallel Seq Scan on p1_c1_c1
165193 -> Parallel Seq Scan on p1_c1_c2
@@ -167,6 +195,55 @@ error hint:
167195 -> Parallel Seq Scan on p1_c3_c2
168196 (12 rows)
169197
198+-- hinting on children doesn't work (changed as of pg_hint_plan 10)
199+SET enable_parallel_append to false;
200+/*+Parallel(p1_c1 8 hard)*/
201+EXPLAIN (COSTS false) SELECT * FROM p1;
202+LOG: pg_hint_plan:
203+used hint:
204+Parallel(p1_c1 8 hard)
205+not used hint:
206+duplication hint:
207+error hint:
208+
209+ QUERY PLAN
210+----------------------------
211+ Append
212+ -> Seq Scan on p1
213+ -> Seq Scan on p1_c1
214+ -> Seq Scan on p1_c2
215+ -> Seq Scan on p1_c3
216+ -> Seq Scan on p1_c4
217+ -> Seq Scan on p1_c1_c1
218+ -> Seq Scan on p1_c1_c2
219+ -> Seq Scan on p1_c3_c1
220+ -> Seq Scan on p1_c3_c2
221+(10 rows)
222+
223+SET enable_parallel_append to true;
224+/*+Parallel(p1_c1 8 hard)*/
225+EXPLAIN (COSTS false) SELECT * FROM p1;
226+LOG: pg_hint_plan:
227+used hint:
228+Parallel(p1_c1 8 hard)
229+not used hint:
230+duplication hint:
231+error hint:
232+
233+ QUERY PLAN
234+----------------------------
235+ Append
236+ -> Seq Scan on p1
237+ -> Seq Scan on p1_c1
238+ -> Seq Scan on p1_c2
239+ -> Seq Scan on p1_c3
240+ -> Seq Scan on p1_c4
241+ -> Seq Scan on p1_c1_c1
242+ -> Seq Scan on p1_c1_c2
243+ -> Seq Scan on p1_c3_c1
244+ -> Seq Scan on p1_c3_c2
245+(10 rows)
246+
170247 -- Joins
171248 EXPLAIN (COSTS false) SELECT * FROM p1_c1_c1 join p2_c1_c1 on p1_c1_c1.id = p2_c1_c1.id;
172249 QUERY PLAN
@@ -256,22 +333,21 @@ error hint:
256333
257334 QUERY PLAN
258335 -------------------------------------------------
259- Hash Join
260- Hash Cond: (p1_c1_c1.id = p2_c1_c1.id)
261- -> Gather
262- Workers Planned: 8
336+ Gather
337+ Workers Planned: 8
338+ -> Parallel Hash Join
339+ Hash Cond: (p1_c1_c1.id = p2_c1_c1.id)
263340 -> Parallel Seq Scan on p1_c1_c1
264- -> Hash
265- -> Gather
266- Workers Planned: 8
341+ -> Parallel Hash
267342 -> Parallel Seq Scan on p2_c1_c1
268-(9 rows)
343+(7 rows)
269344
270345 -- Joins on inheritance tables
271346 SET parallel_setup_cost to 0;
272347 SET parallel_tuple_cost to 0;
273348 SET min_parallel_table_scan_size to 0;
274349 SET min_parallel_index_scan_size to 0;
350+SET enable_parallel_append to false;
275351 /*+Parallel(p1 8)*/
276352 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
277353 LOG: pg_hint_plan:
@@ -281,11 +357,11 @@ not used hint:
281357 duplication hint:
282358 error hint:
283359
284- QUERY PLAN
285--------------------------------------------------
360+ QUERY PLAN
361+-------------------------------------------------------
286362 Gather
287363 Workers Planned: 1
288- -> Hash Join
364+ -> Parallel Hash Join
289365 Hash Cond: (p1.id = p2.id)
290366 -> Append
291367 -> Parallel Seq Scan on p1
@@ -297,19 +373,59 @@ error hint:
297373 -> Parallel Seq Scan on p1_c1_c2
298374 -> Parallel Seq Scan on p1_c3_c1
299375 -> Parallel Seq Scan on p1_c3_c2
300- -> Hash
376+ -> Parallel Hash
301377 -> Append
378+ -> Parallel Seq Scan on p2
379+ -> Parallel Seq Scan on p2_c1
380+ -> Parallel Seq Scan on p2_c2
381+ -> Parallel Seq Scan on p2_c3
382+ -> Parallel Seq Scan on p2_c4
383+ -> Parallel Seq Scan on p2_c1_c1
384+ -> Parallel Seq Scan on p2_c1_c2
385+ -> Parallel Seq Scan on p2_c3_c1
386+ -> Parallel Seq Scan on p2_c3_c2
387+(25 rows)
388+
389+SET enable_parallel_append to true;
390+/*+Parallel(p1 8)*/
391+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
392+LOG: pg_hint_plan:
393+used hint:
394+Parallel(p1 8 soft)
395+not used hint:
396+duplication hint:
397+error hint:
398+
399+ QUERY PLAN
400+-------------------------------------------------------
401+ Gather
402+ Workers Planned: 2
403+ -> Parallel Hash Join
404+ Hash Cond: (p1.id = p2.id)
405+ -> Parallel Append
406+ -> Seq Scan on p1
407+ -> Seq Scan on p1_c1
408+ -> Seq Scan on p1_c3
409+ -> Parallel Seq Scan on p1_c2
410+ -> Parallel Seq Scan on p1_c4
411+ -> Parallel Seq Scan on p1_c1_c1
412+ -> Parallel Seq Scan on p1_c1_c2
413+ -> Parallel Seq Scan on p1_c3_c1
414+ -> Parallel Seq Scan on p1_c3_c2
415+ -> Parallel Hash
416+ -> Parallel Append
302417 -> Seq Scan on p2
303418 -> Seq Scan on p2_c1
304- -> Seq Scan on p2_c2
305419 -> Seq Scan on p2_c3
306- -> Seq Scan on p2_c4
307- -> Seq Scan on p2_c1_c1
308- -> Seq Scan on p2_c1_c2
309- -> Seq Scan on p2_c3_c1
310- -> Seq Scan on p2_c3_c2
420+ -> Parallel Seq Scan on p2_c2
421+ -> Parallel Seq Scan on p2_c4
422+ -> Parallel Seq Scan on p2_c1_c1
423+ -> Parallel Seq Scan on p2_c1_c2
424+ -> Parallel Seq Scan on p2_c3_c1
425+ -> Parallel Seq Scan on p2_c3_c2
311426 (25 rows)
312427
428+SET enable_parallel_append to false;
313429 /*+Parallel(p1 8)Parallel(p2 0)*/
314430 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
315431 LOG: pg_hint_plan:
@@ -349,6 +465,46 @@ error hint:
349465 -> Seq Scan on p2_c3_c2
350466 (25 rows)
351467
468+SET enable_parallel_append to true;
469+/*+Parallel(p1 8)Parallel(p2 0)*/
470+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
471+LOG: pg_hint_plan:
472+used hint:
473+Parallel(p1 8 soft)
474+Parallel(p2 0 soft)
475+not used hint:
476+duplication hint:
477+error hint:
478+
479+ QUERY PLAN
480+-------------------------------------------------
481+ Gather
482+ Workers Planned: 2
483+ -> Parallel Hash Join
484+ Hash Cond: (p1.id = p2_c2.id)
485+ -> Parallel Append
486+ -> Seq Scan on p1
487+ -> Seq Scan on p1_c1
488+ -> Seq Scan on p1_c3
489+ -> Parallel Seq Scan on p1_c2
490+ -> Parallel Seq Scan on p1_c4
491+ -> Parallel Seq Scan on p1_c1_c1
492+ -> Parallel Seq Scan on p1_c1_c2
493+ -> Parallel Seq Scan on p1_c3_c1
494+ -> Parallel Seq Scan on p1_c3_c2
495+ -> Parallel Hash
496+ -> Parallel Append
497+ -> Seq Scan on p2_c2
498+ -> Seq Scan on p2_c4
499+ -> Seq Scan on p2_c1_c1
500+ -> Seq Scan on p2_c1_c2
501+ -> Seq Scan on p2_c3_c1
502+ -> Seq Scan on p2_c3_c2
503+ -> Seq Scan on p2
504+ -> Seq Scan on p2_c1
505+ -> Seq Scan on p2_c3
506+(25 rows)
507+
352508 SET parallel_setup_cost to DEFAULT;
353509 SET parallel_tuple_cost to DEFAULT;
354510 SET min_parallel_table_scan_size to DEFAULT;
@@ -362,34 +518,32 @@ not used hint:
362518 duplication hint:
363519 error hint:
364520
365- QUERY PLAN
366--------------------------------------------------
367- Gather
368- Workers Planned: 1
369- -> Hash Join
370- Hash Cond: (p2.id = p1.id)
521+ QUERY PLAN
522+----------------------------------------
523+ Hash Join
524+ Hash Cond: (p1.id = p2.id)
525+ -> Append
526+ -> Seq Scan on p1
527+ -> Seq Scan on p1_c1
528+ -> Seq Scan on p1_c2
529+ -> Seq Scan on p1_c3
530+ -> Seq Scan on p1_c4
531+ -> Seq Scan on p1_c1_c1
532+ -> Seq Scan on p1_c1_c2
533+ -> Seq Scan on p1_c3_c1
534+ -> Seq Scan on p1_c3_c2
535+ -> Hash
371536 -> Append
372- -> Parallel Seq Scan on p2
373- -> Parallel Seq Scan on p2_c1
374- -> Parallel Seq Scan on p2_c2
375- -> Parallel Seq Scan on p2_c3
376- -> Parallel Seq Scan on p2_c4
377- -> Parallel Seq Scan on p2_c1_c1
378- -> Parallel Seq Scan on p2_c1_c2
379- -> Parallel Seq Scan on p2_c3_c1
380- -> Parallel Seq Scan on p2_c3_c2
381- -> Hash
382- -> Append
383- -> Seq Scan on p1
384- -> Seq Scan on p1_c1
385- -> Seq Scan on p1_c2
386- -> Seq Scan on p1_c3
387- -> Seq Scan on p1_c4
388- -> Seq Scan on p1_c1_c1
389- -> Seq Scan on p1_c1_c2
390- -> Seq Scan on p1_c3_c1
391- -> Seq Scan on p1_c3_c2
392-(25 rows)
537+ -> Seq Scan on p2
538+ -> Seq Scan on p2_c1
539+ -> Seq Scan on p2_c2
540+ -> Seq Scan on p2_c3
541+ -> Seq Scan on p2_c4
542+ -> Seq Scan on p2_c1_c1
543+ -> Seq Scan on p2_c1_c2
544+ -> Seq Scan on p2_c3_c1
545+ -> Seq Scan on p2_c3_c2
546+(23 rows)
393547
394548 /*+Parallel(p2 8 hard)*/
395549 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
@@ -400,35 +554,37 @@ not used hint:
400554 duplication hint:
401555 error hint:
402556
403- QUERY PLAN
404--------------------------------------------------
557+ QUERY PLAN
558+-------------------------------------------------------
405559 Gather
406560 Workers Planned: 8
407- -> Hash Join
561+ -> Parallel Hash Join
408562 Hash Cond: (p2.id = p1.id)
409- -> Append
410- -> Parallel Seq Scan on p2
411- -> Parallel Seq Scan on p2_c1
563+ -> Parallel Append
564+ -> Seq Scan on p2
565+ -> Seq Scan on p2_c1
566+ -> Seq Scan on p2_c3
412567 -> Parallel Seq Scan on p2_c2
413- -> Parallel Seq Scan on p2_c3
414568 -> Parallel Seq Scan on p2_c4
415569 -> Parallel Seq Scan on p2_c1_c1
416570 -> Parallel Seq Scan on p2_c1_c2
417571 -> Parallel Seq Scan on p2_c3_c1
418572 -> Parallel Seq Scan on p2_c3_c2
419- -> Hash
420- -> Append
573+ -> Parallel Hash
574+ -> Parallel Append
421575 -> Seq Scan on p1
422576 -> Seq Scan on p1_c1
423- -> Seq Scan on p1_c2
424577 -> Seq Scan on p1_c3
425- -> Seq Scan on p1_c4
426- -> Seq Scan on p1_c1_c1
427- -> Seq Scan on p1_c1_c2
428- -> Seq Scan on p1_c3_c1
429- -> Seq Scan on p1_c3_c2
578+ -> Parallel Seq Scan on p1_c2
579+ -> Parallel Seq Scan on p1_c4
580+ -> Parallel Seq Scan on p1_c1_c1
581+ -> Parallel Seq Scan on p1_c1_c2
582+ -> Parallel Seq Scan on p1_c3_c1
583+ -> Parallel Seq Scan on p1_c3_c2
430584 (25 rows)
431585
586+-- Number of workers resolves to the largest hinted value
587+SET enable_parallel_append to false;
432588 /*+Parallel(p2 8 hard) Parallel(p1 5 hard) */
433589 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
434590 LOG: pg_hint_plan:
@@ -441,10 +597,10 @@ error hint:
441597
442598 QUERY PLAN
443599 -------------------------------------------------------
444- Hash Join
445- Hash Cond: (p1.id = p2.id)
446- -> Gather
447- Workers Planned: 5
600+ Gather
601+ Workers Planned: 8
602+ -> Parallel Hash Join
603+ Hash Cond: (p1.id = p2.id)
448604 -> Append
449605 -> Parallel Seq Scan on p1
450606 -> Parallel Seq Scan on p1_c1
@@ -455,9 +611,7 @@ error hint:
455611 -> Parallel Seq Scan on p1_c1_c2
456612 -> Parallel Seq Scan on p1_c3_c1
457613 -> Parallel Seq Scan on p1_c3_c2
458- -> Hash
459- -> Gather
460- Workers Planned: 8
614+ -> Parallel Hash
461615 -> Append
462616 -> Parallel Seq Scan on p2
463617 -> Parallel Seq Scan on p2_c1
@@ -468,42 +622,41 @@ error hint:
468622 -> Parallel Seq Scan on p2_c1_c2
469623 -> Parallel Seq Scan on p2_c3_c1
470624 -> Parallel Seq Scan on p2_c3_c2
471-(27 rows)
625+(25 rows)
472626
473--- Mixture with a scan hint
474--- p1 can be parallel
475-/*+Parallel(p1 8 hard) IndexScan(p2) */
627+SET enable_parallel_append to true;
628+/*+Parallel(p2 8 hard) Parallel(p1 5 hard) */
476629 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
477630 LOG: pg_hint_plan:
478631 used hint:
479-IndexScan(p2)
480-Parallel(p1 8 hard)
632+Parallel(p1 5 hard)
633+Parallel(p2 8 hard)
481634 not used hint:
482635 duplication hint:
483636 error hint:
484637
485- QUERY PLAN
486---------------------------------------------------------------
487- Hash Join
488- Hash Cond: (p2.id = p1.id)
489- -> Append
490- -> Index Scan using p2_id2_val on p2
491- -> Index Scan using p2_c1_id2_val on p2_c1
492- -> Index Scan using p2_c2_id2_val on p2_c2
493- -> Index Scan using p2_c3_id_val_idx on p2_c3
494- -> Index Scan using p2_c4_id_val_idx on p2_c4
495- -> Index Scan using p2_c1_c1_id_val_idx on p2_c1_c1
496- -> Index Scan using p2_c1_c2_id_val_idx on p2_c1_c2
497- -> Index Scan using p2_c3_c1_id_val_idx on p2_c3_c1
498- -> Index Scan using p2_c3_c2_id_val_idx on p2_c3_c2
499- -> Hash
500- -> Gather
501- Workers Planned: 8
502- -> Append
503- -> Parallel Seq Scan on p1
504- -> Parallel Seq Scan on p1_c1
638+ QUERY PLAN
639+-------------------------------------------------------
640+ Gather
641+ Workers Planned: 8
642+ -> Parallel Hash Join
643+ Hash Cond: (p2.id = p1.id)
644+ -> Parallel Append
645+ -> Seq Scan on p2
646+ -> Seq Scan on p2_c1
647+ -> Seq Scan on p2_c3
648+ -> Parallel Seq Scan on p2_c2
649+ -> Parallel Seq Scan on p2_c4
650+ -> Parallel Seq Scan on p2_c1_c1
651+ -> Parallel Seq Scan on p2_c1_c2
652+ -> Parallel Seq Scan on p2_c3_c1
653+ -> Parallel Seq Scan on p2_c3_c2
654+ -> Parallel Hash
655+ -> Parallel Append
656+ -> Seq Scan on p1
657+ -> Seq Scan on p1_c1
658+ -> Seq Scan on p1_c3
505659 -> Parallel Seq Scan on p1_c2
506- -> Parallel Seq Scan on p1_c3
507660 -> Parallel Seq Scan on p1_c4
508661 -> Parallel Seq Scan on p1_c1_c1
509662 -> Parallel Seq Scan on p1_c1_c2
@@ -511,7 +664,90 @@ error hint:
511664 -> Parallel Seq Scan on p1_c3_c2
512665 (25 rows)
513666
667+-- Mixture with scan hints
668+-- p1 can be parallel
669+SET enable_parallel_append to false;
670+/*+Parallel(p1 8 hard) IndexScan(p2) */
671+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
672+LOG: pg_hint_plan:
673+used hint:
674+IndexScan(p2)
675+Parallel(p1 8 hard)
676+not used hint:
677+duplication hint:
678+error hint:
679+
680+ QUERY PLAN
681+-----------------------------------------------------------------------------------
682+ Gather
683+ Workers Planned: 8
684+ -> Parallel Hash Join
685+ Hash Cond: (p1.id = p2.id)
686+ -> Append
687+ -> Parallel Seq Scan on p1
688+ -> Parallel Seq Scan on p1_c1
689+ -> Parallel Seq Scan on p1_c2
690+ -> Parallel Seq Scan on p1_c3
691+ -> Parallel Seq Scan on p1_c4
692+ -> Parallel Seq Scan on p1_c1_c1
693+ -> Parallel Seq Scan on p1_c1_c2
694+ -> Parallel Seq Scan on p1_c3_c1
695+ -> Parallel Seq Scan on p1_c3_c2
696+ -> Parallel Hash
697+ -> Append
698+ -> Parallel Index Scan using p2_id2_val on p2
699+ -> Parallel Index Scan using p2_c1_id2_val on p2_c1
700+ -> Parallel Index Scan using p2_c2_id2_val on p2_c2
701+ -> Parallel Index Scan using p2_c3_id_val_idx on p2_c3
702+ -> Parallel Index Scan using p2_c4_id_val_idx on p2_c4
703+ -> Parallel Index Scan using p2_c1_c1_id_val_idx on p2_c1_c1
704+ -> Parallel Index Scan using p2_c1_c2_id_val_idx on p2_c1_c2
705+ -> Parallel Index Scan using p2_c3_c1_id_val_idx on p2_c3_c1
706+ -> Parallel Index Scan using p2_c3_c2_id_val_idx on p2_c3_c2
707+(25 rows)
708+
709+SET enable_parallel_append to true;
710+/*+Parallel(p1 8 hard) IndexScan(p2) */
711+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
712+LOG: pg_hint_plan:
713+used hint:
714+IndexScan(p2)
715+Parallel(p1 8 hard)
716+not used hint:
717+duplication hint:
718+error hint:
719+
720+ QUERY PLAN
721+-----------------------------------------------------------------------------------
722+ Gather
723+ Workers Planned: 8
724+ -> Parallel Hash Join
725+ Hash Cond: (p1.id = p2_c2.id)
726+ -> Parallel Append
727+ -> Seq Scan on p1
728+ -> Seq Scan on p1_c1
729+ -> Seq Scan on p1_c3
730+ -> Parallel Seq Scan on p1_c2
731+ -> Parallel Seq Scan on p1_c4
732+ -> Parallel Seq Scan on p1_c1_c1
733+ -> Parallel Seq Scan on p1_c1_c2
734+ -> Parallel Seq Scan on p1_c3_c1
735+ -> Parallel Seq Scan on p1_c3_c2
736+ -> Parallel Hash
737+ -> Parallel Append
738+ -> Parallel Index Scan using p2_c2_id2_val on p2_c2
739+ -> Parallel Index Scan using p2_c4_id_val_idx on p2_c4
740+ -> Parallel Index Scan using p2_c1_c1_id_val_idx on p2_c1_c1
741+ -> Parallel Index Scan using p2_c1_c2_id_val_idx on p2_c1_c2
742+ -> Parallel Index Scan using p2_c3_c1_id_val_idx on p2_c3_c1
743+ -> Parallel Index Scan using p2_c3_c2_id_val_idx on p2_c3_c2
744+ -> Parallel Index Scan using p2_id2_val on p2
745+ -> Parallel Index Scan using p2_c1_id2_val on p2_c1
746+ -> Parallel Index Scan using p2_c3_id_val_idx on p2_c3
747+(25 rows)
748+
514749 -- Parallel sequential scan
750+SET enable_parallel_append to false;
515751 /*+Parallel(p1 8 hard) SeqScan(p1) */
516752 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
517753 LOG: pg_hint_plan:
@@ -522,11 +758,11 @@ not used hint:
522758 duplication hint:
523759 error hint:
524760
525- QUERY PLAN
526--------------------------------------------------
761+ QUERY PLAN
762+-------------------------------------------------------
527763 Gather
528764 Workers Planned: 8
529- -> Hash Join
765+ -> Parallel Hash Join
530766 Hash Cond: (p1.id = p2.id)
531767 -> Append
532768 -> Parallel Seq Scan on p1
@@ -538,20 +774,61 @@ error hint:
538774 -> Parallel Seq Scan on p1_c1_c2
539775 -> Parallel Seq Scan on p1_c3_c1
540776 -> Parallel Seq Scan on p1_c3_c2
541- -> Hash
777+ -> Parallel Hash
542778 -> Append
779+ -> Parallel Seq Scan on p2
780+ -> Parallel Seq Scan on p2_c1
781+ -> Parallel Seq Scan on p2_c2
782+ -> Parallel Seq Scan on p2_c3
783+ -> Parallel Seq Scan on p2_c4
784+ -> Parallel Seq Scan on p2_c1_c1
785+ -> Parallel Seq Scan on p2_c1_c2
786+ -> Parallel Seq Scan on p2_c3_c1
787+ -> Parallel Seq Scan on p2_c3_c2
788+(25 rows)
789+
790+SET enable_parallel_append to true;
791+/*+Parallel(p1 8 hard) SeqScan(p1) */
792+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
793+LOG: pg_hint_plan:
794+used hint:
795+SeqScan(p1)
796+Parallel(p1 8 hard)
797+not used hint:
798+duplication hint:
799+error hint:
800+
801+ QUERY PLAN
802+-------------------------------------------------------
803+ Gather
804+ Workers Planned: 8
805+ -> Parallel Hash Join
806+ Hash Cond: (p1.id = p2.id)
807+ -> Parallel Append
808+ -> Seq Scan on p1
809+ -> Seq Scan on p1_c1
810+ -> Seq Scan on p1_c3
811+ -> Parallel Seq Scan on p1_c2
812+ -> Parallel Seq Scan on p1_c4
813+ -> Parallel Seq Scan on p1_c1_c1
814+ -> Parallel Seq Scan on p1_c1_c2
815+ -> Parallel Seq Scan on p1_c3_c1
816+ -> Parallel Seq Scan on p1_c3_c2
817+ -> Parallel Hash
818+ -> Parallel Append
543819 -> Seq Scan on p2
544820 -> Seq Scan on p2_c1
545- -> Seq Scan on p2_c2
546821 -> Seq Scan on p2_c3
547- -> Seq Scan on p2_c4
548- -> Seq Scan on p2_c1_c1
549- -> Seq Scan on p2_c1_c2
550- -> Seq Scan on p2_c3_c1
551- -> Seq Scan on p2_c3_c2
822+ -> Parallel Seq Scan on p2_c2
823+ -> Parallel Seq Scan on p2_c4
824+ -> Parallel Seq Scan on p2_c1_c1
825+ -> Parallel Seq Scan on p2_c1_c2
826+ -> Parallel Seq Scan on p2_c3_c1
827+ -> Parallel Seq Scan on p2_c3_c2
552828 (25 rows)
553829
554830 -- Parallel index scan
831+SET enable_parallel_append to false;
555832 /*+Parallel(p1 8 hard) IndexScan(p1) */
556833 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
557834 LOG: pg_hint_plan:
@@ -566,7 +843,7 @@ error hint:
566843 -----------------------------------------------------------------------
567844 Gather
568845 Workers Planned: 8
569- -> Hash Join
846+ -> Parallel Hash Join
570847 Hash Cond: (p1.id = p2.id)
571848 -> Append
572849 -> Parallel Index Scan using p1_pkey on p1
@@ -578,17 +855,57 @@ error hint:
578855 -> Parallel Index Scan using p1_c1_c2_pkey on p1_c1_c2
579856 -> Parallel Index Scan using p1_c3_c1_pkey on p1_c3_c1
580857 -> Parallel Index Scan using p1_c3_c2_pkey on p1_c3_c2
581- -> Hash
858+ -> Parallel Hash
582859 -> Append
860+ -> Parallel Seq Scan on p2
861+ -> Parallel Seq Scan on p2_c1
862+ -> Parallel Seq Scan on p2_c2
863+ -> Parallel Seq Scan on p2_c3
864+ -> Parallel Seq Scan on p2_c4
865+ -> Parallel Seq Scan on p2_c1_c1
866+ -> Parallel Seq Scan on p2_c1_c2
867+ -> Parallel Seq Scan on p2_c3_c1
868+ -> Parallel Seq Scan on p2_c3_c2
869+(25 rows)
870+
871+SET enable_parallel_append to true;
872+/*+Parallel(p1 8 hard) IndexScan(p1) */
873+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
874+LOG: pg_hint_plan:
875+used hint:
876+IndexScan(p1)
877+Parallel(p1 8 hard)
878+not used hint:
879+duplication hint:
880+error hint:
881+
882+ QUERY PLAN
883+-----------------------------------------------------------------------
884+ Gather
885+ Workers Planned: 8
886+ -> Parallel Hash Join
887+ Hash Cond: (p1.id = p2.id)
888+ -> Parallel Append
889+ -> Parallel Index Scan using p1_pkey on p1
890+ -> Parallel Index Scan using p1_c1_pkey on p1_c1
891+ -> Parallel Index Scan using p1_c2_pkey on p1_c2
892+ -> Parallel Index Scan using p1_c3_pkey on p1_c3
893+ -> Parallel Index Scan using p1_c4_pkey on p1_c4
894+ -> Parallel Index Scan using p1_c1_c1_pkey on p1_c1_c1
895+ -> Parallel Index Scan using p1_c1_c2_pkey on p1_c1_c2
896+ -> Parallel Index Scan using p1_c3_c1_pkey on p1_c3_c1
897+ -> Parallel Index Scan using p1_c3_c2_pkey on p1_c3_c2
898+ -> Parallel Hash
899+ -> Parallel Append
583900 -> Seq Scan on p2
584901 -> Seq Scan on p2_c1
585- -> Seq Scan on p2_c2
586902 -> Seq Scan on p2_c3
587- -> Seq Scan on p2_c4
588- -> Seq Scan on p2_c1_c1
589- -> Seq Scan on p2_c1_c2
590- -> Seq Scan on p2_c3_c1
591- -> Seq Scan on p2_c3_c2
903+ -> Parallel Seq Scan on p2_c2
904+ -> Parallel Seq Scan on p2_c4
905+ -> Parallel Seq Scan on p2_c1_c1
906+ -> Parallel Seq Scan on p2_c1_c2
907+ -> Parallel Seq Scan on p2_c3_c1
908+ -> Parallel Seq Scan on p2_c3_c2
592909 (25 rows)
593910
594911 -- This hint doesn't turn on parallel, so the Parallel hint is ignored
@@ -674,7 +991,13 @@ error hint:
674991 -------------------------------------------
675992 Gather
676993 Workers Planned: 1
677- -> Append
994+ -> Parallel Append
995+ -> Parallel Seq Scan on p2_c2
996+ -> Parallel Seq Scan on p2_c4
997+ -> Parallel Seq Scan on p2_c1_c1
998+ -> Parallel Seq Scan on p2_c1_c2
999+ -> Parallel Seq Scan on p2_c3_c1
1000+ -> Parallel Seq Scan on p2_c3_c2
6781001 -> Parallel Seq Scan on p1
6791002 -> Parallel Seq Scan on p1_c1
6801003 -> Parallel Seq Scan on p1_c2
@@ -686,16 +1009,10 @@ error hint:
6861009 -> Parallel Seq Scan on p1_c3_c2
6871010 -> Parallel Seq Scan on p2
6881011 -> Parallel Seq Scan on p2_c1
689- -> Parallel Seq Scan on p2_c2
6901012 -> Parallel Seq Scan on p2_c3
691- -> Parallel Seq Scan on p2_c4
692- -> Parallel Seq Scan on p2_c1_c1
693- -> Parallel Seq Scan on p2_c1_c2
694- -> Parallel Seq Scan on p2_c3_c1
695- -> Parallel Seq Scan on p2_c3_c2
6961013 (21 rows)
6971014
698--- set hint does the same thing
1015+-- set hint has the same effect
6991016 /*+Set(max_parallel_workers_per_gather 1)*/
7001017 EXPLAIN (COSTS false) SELECT id FROM p1 UNION ALL SELECT id FROM p2;
7011018 LOG: pg_hint_plan:
@@ -709,25 +1026,25 @@ error hint:
7091026 -------------------------------------------
7101027 Gather
7111028 Workers Planned: 1
712- -> Append
713- -> Parallel Seq Scan on p1
714- -> Parallel Seq Scan on p1_c1
1029+ -> Parallel Append
7151030 -> Parallel Seq Scan on p1_c2
716- -> Parallel Seq Scan on p1_c3
7171031 -> Parallel Seq Scan on p1_c4
1032+ -> Parallel Seq Scan on p2_c2
1033+ -> Parallel Seq Scan on p2_c4
7181034 -> Parallel Seq Scan on p1_c1_c1
7191035 -> Parallel Seq Scan on p1_c1_c2
7201036 -> Parallel Seq Scan on p1_c3_c1
7211037 -> Parallel Seq Scan on p1_c3_c2
722- -> Parallel Seq Scan on p2
723- -> Parallel Seq Scan on p2_c1
724- -> Parallel Seq Scan on p2_c2
725- -> Parallel Seq Scan on p2_c3
726- -> Parallel Seq Scan on p2_c4
7271038 -> Parallel Seq Scan on p2_c1_c1
7281039 -> Parallel Seq Scan on p2_c1_c2
7291040 -> Parallel Seq Scan on p2_c3_c1
7301041 -> Parallel Seq Scan on p2_c3_c2
1042+ -> Parallel Seq Scan on p1
1043+ -> Parallel Seq Scan on p1_c1
1044+ -> Parallel Seq Scan on p1_c3
1045+ -> Parallel Seq Scan on p2
1046+ -> Parallel Seq Scan on p2_c1
1047+ -> Parallel Seq Scan on p2_c3
7311048 (21 rows)
7321049
7331050 -- applies largest number of workers on merged parallel paths
@@ -750,20 +1067,20 @@ error hint:
7501067 -------------------------------------------
7511068 Gather
7521069 Workers Planned: 6
753- -> Append
754- -> Parallel Seq Scan on p1
755- -> Parallel Seq Scan on p1_c1
1070+ -> Parallel Append
1071+ -> Seq Scan on p1
1072+ -> Seq Scan on p1_c1
1073+ -> Seq Scan on p1_c3
1074+ -> Seq Scan on p2
1075+ -> Seq Scan on p2_c1
1076+ -> Seq Scan on p2_c3
7561077 -> Parallel Seq Scan on p1_c2
757- -> Parallel Seq Scan on p1_c3
7581078 -> Parallel Seq Scan on p1_c4
7591079 -> Parallel Seq Scan on p1_c1_c1
7601080 -> Parallel Seq Scan on p1_c1_c2
7611081 -> Parallel Seq Scan on p1_c3_c1
7621082 -> Parallel Seq Scan on p1_c3_c2
763- -> Parallel Seq Scan on p2
764- -> Parallel Seq Scan on p2_c1
7651083 -> Parallel Seq Scan on p2_c2
766- -> Parallel Seq Scan on p2_c3
7671084 -> Parallel Seq Scan on p2_c4
7681085 -> Parallel Seq Scan on p2_c1_c1
7691086 -> Parallel Seq Scan on p2_c1_c2
@@ -771,45 +1088,7 @@ error hint:
7711088 -> Parallel Seq Scan on p2_c3_c2
7721089 (21 rows)
7731090
774--- num of workers of non-hinted relations should be default value
775-SET parallel_setup_cost to 0;
776-SET parallel_tuple_cost to 0;
777-SET min_parallel_table_scan_size to 0;
778-SET min_parallel_index_scan_size to 0;
779-SET max_parallel_workers_per_gather to 3;
780-SET enable_indexscan to false;
781-/*+Parallel(p1 8 hard) */
782-EXPLAIN (COSTS false) SELECT * FROM p1 join t1 on p1.id = t1.id;
783-LOG: pg_hint_plan:
784-used hint:
785-Parallel(p1 8 hard)
786-not used hint:
787-duplication hint:
788-error hint:
789-
790- QUERY PLAN
791--------------------------------------------------------
792- Hash Join
793- Hash Cond: (t1.id = p1.id)
794- -> Gather
795- Workers Planned: 3
796- -> Parallel Seq Scan on t1
797- -> Hash
798- -> Gather
799- Workers Planned: 8
800- -> Append
801- -> Parallel Seq Scan on p1
802- -> Parallel Seq Scan on p1_c1
803- -> Parallel Seq Scan on p1_c2
804- -> Parallel Seq Scan on p1_c3
805- -> Parallel Seq Scan on p1_c4
806- -> Parallel Seq Scan on p1_c1_c1
807- -> Parallel Seq Scan on p1_c1_c2
808- -> Parallel Seq Scan on p1_c3_c1
809- -> Parallel Seq Scan on p1_c3_c2
810-(18 rows)
811-
812--- Negative hint
1091+-- Negative hints
8131092 SET enable_indexscan to DEFAULT;
8141093 SET parallel_setup_cost to 0;
8151094 SET parallel_tuple_cost to 0;
@@ -820,19 +1099,44 @@ EXPLAIN (COSTS false) SELECT * FROM p1;
8201099 QUERY PLAN
8211100 -------------------------------------------
8221101 Gather
823- Workers Planned: 1
824- -> Append
825- -> Parallel Seq Scan on p1
826- -> Parallel Seq Scan on p1_c1
1102+ Workers Planned: 4
1103+ -> Parallel Append
8271104 -> Parallel Seq Scan on p1_c2
828- -> Parallel Seq Scan on p1_c3
8291105 -> Parallel Seq Scan on p1_c4
8301106 -> Parallel Seq Scan on p1_c1_c1
8311107 -> Parallel Seq Scan on p1_c1_c2
8321108 -> Parallel Seq Scan on p1_c3_c1
8331109 -> Parallel Seq Scan on p1_c3_c2
1110+ -> Parallel Seq Scan on p1
1111+ -> Parallel Seq Scan on p1_c1
1112+ -> Parallel Seq Scan on p1_c3
8341113 (12 rows)
8351114
1115+SET enable_parallel_append to false;
1116+/*+Parallel(p1 0 hard)*/
1117+EXPLAIN (COSTS false) SELECT * FROM p1;
1118+LOG: pg_hint_plan:
1119+used hint:
1120+Parallel(p1 0 hard)
1121+not used hint:
1122+duplication hint:
1123+error hint:
1124+
1125+ QUERY PLAN
1126+----------------------------
1127+ Append
1128+ -> Seq Scan on p1
1129+ -> Seq Scan on p1_c1
1130+ -> Seq Scan on p1_c2
1131+ -> Seq Scan on p1_c3
1132+ -> Seq Scan on p1_c4
1133+ -> Seq Scan on p1_c1_c1
1134+ -> Seq Scan on p1_c1_c2
1135+ -> Seq Scan on p1_c3_c1
1136+ -> Seq Scan on p1_c3_c2
1137+(10 rows)
1138+
1139+SET enable_parallel_append to true;
8361140 /*+Parallel(p1 0 hard)*/
8371141 EXPLAIN (COSTS false) SELECT * FROM p1;
8381142 LOG: pg_hint_plan:
@@ -888,25 +1192,25 @@ Parallel()
8881192 -------------------------------------------
8891193 Gather
8901194 Workers Planned: 1
891- -> Append
892- -> Parallel Seq Scan on p1
893- -> Parallel Seq Scan on p1_c1
1195+ -> Parallel Append
8941196 -> Parallel Seq Scan on p1_c2
895- -> Parallel Seq Scan on p1_c3
8961197 -> Parallel Seq Scan on p1_c4
1198+ -> Parallel Seq Scan on p2_c2
1199+ -> Parallel Seq Scan on p2_c4
8971200 -> Parallel Seq Scan on p1_c1_c1
8981201 -> Parallel Seq Scan on p1_c1_c2
8991202 -> Parallel Seq Scan on p1_c3_c1
9001203 -> Parallel Seq Scan on p1_c3_c2
901- -> Parallel Seq Scan on p2
902- -> Parallel Seq Scan on p2_c1
903- -> Parallel Seq Scan on p2_c2
904- -> Parallel Seq Scan on p2_c3
905- -> Parallel Seq Scan on p2_c4
9061204 -> Parallel Seq Scan on p2_c1_c1
9071205 -> Parallel Seq Scan on p2_c1_c2
9081206 -> Parallel Seq Scan on p2_c3_c1
9091207 -> Parallel Seq Scan on p2_c3_c2
1208+ -> Parallel Seq Scan on p1
1209+ -> Parallel Seq Scan on p1_c1
1210+ -> Parallel Seq Scan on p1_c3
1211+ -> Parallel Seq Scan on p2
1212+ -> Parallel Seq Scan on p2_c1
1213+ -> Parallel Seq Scan on p2_c3
9101214 (21 rows)
9111215
9121216 -- Hints on unhintable relations are just ignored
@@ -947,14 +1251,14 @@ error hint:
9471251 CTE cte1
9481252 -> Gather
9491253 Workers Planned: 5
950- -> Append
951- -> Parallel Seq Scan on p1
1254+ -> Parallel Append
1255+ -> Seq Scan on p1
9521256 Filter: ((id % 2) = 0)
953- -> Parallel Seq Scan on p1_c1
1257+ -> Seq Scan on p1_c1
9541258 Filter: ((id % 2) = 0)
955- -> Parallel Seq Scan on p1_c2
1259+ -> Seq Scan on p1_c3
9561260 Filter: ((id % 2) = 0)
957- -> Parallel Seq Scan on p1_c3
1261+ -> Parallel Seq Scan on p1_c2
9581262 Filter: ((id % 2) = 0)
9591263 -> Parallel Seq Scan on p1_c4
9601264 Filter: ((id % 2) = 0)
--- a/output/ut-fdw.source
+++ b/output/ut-fdw.source
@@ -138,10 +138,10 @@ error hint:
138138 Join Filter: (ft_1.id = ft_2.id)
139139 -> Foreign Scan on ft1 ft_1 (cost=xxx rows=1 width=xxx)
140140 Foreign File: @abs_srcdir@/data/data.csv
141- Foreign File Size: 42
141+ Foreign File Size: 42 b
142142 -> Foreign Scan on ft1 ft_2 (cost=xxx rows=1 width=xxx)
143143 Foreign File: @abs_srcdir@/data/data.csv
144- Foreign File Size: 42
144+ Foreign File Size: 42 b
145145 -> Index Scan using t1_i1 on t1 (cost=xxx rows=1 width=xxx)
146146 Index Cond: (c1 = ft_2.id)
147147
--- a/pg_hint_plan.c
+++ b/pg_hint_plan.c
@@ -31,6 +31,7 @@
3131 #include "parser/analyze.h"
3232 #include "parser/parsetree.h"
3333 #include "parser/scansup.h"
34+#include "partitioning/partbounds.h"
3435 #include "tcop/utility.h"
3536 #include "utils/builtins.h"
3637 #include "utils/lsyscache.h"
@@ -459,8 +460,6 @@ void pg_hint_plan_set_rel_pathlist(PlannerInfo * root, RelOptInfo *rel,
459460 Index rti, RangeTblEntry *rte);
460461 static void create_plain_partial_paths(PlannerInfo *root,
461462 RelOptInfo *rel);
462-static void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
463- List *live_childrels);
464463 static void make_rels_by_clause_joins(PlannerInfo *root, RelOptInfo *old_rel,
465464 ListCell *other_rels);
466465 static void make_rels_by_clauseless_joins(PlannerInfo *root,
@@ -471,14 +470,6 @@ static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
471470 RangeTblEntry *rte);
472471 static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
473472 Index rti, RangeTblEntry *rte);
474-static void generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
475- List *live_childrels,
476- List *all_child_pathkeys,
477- List *partitioned_rels);
478-static Path *get_cheapest_parameterized_child_path(PlannerInfo *root,
479- RelOptInfo *rel,
480- Relids required_outer);
481-static List *accumulate_append_subpath(List *subpaths, Path *path);
482473 RelOptInfo *pg_hint_plan_make_join_rel(PlannerInfo *root, RelOptInfo *rel1,
483474 RelOptInfo *rel2);
484475
@@ -1724,7 +1715,7 @@ get_hints_from_table(const char *client_query, const char *client_application)
17241715 char *hints = NULL;
17251716 Oid argtypes[2] = { TEXTOID, TEXTOID };
17261717 Datum values[2];
1727- bool nulls[2] = { false, false };
1718+ char nulls[2] = {' ', ' '};
17281719 text *qry;
17291720 text *app;
17301721
@@ -2648,7 +2639,7 @@ setup_parallel_plan_enforcement(ParallelHint *hint, HintState *state)
26482639 state->init_nworkers, state->context);
26492640
26502641 	/* "force" means to enforce parallelism as far as possible */
2651- if (hint && hint->force_parallel)
2642+ if (hint && hint->force_parallel && hint->nworkers > 0)
26522643 {
26532644 set_config_int32_option("parallel_tuple_cost", 0, state->context);
26542645 set_config_int32_option("parallel_setup_cost", 0, state->context);
@@ -3362,7 +3353,7 @@ restrict_indexes(PlannerInfo *root, ScanMethodHint *hint, RelOptInfo *rel,
33623353 break;
33633354
33643355 c_attname = get_attname(relationObjectId,
3365- info->indexkeys[i]);
3356+ info->indexkeys[i], false);
33663357
33673358 /* deny if any of column attributes don't match */
33683359 if (strcmp(p_attname, c_attname) != 0 ||
@@ -3389,7 +3380,7 @@ restrict_indexes(PlannerInfo *root, ScanMethodHint *hint, RelOptInfo *rel,
33893380
33903381 /* check expressions if both expressions are available */
33913382 if (p_info->expression_str &&
3392- !heap_attisnull(ht_idx, Anum_pg_index_indexprs))
3383+ !heap_attisnull(ht_idx, Anum_pg_index_indexprs, NULL))
33933384 {
33943385 Datum exprsDatum;
33953386 bool isnull;
@@ -3420,7 +3411,7 @@ restrict_indexes(PlannerInfo *root, ScanMethodHint *hint, RelOptInfo *rel,
34203411
34213412 /* compare index predicates */
34223413 if (p_info->indpred_str &&
3423- !heap_attisnull(ht_idx, Anum_pg_index_indpred))
3414+ !heap_attisnull(ht_idx, Anum_pg_index_indpred, NULL))
34243415 {
34253416 Datum predDatum;
34263417 bool isnull;
@@ -3525,9 +3516,14 @@ get_parent_index_info(Oid indexoid, Oid relid)
35253516 p_info->opclass = (Oid *) palloc(sizeof(Oid) * index->indnatts);
35263517 p_info->indoption = (int16 *) palloc(sizeof(Oid) * index->indnatts);
35273518
3519+ /*
3520+ * Collect relation attribute names of index columns for index
3521+ * identification, not index attribute names. NULL means expression index
3522+ * columns.
3523+ */
35283524 for (i = 0; i < index->indnatts; i++)
35293525 {
3530- attname = get_attname(relid, index->indkey.values[i]);
3526+ attname = get_attname(relid, index->indkey.values[i], true);
35313527 p_info->column_names = lappend(p_info->column_names, attname);
35323528
35333529 p_info->indcollation[i] = indexRelation->rd_indcollation[i];
@@ -3539,7 +3535,8 @@ get_parent_index_info(Oid indexoid, Oid relid)
35393535 * to check to match the expression's parameter of index with child indexes
35403536 */
35413537 p_info->expression_str = NULL;
3542- if(!heap_attisnull(indexRelation->rd_indextuple, Anum_pg_index_indexprs))
3538+ if(!heap_attisnull(indexRelation->rd_indextuple, Anum_pg_index_indexprs,
3539+ NULL))
35433540 {
35443541 Datum exprsDatum;
35453542 bool isnull;
@@ -3559,7 +3556,8 @@ get_parent_index_info(Oid indexoid, Oid relid)
35593556 * to check to match the predicate's parameter of index with child indexes
35603557 */
35613558 p_info->indpred_str = NULL;
3562- if(!heap_attisnull(indexRelation->rd_indextuple, Anum_pg_index_indpred))
3559+ if(!heap_attisnull(indexRelation->rd_indextuple, Anum_pg_index_indpred,
3560+ NULL))
35633561 {
35643562 Datum predDatum;
35653563 bool isnull;
@@ -3595,7 +3593,7 @@ reset_hint_enforcement()
35953593 * bitmap of HintTypeBitmap. If shint or phint is not NULL, set used hint
35963594 * there respectively.
35973595 */
3598-static bool
3596+static int
35993597 setup_hint_enforcement(PlannerInfo *root, RelOptInfo *rel,
36003598 ScanMethodHint **rshint, ParallelHint **rphint)
36013599 {
@@ -3620,6 +3618,16 @@ setup_hint_enforcement(PlannerInfo *root, RelOptInfo *rel,
36203618 */
36213619 if (inhparent)
36223620 {
3621+ /* set up only parallel hints for parent relation */
3622+ phint = find_parallel_hint(root, rel->relid);
3623+ if (phint)
3624+ {
3625+ setup_parallel_plan_enforcement(phint, current_hint_state);
3626+ if (rphint) *rphint = phint;
3627+ ret |= HINT_BM_PARALLEL;
3628+ return ret;
3629+ }
3630+
36233631 if (debug_level > 1)
36243632 ereport(pg_hint_plan_message_level,
36253633 (errhidestmt(true),
@@ -3657,8 +3665,8 @@ setup_hint_enforcement(PlannerInfo *root, RelOptInfo *rel,
36573665 {
36583666 /*
36593667 * Here we found a new parent for the current relation. Scan continues
3660- * hint to other childrens of this parent so remember it * to avoid
3661- * hinthintredundant setup cost.
3668+	 * hint to other children of this parent, so remember it to avoid
3669+ * redundant setup cost.
36623670 */
36633671 current_hint_state->parent_relid = new_parent_relid;
36643672
@@ -4395,6 +4403,38 @@ pg_hint_plan_join_search(PlannerInfo *root, int levels_needed,
43954403
43964404 rel = pg_hint_plan_standard_join_search(root, levels_needed, initial_rels);
43974405
4406+ /*
4407+	/*
4408+	 * Adjust the number of parallel workers of the result rel to the largest
4409+	 * number among its component paths.
4409+ */
4410+ if (current_hint_state->num_hints[HINT_TYPE_PARALLEL] > 0)
4411+ {
4412+ ListCell *lc;
4413+ int nworkers = 0;
4414+
4415+ foreach (lc, initial_rels)
4416+ {
4417+ ListCell *lcp;
4418+ RelOptInfo *rel = (RelOptInfo *) lfirst(lc);
4419+
4420+ foreach (lcp, rel->partial_pathlist)
4421+ {
4422+ Path *path = (Path *) lfirst(lcp);
4423+
4424+				if (nworkers < path->parallel_workers)
4425+					nworkers = path->parallel_workers;
4426+ }
4427+ }
4428+
4429+ foreach (lc, rel->partial_pathlist)
4430+ {
4431+ Path *path = (Path *) lfirst(lc);
4432+
4433+ if (path->parallel_safe && path->parallel_workers < nworkers)
4434+ path->parallel_workers = nworkers;
4435+ }
4436+ }
4437+
43984438 for (i = 2; i <= nbaserel; i++)
43994439 {
44004440 list_free(current_hint_state->join_hint_level[i]);
@@ -4441,11 +4481,65 @@ pg_hint_plan_set_rel_pathlist(PlannerInfo * root, RelOptInfo *rel,
44414481 	 * We can accept only plain relations; foreign tables and table samples are
44424482 * also unacceptable. See set_rel_pathlist.
44434483 */
4444- if (rel->rtekind != RTE_RELATION ||
4484+ if ((rel->rtekind != RTE_RELATION &&
4485+ rel->rtekind != RTE_SUBQUERY)||
44454486 rte->relkind == RELKIND_FOREIGN_TABLE ||
44464487 rte->tablesample != NULL)
44474488 return;
44484489
4490+ /*
4491+	 * Even though a UNION ALL node doesn't have a particular name and so is
4492+	 * usually unhintable, turn on parallelism when it contains parallel nodes.
4493+ */
4494+ if (rel->rtekind == RTE_SUBQUERY)
4495+ {
4496+ ListCell *lc;
4497+ bool inhibit_nonparallel = false;
4498+
4499+ if (rel->partial_pathlist == NIL)
4500+ return;
4501+
4502+ foreach(lc, rel->partial_pathlist)
4503+ {
4504+ ListCell *lcp;
4505+ AppendPath *apath = (AppendPath *) lfirst(lc);
4506+ int parallel_workers = 0;
4507+
4508+ if (!IsA(apath, AppendPath))
4509+ continue;
4510+
4511+ foreach (lcp, apath->subpaths)
4512+ {
4513+ Path *spath = (Path *) lfirst(lcp);
4514+
4515+ if (spath->parallel_aware &&
4516+ parallel_workers < spath->parallel_workers)
4517+ parallel_workers = spath->parallel_workers;
4518+ }
4519+
4520+ apath->path.parallel_workers = parallel_workers;
4521+ inhibit_nonparallel = true;
4522+ }
4523+
4524+ if (inhibit_nonparallel)
4525+ {
4526+ ListCell *lc;
4527+
4528+ foreach(lc, rel->pathlist)
4529+ {
4530+ Path *path = (Path *) lfirst(lc);
4531+
4532+ if (path->startup_cost < disable_cost)
4533+ {
4534+ path->startup_cost += disable_cost;
4535+ path->total_cost += disable_cost;
4536+ }
4537+ }
4538+ }
4539+
4540+ return;
4541+ }
4542+
44494543 /* We cannot handle if this requires an outer */
44504544 if (rel->lateral_relids)
44514545 return;
@@ -4457,49 +4551,95 @@ pg_hint_plan_set_rel_pathlist(PlannerInfo * root, RelOptInfo *rel,
44574551 /* Here, we regenerate paths with the current hint restriction */
44584552 if (found_hints & HINT_BM_SCAN_METHOD || found_hints & HINT_BM_PARALLEL)
44594553 {
4460- /* Just discard all the paths considered so far */
4461- list_free_deep(rel->pathlist);
4462- rel->pathlist = NIL;
4554+ /*
4555+		/*
4556+		 * When a hint is specified on a non-parent relation, discard the
4557+		 * existing paths and regenerate them based on the hint. Otherwise we
4558+		 * already have hinted child paths, so just adjust the planned number
4559+		 * of workers.
4559+ */
4560+ if (root->simple_rte_array[rel->relid]->inh)
4561+ {
4562+ /* enforce number of workers if requested */
4563+ if (phint && phint->force_parallel)
4564+ {
4565+ if (phint->nworkers == 0)
4566+ {
4567+ list_free_deep(rel->partial_pathlist);
4568+ rel->partial_pathlist = NIL;
4569+ }
4570+ else
4571+ {
4572+ /* prioritize partial paths */
4573+ foreach (l, rel->partial_pathlist)
4574+ {
4575+ Path *ppath = (Path *) lfirst(l);
4576+
4577+ if (ppath->parallel_safe)
4578+ {
4579+ ppath->parallel_workers = phint->nworkers;
4580+ ppath->startup_cost = 0;
4581+ ppath->total_cost = 0;
4582+ }
4583+ }
4584+
4585+ /* disable non-partial paths */
4586+ foreach (l, rel->pathlist)
4587+ {
4588+ Path *ppath = (Path *) lfirst(l);
44634589
4464- /* Remove all the partial paths if Parallel hint is specfied */
4465- if ((found_hints & HINT_BM_PARALLEL) && rel->partial_pathlist)
4590+ if (ppath->startup_cost < disable_cost)
4591+ {
4592+ ppath->startup_cost += disable_cost;
4593+ ppath->total_cost += disable_cost;
4594+ }
4595+ }
4596+ }
4597+ }
4598+ }
4599+ else
44664600 {
4601+ /* Just discard all the paths considered so far */
4602+ list_free_deep(rel->pathlist);
4603+ rel->pathlist = NIL;
44674604 list_free_deep(rel->partial_pathlist);
44684605 rel->partial_pathlist = NIL;
4469- }
44704606
4471- /* Regenerate paths with the current enforcement */
4472- set_plain_rel_pathlist(root, rel, rte);
4607+ /* Regenerate paths with the current enforcement */
4608+ set_plain_rel_pathlist(root, rel, rte);
44734609
4474- /* Additional work to enforce parallel query execution */
4475- if (phint && phint->nworkers > 0)
4476- {
4477- /* Lower the priorities of non-parallel paths */
4478- foreach (l, rel->pathlist)
4610+ /* Additional work to enforce parallel query execution */
4611+ if (phint && phint->nworkers > 0)
44794612 {
4480- Path *path = (Path *) lfirst(l);
4481-
4482- if (path->startup_cost < disable_cost)
4613+ /*
4614+ * For Parallel Append to be planned properly, we shouldn't set
4615+			 * the costs of non-partial paths to the disable value. Lower the
4616+ * priority of non-parallel paths by setting partial path costs
4617+ * to 0 instead.
4618+ */
4619+ foreach (l, rel->partial_pathlist)
44834620 {
4484- path->startup_cost += disable_cost;
4485- path->total_cost += disable_cost;
4621+ Path *path = (Path *) lfirst(l);
4622+
4623+ path->startup_cost = 0;
4624+ path->total_cost = 0;
44864625 }
4487- }
44884626
4489- /* enforce number of workers if requested */
4490- if (phint->force_parallel)
4491- {
4492- foreach (l, rel->partial_pathlist)
4627+ /* enforce number of workers if requested */
4628+ if (phint->force_parallel)
44934629 {
4494- Path *ppath = (Path *) lfirst(l);
4630+ foreach (l, rel->partial_pathlist)
4631+ {
4632+ Path *ppath = (Path *) lfirst(l);
44954633
4496- ppath->parallel_workers = phint->nworkers;
4634+ if (ppath->parallel_safe)
4635+ ppath->parallel_workers = phint->nworkers;
4636+ }
44974637 }
4498- }
44994638
4500- /* Generate gather paths for base rels */
4501- if (rel->reloptkind == RELOPT_BASEREL)
4502- generate_gather_paths(root, rel);
4639+ /* Generate gather paths */
4640+ if (rel->reloptkind == RELOPT_BASEREL)
4641+ generate_gather_paths(root, rel, false);
4642+ }
45034643 }
45044644 }
45054645
--- a/sql/ut-W.sql
+++ b/sql/ut-W.sql
@@ -34,17 +34,31 @@ SET parallel_setup_cost to 0;
3434 SET parallel_tuple_cost to 0;
3535 SET min_parallel_table_scan_size to 0;
3636 SET min_parallel_index_scan_size to 0;
37+SET enable_parallel_append to false;
3738 /*+Parallel(p1 8)*/
3839 EXPLAIN (COSTS false) SELECT * FROM p1;
40+SET enable_parallel_append to true;
41+/*+Parallel(p1 8)*/
42+EXPLAIN (COSTS false) SELECT * FROM p1;
43+
3944 SET parallel_setup_cost to DEFAULT;
4045 SET parallel_tuple_cost to DEFAULT;
4146 SET min_parallel_table_scan_size to DEFAULT;
4247 SET min_parallel_index_scan_size to DEFAULT;
4348
49+SET enable_parallel_append to false;
4450 /*+Parallel(p1 8 hard)*/
4551 EXPLAIN (COSTS false) SELECT * FROM p1;
4652
47--- hinting on children makes the whole inheritance parallel
53+SET enable_parallel_append to true;
54+/*+Parallel(p1 8 hard)*/
55+EXPLAIN (COSTS false) SELECT * FROM p1;
56+
57+-- hinting on children doesn't work (changed as of pg_hint_plan 10)
58+SET enable_parallel_append to false;
59+/*+Parallel(p1_c1 8 hard)*/
60+EXPLAIN (COSTS false) SELECT * FROM p1;
61+SET enable_parallel_append to true;
4862 /*+Parallel(p1_c1 8 hard)*/
4963 EXPLAIN (COSTS false) SELECT * FROM p1;
5064
@@ -75,9 +89,17 @@ SET parallel_setup_cost to 0;
7589 SET parallel_tuple_cost to 0;
7690 SET min_parallel_table_scan_size to 0;
7791 SET min_parallel_index_scan_size to 0;
92+SET enable_parallel_append to false;
93+/*+Parallel(p1 8)*/
94+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
95+SET enable_parallel_append to true;
7896 /*+Parallel(p1 8)*/
7997 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
8098
99+SET enable_parallel_append to false;
100+/*+Parallel(p1 8)Parallel(p2 0)*/
101+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
102+SET enable_parallel_append to true;
81103 /*+Parallel(p1 8)Parallel(p2 0)*/
82104 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
83105
@@ -92,20 +114,37 @@ EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
92114 /*+Parallel(p2 8 hard)*/
93115 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
94116
117+-- Number of workers is set to the largest hinted value
118+SET enable_parallel_append to false;
119+/*+Parallel(p2 8 hard) Parallel(p1 5 hard) */
120+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
121+SET enable_parallel_append to true;
95122 /*+Parallel(p2 8 hard) Parallel(p1 5 hard) */
96123 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
97124
98125
99--- Mixture with a scan hint
126+-- Mixture with scan hints
100127 -- p1 can be parallel
128+SET enable_parallel_append to false;
129+/*+Parallel(p1 8 hard) IndexScan(p2) */
130+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
131+SET enable_parallel_append to true;
101132 /*+Parallel(p1 8 hard) IndexScan(p2) */
102133 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
103134
104135 -- Parallel sequential scan
136+SET enable_parallel_append to false;
137+/*+Parallel(p1 8 hard) SeqScan(p1) */
138+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
139+SET enable_parallel_append to true;
105140 /*+Parallel(p1 8 hard) SeqScan(p1) */
106141 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
107142
108143 -- Parallel index scan
144+SET enable_parallel_append to false;
145+/*+Parallel(p1 8 hard) IndexScan(p1) */
146+EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
147+SET enable_parallel_append to true;
109148 /*+Parallel(p1 8 hard) IndexScan(p1) */
110149 EXPLAIN (COSTS false) SELECT * FROM p1 join p2 on p1.id = p2.id;
111150
@@ -128,7 +167,7 @@ SET max_parallel_workers_per_gather to 0;
128167 /*+Parallel(p1 8) */
129168 EXPLAIN (COSTS false) SELECT id FROM p1 UNION ALL SELECT id FROM p2;
130169
131--- set hint does the same thing
170+-- set hint has the same effect
132171 /*+Set(max_parallel_workers_per_gather 1)*/
133172 EXPLAIN (COSTS false) SELECT id FROM p1 UNION ALL SELECT id FROM p2;
134173
@@ -142,18 +181,7 @@ SET max_parallel_workers_per_gather to 8;
142181 EXPLAIN (COSTS false) SELECT id FROM p1 UNION ALL SELECT id FROM p2;
143182
144183
145--- num of workers of non-hinted relations should be default value
146-SET parallel_setup_cost to 0;
147-SET parallel_tuple_cost to 0;
148-SET min_parallel_table_scan_size to 0;
149-SET min_parallel_index_scan_size to 0;
150-SET max_parallel_workers_per_gather to 3;
151-SET enable_indexscan to false;
152-
153-/*+Parallel(p1 8 hard) */
154-EXPLAIN (COSTS false) SELECT * FROM p1 join t1 on p1.id = t1.id;
155-
156--- Negative hint
184+-- Negative hints
157185 SET enable_indexscan to DEFAULT;
158186 SET parallel_setup_cost to 0;
159187 SET parallel_tuple_cost to 0;
@@ -162,6 +190,10 @@ SET min_parallel_index_scan_size to 0;
162190 SET max_parallel_workers_per_gather to 5;
163191 EXPLAIN (COSTS false) SELECT * FROM p1;
164192
193+SET enable_parallel_append to false;
194+/*+Parallel(p1 0 hard)*/
195+EXPLAIN (COSTS false) SELECT * FROM p1;
196+SET enable_parallel_append to true;
165197 /*+Parallel(p1 0 hard)*/
166198 EXPLAIN (COSTS false) SELECT * FROM p1;
167199