tree-eh.c revision 1.6
/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "hashtab.h"
#include "hard-reg-set.h"
#include "function.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "except.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "target.h"
#include "cfgloop.h"
#include "gimple-low.h"

/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This union makes that possible.  */
typedef union {tree *tp; tree t; gimple g;} treemple;
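
/* For example (illustrative usage, mirroring the code below): a label is
   stored via "temp.t = label" while a GIMPLE_TRY statement is stored via
   "temp.g = stmt", and both land in the same finally_tree table.  */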

/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
	lp = gen_eh_landing_pad (region);
      else
	gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */
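
/* For example, a lookup result of 2 means the statement unwinds to
   landing pad 2, while -3 means it lives in the MUST_NOT_THROW region
   with index 3 (see record_stmt_eh_region above).  */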

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
  return lp_nr ? *lp_nr : 0;
}

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gtry *parent;
};
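
/* For illustration, given the nested structure
     try { L1: ...; try { L2: ... } finally { ... } } finally { ... }
   the table built below maps L1 and the inner GIMPLE_TRY to the outer
   GIMPLE_TRY, and L2 to the inner GIMPLE_TRY.  */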

/* Hashtable helpers.  */

struct finally_tree_hasher : typed_free_remove <finally_tree_node>
{
  typedef finally_tree_node value_type;
  typedef finally_tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
finally_tree_hasher::hash (const value_type *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;

static void
record_in_finally_tree (treemple child, gtry *parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple stmt, gtry *region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gtry *region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple stmt, gtry *region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (as_a <glabel *> (stmt));
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt),
				  as_a <gtry *> (stmt));
	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (
				 as_a <gcatch *> (stmt)),
			      region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
	return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
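
/* Continuing the nested example above: outside_finally_tree (L1, inner
   try) walks L1 -> outer try and falls off the top without meeting the
   inner try, so a goto to L1 leaves the inner try_finally; for L2 the
   walk reaches the inner try at once, so such a goto stays inside.  */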

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};
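
/* For example, for a "return x;" that escapes a try_finally,
   do_return_redirection below sets REPL_STMT to "goto <finally label>;"
   (preceded by any setup statements) and CONT_STMT to the original
   return, which is re-emitted after the finally block has run.  */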

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gtry *try_finally_expr;
  gtry *top_p;

  /* While lowering a top_p, it is usually expanded into multiple
     statements; the following field stores them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
	if (tf->goto_queue[i].stmt.g == stmt.g)
	  return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
	{
	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
						  &tf->goto_queue[i]);
	  gcc_assert (!existed);
	}
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
				gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}
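
/* For example, if the true label of "if (a) goto L1; else goto L2;" has
   the multi-statement replacement "tmp = 0; goto finally;", the cond is
   redirected to a fresh label and the replacement placed after it:
     if (a) goto Lnew; else goto L2;
    Lnew: tmp = 0; goto finally;
   (Lnew being the artificial label created above).  */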

/* The real work of replace_goto_queue.  Returns with GSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
		      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
	{
	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
	  gsi_remove (gsi, false);
	  return;
	}
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
				      as_a <gcatch *> (stmt)),
				    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
				      tf);
	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
				      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a GIMPLE_RETURN.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
		      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
         = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}

/* Record label LABEL in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
			    location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
	record_in_goto_queue_label (tf, new_stmt,
				    gimple_cond_true_label (cond_stmt),
				    EXPR_LOCATION (*new_stmt.tp));
	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
	record_in_goto_queue_label (tf, new_stmt,
				    gimple_cond_false_label (cond_stmt),
				    EXPR_LOCATION (*new_stmt.tp));
      }
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
				  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}


#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  If the original source
   was in fact structured, and we've not yet done jump threading, none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state,
			     gswitch *switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

	int x;
	int foo (void)
	{
	  x = 0;
	  try {
	    return x;
	  } finally {
	    x++;
	  }
	}

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
		     struct leh_tf_state *tf)
{
  ggoto *x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  glabel *x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gresx *x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  geh_dispatch *x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
	break;
      region = region->outer;
      if (region == NULL)
	break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gtry *tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
	over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the gimple sequence SEQ.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
			     location_t loc)
{
  gtry *region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, loc);
	  gimple_set_block (stmt, block);
	}
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline geh_else *
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <geh_else *> (x);
    }
  return NULL;
}

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First, we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
			       struct leh_state *this_state,
			       struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x;
  geh_mnt *eh_mnt;
  gtry *try_stmt;
  geh_else *eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
	gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  eh_mnt = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  try_stmt = gimple_build_try (finally, gimple_seq_alloc_with_stmt (eh_mnt),
			       GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, try_stmt);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */
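
/* For example, given a finally block that cannot complete normally,
     try { ... goto out; ... } finally { abort (); }
   every escaping goto and return is redirected to a fresh label placed
   just before the finally block; no branch out is needed, since control
   never leaves the finally.  */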

static void
lower_try_finally_nofallthru (struct leh_state *state,
			      struct leh_tf_state *tf)
{
  tree lab;
  gimple x;
  geh_else *eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	}
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  emit_post_landing_pad (&eh_seq, tf->region);

	  x = gimple_build_goto (lab);
	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
	  gimple_seq_add_stmt (&eh_seq, x);
	}
    }
}

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */
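
/* For example, if the try block always falls through,
     try { a (); } finally { f (); }
   lowers to simply "a (); f ();" -- the two blocks run together.  If the
   single destination is instead a return or goto, the finally block is
   emitted once behind a label and the original jump is re-emitted after
   it.  (a and f stand for arbitrary statements here.)  */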

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  geh_else *eh_else;
  glabel *label_stmt;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
	finally = gimple_eh_else_e_body (eh_else);
      else
	finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
	  gimple_set_block (stmt, block);
	}
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
	 the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
	{
	  /* Reachable by goto to fallthru label only.  Redirect it
	     to the new label (already created, sadly), and do not
	     emit the final branch out, or the fallthru label.  */
	  tf->fallthru_label = NULL;
	  return;
	}
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */
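
/* For example, with both a fallthru exit and an escaping goto,
     try { if (a) goto out; b (); } finally { f (); }
   becomes roughly (labels are illustrative):
     if (a) goto q_lab; b ();
     f (); goto fallthru_lab;	<- copy for the fallthru edge
    q_lab:
     f (); goto out;		<- copy for the escaping goto
    fallthru_lab:
   with a further copy on the EH path when exceptions are possible.  */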

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x;
  geh_else *eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
	 since it is only emitted once.  */
      if (eh_else)
	seq = gimple_eh_else_e_body (eh_else);
      else
	seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
	struct goto_queue_node *q;
	tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
	{
	  index = q->index < 0 ? return_index : q->index;

	  if (!labels[index].q)
	    labels[index].q = q;
	}

      for (index = 0; index < return_index + 1; index++)
	{
	  tree lab;

	  q = labels[index].q;
	  if (! q)
	    continue;

	  lab = labels[index].label
	    = create_artificial_label (tf_loc);

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);

	  x = gimple_build_label (lab);
	  gimple_seq_add_stmt (&new_stmt, x);

	  seq = lower_try_finally_dup_block (finally, state, q->location);
	  lower_eh_constructs_1 (state, &seq);
	  gimple_seq_add_seq (&new_stmt, seq);

	  gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
	  maybe_record_in_goto_queue (state, q->cont_stmt);
	}

      for (q = tf->goto_queue; q < qe; q++)
	{
	  tree lab;

	  index = q->index < 0 ? return_index : q->index;

	  if (labels[index].q == q)
	    continue;

	  lab = labels[index].label;

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);
	}

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link the new stmts after running replace_goto_queue so that
     we do not process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */
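
/* For example, the same source as above,
     try { if (a) goto out; b (); } finally { f (); }
   becomes roughly (indices and labels are illustrative):
     if (a) { finally_tmp = 0; goto finally_label; }
     b (); finally_tmp = 1; goto finally_label;
    finally_label:
     f ();
     switch (finally_tmp) { case 1: goto fallthru_lab;
			    default: goto out; }
   so that the finally block is emitted exactly once.  */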

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x;
  geh_else *eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node,
					      fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	  emit_resx (&eh_seq, tf->region);
	}

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node,
						  return_index));
	  gimple_seq_add_stmt (&mod, x);
	  do_return_redirection (q, finally_label, mod);
	  switch_id = return_index;
	}
      else
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node, q->index));
	  gimple_seq_add_stmt (&mod, x);
	  do_goto_redirection (q, finally_label, mod, tf);
	  switch_id = q->index;
	}

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
	  tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
				       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = new hash_map<tree, gimple>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
				     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue so that
     we do not process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}

/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */
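
/* For example, a finally block estimated at 5 insns with 3 destinations
   gives f_estimate = (5 + 1) * 3 = 18 against sw_estimate = 10 + 2 * 3
   = 16, so when optimizing for size the switch form wins; at -O2 the
   copies are still used, since 18 < 100.  */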
1610
1611static bool
1612decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1613{
1614  int f_estimate, sw_estimate;
1615  geh_else *eh_else;
1616
1617  /* If there's an EH_ELSE involved, the exception path is separate
1618     and really doesn't come into play for this computation.  */
1619  eh_else = get_eh_else (finally);
1620  if (eh_else)
1621    {
1622      ndests -= may_throw;
1623      finally = gimple_eh_else_n_body (eh_else);
1624    }
1625
1626  if (!optimize)
1627    {
1628      gimple_stmt_iterator gsi;
1629
1630      if (ndests == 1)
1631        return true;
1632
1633      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1634	{
1635	  gimple stmt = gsi_stmt (gsi);
1636	  if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
1637	    return false;
1638	}
1639      return true;
1640    }
1641
1642  /* Finally estimate N times, plus N gotos.  */
1643  f_estimate = count_insns_seq (finally, &eni_size_weights);
1644  f_estimate = (f_estimate + 1) * ndests;
1645
1646  /* Switch statement (cost 10), N variable assignments, N gotos.  */
1647  sw_estimate = 10 + 2 * ndests;
1648
1649  /* Optimize for size clearly wants our best guess.  */
1650  if (optimize_function_for_size_p (cfun))
1651    return f_estimate < sw_estimate;
1652
1653  /* ??? These numbers are completely made up so far.  */
1654  if (optimize > 1)
1655    return f_estimate < 100 || f_estimate < sw_estimate * 2;
1656  else
1657    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1658}
1659
1660/* REG is the enclosing region for a possible cleanup region, or the region
1661   itself.  Returns TRUE if such a region would be unreachable.
1662
1663   Cleanup regions within a must-not-throw region aren't actually reachable
1664   even if there are throwing stmts within them, because the personality
1665   routine will call terminate before unwinding.  */
1666
1667static bool
1668cleanup_is_dead_in (eh_region reg)
1669{
1670  while (reg && reg->type == ERT_CLEANUP)
1671    reg = reg->outer;
1672  return (reg && reg->type == ERT_MUST_NOT_THROW);
1673}
1674
1675/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY nodes
1676   to a sequence of labels and blocks, plus the exception region trees
1677   that record all the magic.  This is complicated by the need to
1678   arrange for the FINALLY block to be executed on all exits.  */
1679
1680static gimple_seq
1681lower_try_finally (struct leh_state *state, gtry *tp)
1682{
1683  struct leh_tf_state this_tf;
1684  struct leh_state this_state;
1685  int ndests;
1686  gimple_seq old_eh_seq;
1687
1688  /* Process the try block.  */
1689
1690  memset (&this_tf, 0, sizeof (this_tf));
1691  this_tf.try_finally_expr = tp;
1692  this_tf.top_p = tp;
1693  this_tf.outer = state;
1694  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1695    {
1696      this_tf.region = gen_eh_region_cleanup (state->cur_region);
1697      this_state.cur_region = this_tf.region;
1698    }
1699  else
1700    {
1701      this_tf.region = NULL;
1702      this_state.cur_region = state->cur_region;
1703    }
1704
1705  this_state.ehp_region = state->ehp_region;
1706  this_state.tf = &this_tf;
1707
1708  old_eh_seq = eh_seq;
1709  eh_seq = NULL;
1710
1711  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1712
1713  /* Determine if the try block is escaped through the bottom.  */
1714  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1715
1716  /* Determine if any exceptions are possible within the try block.  */
1717  if (this_tf.region)
1718    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1719  if (this_tf.may_throw)
1720    honor_protect_cleanup_actions (state, &this_state, &this_tf);
1721
1722  /* Determine how many edges (still) reach the finally block.  Or rather,
1723     how many destinations are reached by the finally block.  Use this to
1724     determine how we process the finally block itself.  */
1725
1726  ndests = this_tf.dest_array.length ();
1727  ndests += this_tf.may_fallthru;
1728  ndests += this_tf.may_return;
1729  ndests += this_tf.may_throw;
1730
1731  /* If the FINALLY block is not reachable, dike it out.  */
1732  if (ndests == 0)
1733    {
1734      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1735      gimple_try_set_cleanup (tp, NULL);
1736    }
1737  /* If the finally block doesn't fall through, then any destination
1738     we might try to impose there isn't reached either.  There may be
1739     some minor amount of cleanup and redirection still needed.  */
1740  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1741    lower_try_finally_nofallthru (state, &this_tf);
1742
1743  /* We can easily special-case redirection to a single destination.  */
1744  else if (ndests == 1)
1745    lower_try_finally_onedest (state, &this_tf);
1746  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1747				    gimple_try_cleanup (tp)))
1748    lower_try_finally_copy (state, &this_tf);
1749  else
1750    lower_try_finally_switch (state, &this_tf);
1751
1752  /* If someone requested we add a label at the end of the transformed
1753     block, do so.  */
1754  if (this_tf.fallthru_label)
1755    {
1756      /* This must be reached only if ndests == 0. */
1757      gimple x = gimple_build_label (this_tf.fallthru_label);
1758      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1759    }
1760
1761  this_tf.dest_array.release ();
1762  free (this_tf.goto_queue);
1763  if (this_tf.goto_queue_map)
1764    delete this_tf.goto_queue_map;
1765
1766  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1767     If there was no old eh_seq, then the append is trivially already done.  */
1768  if (old_eh_seq)
1769    {
1770      if (eh_seq == NULL)
1771	eh_seq = old_eh_seq;
1772      else
1773	{
1774	  gimple_seq new_eh_seq = eh_seq;
1775	  eh_seq = old_eh_seq;
1776	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1777	}
1778    }
1779
1780  return this_tf.top_p_seq;
1781}
1782
1783/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
1784   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1785   exception region trees that record all the magic.  */
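/* An illustrative sketch (shapes assumed, not literal output):

       try { body (); } catch (T) { handler (); }

   becomes, roughly,

       body ();  goto out;
       <dispatch on the region's filter value>
       L1: handler ();  goto out;
       <resx for unmatched exceptions>
       out:

   where L1 and OUT are artificial labels created below.  */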
1786
1787static gimple_seq
1788lower_catch (struct leh_state *state, gtry *tp)
1789{
1790  eh_region try_region = NULL;
1791  struct leh_state this_state = *state;
1792  gimple_stmt_iterator gsi;
1793  tree out_label;
1794  gimple_seq new_seq, cleanup;
1795  gimple x;
1796  location_t try_catch_loc = gimple_location (tp);
1797
1798  if (flag_exceptions)
1799    {
1800      try_region = gen_eh_region_try (state->cur_region);
1801      this_state.cur_region = try_region;
1802    }
1803
1804  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1805
1806  if (!eh_region_may_contain_throw (try_region))
1807    return gimple_try_eval (tp);
1808
1809  new_seq = NULL;
1810  emit_eh_dispatch (&new_seq, try_region);
1811  emit_resx (&new_seq, try_region);
1812
1813  this_state.cur_region = state->cur_region;
1814  this_state.ehp_region = try_region;
1815
1816  /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1817     itself, so that e.g. for coverage purposes the nested cleanups don't
1818     appear before the cleanup body.  See PR64634 for details.  */
1819  gimple_seq old_eh_seq = eh_seq;
1820  eh_seq = NULL;
1821
1822  out_label = NULL;
1823  cleanup = gimple_try_cleanup (tp);
1824  for (gsi = gsi_start (cleanup);
1825       !gsi_end_p (gsi);
1826       gsi_next (&gsi))
1827    {
1828      eh_catch c;
1829      gcatch *catch_stmt;
1830      gimple_seq handler;
1831
1832      catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1833      c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1834
1835      handler = gimple_catch_handler (catch_stmt);
1836      lower_eh_constructs_1 (&this_state, &handler);
1837
1838      c->label = create_artificial_label (UNKNOWN_LOCATION);
1839      x = gimple_build_label (c->label);
1840      gimple_seq_add_stmt (&new_seq, x);
1841
1842      gimple_seq_add_seq (&new_seq, handler);
1843
1844      if (gimple_seq_may_fallthru (new_seq))
1845	{
1846	  if (!out_label)
1847	    out_label = create_artificial_label (try_catch_loc);
1848
1849	  x = gimple_build_goto (out_label);
1850	  gimple_seq_add_stmt (&new_seq, x);
1851	}
1852      if (!c->type_list)
1853	break;
1854    }
1855
1856  gimple_try_set_cleanup (tp, new_seq);
1857
1858  gimple_seq new_eh_seq = eh_seq;
1859  eh_seq = old_eh_seq;
1860  gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1861  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1862  return ret_seq;
1863}
1864
1865/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
1866   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1867   region trees that record all the magic.  */
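/* Sketch only: an exception specification such as

       void f () throw (T) { body (); }

   yields an ERT_ALLOWED_EXCEPTIONS region; exceptions not matching
   T branch to u.allowed.label, which runs the filter's failure
   sequence (typically ending in a call such as std::unexpected).  */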
1868
1869static gimple_seq
1870lower_eh_filter (struct leh_state *state, gtry *tp)
1871{
1872  struct leh_state this_state = *state;
1873  eh_region this_region = NULL;
1874  gimple inner, x;
1875  gimple_seq new_seq;
1876
1877  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1878
1879  if (flag_exceptions)
1880    {
1881      this_region = gen_eh_region_allowed (state->cur_region,
1882				           gimple_eh_filter_types (inner));
1883      this_state.cur_region = this_region;
1884    }
1885
1886  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1887
1888  if (!eh_region_may_contain_throw (this_region))
1889    return gimple_try_eval (tp);
1890
1891  new_seq = NULL;
1892  this_state.cur_region = state->cur_region;
1893  this_state.ehp_region = this_region;
1894
1895  emit_eh_dispatch (&new_seq, this_region);
1896  emit_resx (&new_seq, this_region);
1897
1898  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1899  x = gimple_build_label (this_region->u.allowed.label);
1900  gimple_seq_add_stmt (&new_seq, x);
1901
1902  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1903  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1904
1905  gimple_try_set_cleanup (tp, new_seq);
1906
1907  return frob_into_branch_around (tp, this_region, NULL);
1908}
1909
1910/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
1911   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1912   plus the exception region trees that record all the magic.  */
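/* For example (illustrative): a C++ noexcept body is wrapped by the
   front end as roughly

       try { body (); } <eh_must_not_throw (terminate)>

   and lowered here to an ERT_MUST_NOT_THROW region whose
   failure_decl (e.g. std::terminate) is invoked if anything inside
   throws.  */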
1913
1914static gimple_seq
1915lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1916{
1917  struct leh_state this_state = *state;
1918
1919  if (flag_exceptions)
1920    {
1921      gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1922      eh_region this_region;
1923
1924      this_region = gen_eh_region_must_not_throw (state->cur_region);
1925      this_region->u.must_not_throw.failure_decl
1926	= gimple_eh_must_not_throw_fndecl (
1927	    as_a <geh_mnt *> (inner));
1928      this_region->u.must_not_throw.failure_loc
1929	= LOCATION_LOCUS (gimple_location (tp));
1930
1931      /* In order to get mangling applied to this decl, we must mark it
1932	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
1933	 needs to happen.  */
1934      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1935
1936      this_state.cur_region = this_region;
1937    }
1938
1939  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1940
1941  return gimple_try_eval (tp);
1942}
1943
1944/* Implement a cleanup expression.  This is similar to try-finally,
1945   except that we only execute the cleanup block for exception edges.  */
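/* Sketch: for the cleanup emitted for, say, a C++ destructor under
   using_eh_for_cleanups_p,

       try { body (); } <cleanup: ~A (&a);>

   the destructor call is reached only via the EH edge; the normal
   fallthru path out of the body bypasses it entirely.  */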
1946
1947static gimple_seq
1948lower_cleanup (struct leh_state *state, gtry *tp)
1949{
1950  struct leh_state this_state = *state;
1951  eh_region this_region = NULL;
1952  struct leh_tf_state fake_tf;
1953  gimple_seq result;
1954  bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1955
1956  if (flag_exceptions && !cleanup_dead)
1957    {
1958      this_region = gen_eh_region_cleanup (state->cur_region);
1959      this_state.cur_region = this_region;
1960    }
1961
1962  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1963
1964  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1965    return gimple_try_eval (tp);
1966
1967  /* Build enough of a try-finally state so that we can reuse
1968     honor_protect_cleanup_actions.  */
1969  memset (&fake_tf, 0, sizeof (fake_tf));
1970  fake_tf.top_p = fake_tf.try_finally_expr = tp;
1971  fake_tf.outer = state;
1972  fake_tf.region = this_region;
1973  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1974  fake_tf.may_throw = true;
1975
1976  honor_protect_cleanup_actions (state, NULL, &fake_tf);
1977
1978  if (fake_tf.may_throw)
1979    {
1980      /* In this case honor_protect_cleanup_actions had nothing to do,
1981	 and we should process this normally.  */
1982      lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1983      result = frob_into_branch_around (tp, this_region,
1984                                        fake_tf.fallthru_label);
1985    }
1986  else
1987    {
1988      /* In this case honor_protect_cleanup_actions did nearly all of
1989	 the work.  All we have left is to append the fallthru_label.  */
1990
1991      result = gimple_try_eval (tp);
1992      if (fake_tf.fallthru_label)
1993	{
1994	  gimple x = gimple_build_label (fake_tf.fallthru_label);
1995	  gimple_seq_add_stmt (&result, x);
1996	}
1997    }
1998  return result;
1999}
2000
2001/* Main loop for lowering EH constructs.  Also moves gsi to the next
2002   statement.  */
2003
2004static void
2005lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
2006{
2007  gimple_seq replace;
2008  gimple x;
2009  gimple stmt = gsi_stmt (*gsi);
2010
2011  switch (gimple_code (stmt))
2012    {
2013    case GIMPLE_CALL:
2014      {
2015	tree fndecl = gimple_call_fndecl (stmt);
2016	tree rhs, lhs;
2017
2018	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2019	  switch (DECL_FUNCTION_CODE (fndecl))
2020	    {
2021	    case BUILT_IN_EH_POINTER:
2022	      /* The front end may have generated a call to
2023		 __builtin_eh_pointer (0) within a catch region.  Replace
2024		 this zero argument with the current catch region number.  */
2025	      if (state->ehp_region)
2026		{
2027		  tree nr = build_int_cst (integer_type_node,
2028					   state->ehp_region->index);
2029		  gimple_call_set_arg (stmt, 0, nr);
2030		}
2031	      else
2032		{
2033		  /* The user has done something silly.  Remove it.  */
2034		  rhs = null_pointer_node;
2035		  goto do_replace;
2036		}
2037	      break;
2038
2039	    case BUILT_IN_EH_FILTER:
2040	      /* ??? This should never appear, but since it's a builtin it
2041		 is accessible to abuse by users.  Just remove it and
2042		 replace the use with the arbitrary value zero.  */
2043	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2044	    do_replace:
2045	      lhs = gimple_call_lhs (stmt);
2046	      x = gimple_build_assign (lhs, rhs);
2047	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
2048	      /* FALLTHRU */
2049
2050	    case BUILT_IN_EH_COPY_VALUES:
2051	      /* Likewise this should not appear.  Remove it.  */
2052	      gsi_remove (gsi, true);
2053	      return;
2054
2055	    default:
2056	      break;
2057	    }
2058      }
2059      /* FALLTHRU */
2060
2061    case GIMPLE_ASSIGN:
2062      /* If the stmt can throw, use a new temporary for the assignment
2063         to an LHS.  This makes sure the old value of the LHS is
2064	 available on the EH edge.  Only do so for statements that
2065	 potentially fall through (no noreturn calls e.g.), otherwise
2066	 this new assignment might create fake fallthru regions.  */
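      /* Illustrative example (names are made up):

	     x = call_that_may_throw ();

	 becomes

	     tmp = call_that_may_throw ();
	     x = tmp;

	 so that X still holds its old value along the EH edge.  */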
2067      if (stmt_could_throw_p (stmt)
2068	  && gimple_has_lhs (stmt)
2069	  && gimple_stmt_may_fallthru (stmt)
2070	  && !tree_could_throw_p (gimple_get_lhs (stmt))
2071	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2072	{
2073	  tree lhs = gimple_get_lhs (stmt);
2074	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
2075	  gimple s = gimple_build_assign (lhs, tmp);
2076	  gimple_set_location (s, gimple_location (stmt));
2077	  gimple_set_block (s, gimple_block (stmt));
2078	  gimple_set_lhs (stmt, tmp);
2079	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2080	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2081	    DECL_GIMPLE_REG_P (tmp) = 1;
2082	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
2083	}
2084      /* Look for things that can throw exceptions, and record them.  */
2085      if (state->cur_region && stmt_could_throw_p (stmt))
2086	{
2087	  record_stmt_eh_region (state->cur_region, stmt);
2088	  note_eh_region_may_contain_throw (state->cur_region);
2089	}
2090      break;
2091
2092    case GIMPLE_COND:
2093    case GIMPLE_GOTO:
2094    case GIMPLE_RETURN:
2095      maybe_record_in_goto_queue (state, stmt);
2096      break;
2097
2098    case GIMPLE_SWITCH:
2099      verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2100      break;
2101
2102    case GIMPLE_TRY:
2103      {
2104	gtry *try_stmt = as_a <gtry *> (stmt);
2105	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2106	  replace = lower_try_finally (state, try_stmt);
2107	else
2108	  {
2109	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2110	    if (!x)
2111	      {
2112		replace = gimple_try_eval (try_stmt);
2113		lower_eh_constructs_1 (state, &replace);
2114	      }
2115	    else
2116	      switch (gimple_code (x))
2117		{
2118		case GIMPLE_CATCH:
2119		  replace = lower_catch (state, try_stmt);
2120		  break;
2121		case GIMPLE_EH_FILTER:
2122		  replace = lower_eh_filter (state, try_stmt);
2123		  break;
2124		case GIMPLE_EH_MUST_NOT_THROW:
2125		  replace = lower_eh_must_not_throw (state, try_stmt);
2126		  break;
2127		case GIMPLE_EH_ELSE:
2128		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2129		  gcc_unreachable ();
2130		default:
2131		  replace = lower_cleanup (state, try_stmt);
2132		  break;
2133		}
2134	  }
2135      }
2136
2137      /* Remove the old stmt and insert the transformed sequence
2138	 instead. */
2139      gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2140      gsi_remove (gsi, true);
2141
2142      /* Return since we don't want gsi_next ().  */
2143      return;
2144
2145    case GIMPLE_EH_ELSE:
2146      /* We should be eliminating this in lower_try_finally et al.  */
2147      gcc_unreachable ();
2148
2149    default:
2150      /* A type, a decl, or some kind of statement that we're not
2151	 interested in.  Don't walk them.  */
2152      break;
2153    }
2154
2155  gsi_next (gsi);
2156}
2157
2158/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2159
2160static void
2161lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2162{
2163  gimple_stmt_iterator gsi;
2164  for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2165    lower_eh_constructs_2 (state, &gsi);
2166}
2167
2168namespace {
2169
2170const pass_data pass_data_lower_eh =
2171{
2172  GIMPLE_PASS, /* type */
2173  "eh", /* name */
2174  OPTGROUP_NONE, /* optinfo_flags */
2175  TV_TREE_EH, /* tv_id */
2176  PROP_gimple_lcf, /* properties_required */
2177  PROP_gimple_leh, /* properties_provided */
2178  0, /* properties_destroyed */
2179  0, /* todo_flags_start */
2180  0, /* todo_flags_finish */
2181};
2182
2183class pass_lower_eh : public gimple_opt_pass
2184{
2185public:
2186  pass_lower_eh (gcc::context *ctxt)
2187    : gimple_opt_pass (pass_data_lower_eh, ctxt)
2188  {}
2189
2190  /* opt_pass methods: */
2191  virtual unsigned int execute (function *);
2192
2193}; // class pass_lower_eh
2194
2195unsigned int
2196pass_lower_eh::execute (function *fun)
2197{
2198  struct leh_state null_state;
2199  gimple_seq bodyp;
2200
2201  bodyp = gimple_body (current_function_decl);
2202  if (bodyp == NULL)
2203    return 0;
2204
2205  finally_tree = new hash_table<finally_tree_hasher> (31);
2206  eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2207  memset (&null_state, 0, sizeof (null_state));
2208
2209  collect_finally_tree_1 (bodyp, NULL);
2210  lower_eh_constructs_1 (&null_state, &bodyp);
2211  gimple_set_body (current_function_decl, bodyp);
2212
2213  /* We assume there's a return statement, or something, at the end of
2214     the function, and thus plopping the EH sequence afterward won't
2215     change anything.  */
2216  gcc_assert (!gimple_seq_may_fallthru (bodyp));
2217  gimple_seq_add_seq (&bodyp, eh_seq);
2218
2219  /* We assume that since BODYP already existed, adding EH_SEQ to it
2220     didn't change its value, and we don't have to re-set the function.  */
2221  gcc_assert (bodyp == gimple_body (current_function_decl));
2222
2223  delete finally_tree;
2224  finally_tree = NULL;
2225  BITMAP_FREE (eh_region_may_contain_throw_map);
2226  eh_seq = NULL;
2227
2228  /* If this function needs a language-specific EH personality routine
2229     and the frontend didn't already set one, do so now.  */
2230  if (function_needs_eh_personality (fun) == eh_personality_lang
2231      && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2232    DECL_FUNCTION_PERSONALITY (current_function_decl)
2233      = lang_hooks.eh_personality ();
2234
2235  return 0;
2236}
2237
2238} // anon namespace
2239
2240gimple_opt_pass *
2241make_pass_lower_eh (gcc::context *ctxt)
2242{
2243  return new pass_lower_eh (ctxt);
2244}
2245
2246/* Create the multiple edges from an EH_DISPATCH statement to all of
2247   the possible handlers for its EH region.  Return true if there's
2248   no fallthru edge; false if there is.  */
2249
2250bool
2251make_eh_dispatch_edges (geh_dispatch *stmt)
2252{
2253  eh_region r;
2254  eh_catch c;
2255  basic_block src, dst;
2256
2257  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2258  src = gimple_bb (stmt);
2259
2260  switch (r->type)
2261    {
2262    case ERT_TRY:
2263      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2264	{
2265	  dst = label_to_block (c->label);
2266	  make_edge (src, dst, 0);
2267
2268	  /* A catch-all handler doesn't have a fallthru.  */
2269	  if (c->type_list == NULL)
2270	    return false;
2271	}
2272      break;
2273
2274    case ERT_ALLOWED_EXCEPTIONS:
2275      dst = label_to_block (r->u.allowed.label);
2276      make_edge (src, dst, 0);
2277      break;
2278
2279    default:
2280      gcc_unreachable ();
2281    }
2282
2283  return true;
2284}
2285
2286/* Create the single EH edge from STMT to its nearest landing pad,
2287   if there is such a landing pad within the current function.  */
2288
2289void
2290make_eh_edges (gimple stmt)
2291{
2292  basic_block src, dst;
2293  eh_landing_pad lp;
2294  int lp_nr;
2295
2296  lp_nr = lookup_stmt_eh_lp (stmt);
2297  if (lp_nr <= 0)
2298    return;
2299
2300  lp = get_eh_landing_pad_from_number (lp_nr);
2301  gcc_assert (lp != NULL);
2302
2303  src = gimple_bb (stmt);
2304  dst = label_to_block (lp->post_landing_pad);
2305  make_edge (src, dst, EDGE_EH);
2306}
2307
2308/* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2309   do not actually perform the final edge redirection.
2310
2311   CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2312   we intend to change the destination EH region as well; this means
2313   EH_LANDING_PAD_NR must already be set on the destination block label.
2314   If false, we're being called from generic cfg manipulation code and we
2315   should preserve our place within the region tree.  */
2316
2317static void
2318redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2319{
2320  eh_landing_pad old_lp, new_lp;
2321  basic_block old_bb;
2322  gimple throw_stmt;
2323  int old_lp_nr, new_lp_nr;
2324  tree old_label, new_label;
2325  edge_iterator ei;
2326  edge e;
2327
2328  old_bb = edge_in->dest;
2329  old_label = gimple_block_label (old_bb);
2330  old_lp_nr = EH_LANDING_PAD_NR (old_label);
2331  gcc_assert (old_lp_nr > 0);
2332  old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2333
2334  throw_stmt = last_stmt (edge_in->src);
2335  gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2336
2337  new_label = gimple_block_label (new_bb);
2338
2339  /* Look for an existing region that might be using NEW_BB already.  */
2340  new_lp_nr = EH_LANDING_PAD_NR (new_label);
2341  if (new_lp_nr)
2342    {
2343      new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2344      gcc_assert (new_lp);
2345
2346      /* Unless CHANGE_REGION is true, the new and old landing pad
2347	 had better be associated with the same EH region.  */
2348      gcc_assert (change_region || new_lp->region == old_lp->region);
2349    }
2350  else
2351    {
2352      new_lp = NULL;
2353      gcc_assert (!change_region);
2354    }
2355
2356  /* Notice when we redirect the last EH edge away from OLD_BB.  */
2357  FOR_EACH_EDGE (e, ei, old_bb->preds)
2358    if (e != edge_in && (e->flags & EDGE_EH))
2359      break;
2360
2361  if (new_lp)
2362    {
2363      /* NEW_LP already exists.  If there are still edges into OLD_LP,
2364	 there's nothing to do with the EH tree.  If there are no more
2365	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2366	 If CHANGE_REGION is true, then our caller is expecting to remove
2367	 the landing pad.  */
2368      if (e == NULL && !change_region)
2369	remove_eh_landing_pad (old_lp);
2370    }
2371  else
2372    {
2373      /* No correct landing pad exists.  If there are no more edges
2374	 into OLD_LP, then we can simply re-use the existing landing pad.
2375	 Otherwise, we have to create a new landing pad.  */
2376      if (e == NULL)
2377	{
2378	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2379	  new_lp = old_lp;
2380	}
2381      else
2382	new_lp = gen_eh_landing_pad (old_lp->region);
2383      new_lp->post_landing_pad = new_label;
2384      EH_LANDING_PAD_NR (new_label) = new_lp->index;
2385    }
2386
2387  /* Maybe move the throwing statement to the new region.  */
2388  if (old_lp != new_lp)
2389    {
2390      remove_stmt_from_eh_lp (throw_stmt);
2391      add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2392    }
2393}
2394
2395/* Redirect EH edge EDGE_IN to NEW_BB.  */
2396
2397edge
2398redirect_eh_edge (edge edge_in, basic_block new_bb)
2399{
2400  redirect_eh_edge_1 (edge_in, new_bb, false);
2401  return ssa_redirect_edge (edge_in, new_bb);
2402}
2403
2404/* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
2405   labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2406   The actual edge update will happen in the caller.  */
2407
2408void
2409redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2410{
2411  tree new_lab = gimple_block_label (new_bb);
2412  bool any_changed = false;
2413  basic_block old_bb;
2414  eh_region r;
2415  eh_catch c;
2416
2417  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2418  switch (r->type)
2419    {
2420    case ERT_TRY:
2421      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2422	{
2423	  old_bb = label_to_block (c->label);
2424	  if (old_bb == e->dest)
2425	    {
2426	      c->label = new_lab;
2427	      any_changed = true;
2428	    }
2429	}
2430      break;
2431
2432    case ERT_ALLOWED_EXCEPTIONS:
2433      old_bb = label_to_block (r->u.allowed.label);
2434      gcc_assert (old_bb == e->dest);
2435      r->u.allowed.label = new_lab;
2436      any_changed = true;
2437      break;
2438
2439    default:
2440      gcc_unreachable ();
2441    }
2442
2443  gcc_assert (any_changed);
2444}
2445
2446/* Helper function for operation_could_trap_p and stmt_could_throw_p.  */
2447
2448bool
2449operation_could_trap_helper_p (enum tree_code op,
2450			       bool fp_operation,
2451			       bool honor_trapv,
2452			       bool honor_nans,
2453			       bool honor_snans,
2454			       tree divisor,
2455			       bool *handled)
2456{
2457  *handled = true;
2458  switch (op)
2459    {
2460    case TRUNC_DIV_EXPR:
2461    case CEIL_DIV_EXPR:
2462    case FLOOR_DIV_EXPR:
2463    case ROUND_DIV_EXPR:
2464    case EXACT_DIV_EXPR:
2465    case CEIL_MOD_EXPR:
2466    case FLOOR_MOD_EXPR:
2467    case ROUND_MOD_EXPR:
2468    case TRUNC_MOD_EXPR:
2469    case RDIV_EXPR:
2470      if (honor_snans || honor_trapv)
2471	return true;
2472      if (fp_operation)
2473	return flag_trapping_math;
2474      if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2475        return true;
2476      return false;
2477
2478    case LT_EXPR:
2479    case LE_EXPR:
2480    case GT_EXPR:
2481    case GE_EXPR:
2482    case LTGT_EXPR:
2483      /* Some floating point comparisons may trap.  */
2484      return honor_nans;
2485
2486    case EQ_EXPR:
2487    case NE_EXPR:
2488    case UNORDERED_EXPR:
2489    case ORDERED_EXPR:
2490    case UNLT_EXPR:
2491    case UNLE_EXPR:
2492    case UNGT_EXPR:
2493    case UNGE_EXPR:
2494    case UNEQ_EXPR:
2495      return honor_snans;
2496
2497    case NEGATE_EXPR:
2498    case ABS_EXPR:
2499    case CONJ_EXPR:
2500      /* These operations don't trap with floating point.  */
2501      if (honor_trapv)
2502	return true;
2503      return false;
2504
2505    case PLUS_EXPR:
2506    case MINUS_EXPR:
2507    case MULT_EXPR:
2508      /* Any floating arithmetic may trap.  */
2509      if (fp_operation && flag_trapping_math)
2510	return true;
2511      if (honor_trapv)
2512	return true;
2513      return false;
2514
2515    case COMPLEX_EXPR:
2516    case CONSTRUCTOR:
2517      /* Constructing an object cannot trap.  */
2518      return false;
2519
2520    default:
2521      /* Any floating arithmetic may trap.  */
2522      if (fp_operation && flag_trapping_math)
2523	return true;
2524
2525      *handled = false;
2526      return false;
2527    }
2528}
2529
2530/* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
2531   on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
2532   type operands that may trap.  If OP is a division operator, DIVISOR contains
2533   the value of the divisor.  */
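/* Usage sketch (hedged): operation_could_trap_p (TRUNC_DIV_EXPR,
   false, false, integer_zero_node) is true, since integer division
   by a divisor that is zero (or simply not constant) may trap,
   whereas integer PLUS_EXPR without -ftrapv cannot trap.  */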
2534
2535bool
2536operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2537			tree divisor)
2538{
2539  bool honor_nans = (fp_operation && flag_trapping_math
2540		     && !flag_finite_math_only);
2541  bool honor_snans = fp_operation && flag_signaling_nans != 0;
2542  bool handled;
2543
2544  if (TREE_CODE_CLASS (op) != tcc_comparison
2545      && TREE_CODE_CLASS (op) != tcc_unary
2546      && TREE_CODE_CLASS (op) != tcc_binary
2547      && op != FMA_EXPR)
2548    return false;
2549
2550  return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2551					honor_nans, honor_snans, divisor,
2552					&handled);
2553}
2554
2555
2556/* Returns true if it is possible to prove that the index of
2557   an array access REF (an ARRAY_REF expression) falls into the
2558   array bounds.  */
2559
2560static bool
2561in_array_bounds_p (tree ref)
2562{
2563  tree idx = TREE_OPERAND (ref, 1);
2564  tree min, max;
2565
2566  if (TREE_CODE (idx) != INTEGER_CST)
2567    return false;
2568
2569  min = array_ref_low_bound (ref);
2570  max = array_ref_up_bound (ref);
2571  if (!min
2572      || !max
2573      || TREE_CODE (min) != INTEGER_CST
2574      || TREE_CODE (max) != INTEGER_CST)
2575    return false;
2576
2577  if (tree_int_cst_lt (idx, min)
2578      || tree_int_cst_lt (max, idx))
2579    return false;
2580
2581  return true;
2582}
2583
2584/* Returns true if it is possible to prove that the range of
2585   an array access REF (an ARRAY_RANGE_REF expression) falls
2586   into the array bounds.  */
2587
2588static bool
2589range_in_array_bounds_p (tree ref)
2590{
2591  tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2592  tree range_min, range_max, min, max;
2593
2594  range_min = TYPE_MIN_VALUE (domain_type);
2595  range_max = TYPE_MAX_VALUE (domain_type);
2596  if (!range_min
2597      || !range_max
2598      || TREE_CODE (range_min) != INTEGER_CST
2599      || TREE_CODE (range_max) != INTEGER_CST)
2600    return false;
2601
2602  min = array_ref_low_bound (ref);
2603  max = array_ref_up_bound (ref);
2604  if (!min
2605      || !max
2606      || TREE_CODE (min) != INTEGER_CST
2607      || TREE_CODE (max) != INTEGER_CST)
2608    return false;
2609
2610  if (tree_int_cst_lt (range_min, min)
2611      || tree_int_cst_lt (max, range_max))
2612    return false;
2613
2614  return true;
2615}
2616
2617/* Return true if EXPR can trap, as in dereferencing an invalid pointer
2618   location or floating-point arithmetic.  Cf. the RTL version, may_trap_p.
2619   This routine expects only GIMPLE lhs or rhs input.  */
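/* For instance: *p is considered trapping unless TREE_THIS_NOTRAP
   is set on the INDIRECT_REF; a[i] with a constant index provably
   inside the array bounds is not; and an access through a weak,
   possibly-undefined decl is conservatively assumed to trap.  */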
2620
2621bool
2622tree_could_trap_p (tree expr)
2623{
2624  enum tree_code code;
2625  bool fp_operation = false;
2626  bool honor_trapv = false;
2627  tree t, base, div = NULL_TREE;
2628
2629  if (!expr)
2630    return false;
2631
2632  code = TREE_CODE (expr);
2633  t = TREE_TYPE (expr);
2634
2635  if (t)
2636    {
2637      if (COMPARISON_CLASS_P (expr))
2638	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2639      else
2640	fp_operation = FLOAT_TYPE_P (t);
2641      honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2642    }
2643
2644  if (TREE_CODE_CLASS (code) == tcc_binary)
2645    div = TREE_OPERAND (expr, 1);
2646  if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2647    return true;
2648
2649 restart:
2650  switch (code)
2651    {
2652    case COMPONENT_REF:
2653    case REALPART_EXPR:
2654    case IMAGPART_EXPR:
2655    case BIT_FIELD_REF:
2656    case VIEW_CONVERT_EXPR:
2657    case WITH_SIZE_EXPR:
2658      expr = TREE_OPERAND (expr, 0);
2659      code = TREE_CODE (expr);
2660      goto restart;
2661
2662    case ARRAY_RANGE_REF:
2663      base = TREE_OPERAND (expr, 0);
2664      if (tree_could_trap_p (base))
2665	return true;
2666      if (TREE_THIS_NOTRAP (expr))
2667	return false;
2668      return !range_in_array_bounds_p (expr);
2669
2670    case ARRAY_REF:
2671      base = TREE_OPERAND (expr, 0);
2672      if (tree_could_trap_p (base))
2673	return true;
2674      if (TREE_THIS_NOTRAP (expr))
2675	return false;
2676      return !in_array_bounds_p (expr);
2677
2678    case TARGET_MEM_REF:
2679    case MEM_REF:
2680      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2681	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2682	return true;
2683      if (TREE_THIS_NOTRAP (expr))
2684	return false;
2685      /* We cannot prove that the access is in-bounds when we have
2686         variable-index TARGET_MEM_REFs.  */
2687      if (code == TARGET_MEM_REF
2688	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2689	return true;
2690      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2691	{
2692	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2693	  offset_int off = mem_ref_offset (expr);
2694	  if (wi::neg_p (off, SIGNED))
2695	    return true;
2696	  if (TREE_CODE (base) == STRING_CST)
2697	    return wi::leu_p (TREE_STRING_LENGTH (base), off);
2698	  else if (DECL_SIZE_UNIT (base) == NULL_TREE
2699		   || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2700		   || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2701	    return true;
2702	  /* Now we are sure the first byte of the access is inside
2703	     the object.  */
2704	  return false;
2705	}
2706      return true;
2707
2708    case INDIRECT_REF:
2709      return !TREE_THIS_NOTRAP (expr);
2710
2711    case ASM_EXPR:
2712      return TREE_THIS_VOLATILE (expr);
2713
2714    case CALL_EXPR:
2715      t = get_callee_fndecl (expr);
2716      /* Assume that calls to weak functions may trap.  */
2717      if (!t || !DECL_P (t))
2718	return true;
2719      if (DECL_WEAK (t))
2720	return tree_could_trap_p (t);
2721      return false;
2722
2723    case FUNCTION_DECL:
2724      /* Assume that accesses to weak functions may trap, unless we know
2725	 they are certainly defined in current TU or in some other
2726	 LTO partition.  */
2727      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2728	{
2729	  cgraph_node *node = cgraph_node::get (expr);
2730	  if (node)
2731	    node = node->function_symbol ();
2732	  return !(node && node->in_other_partition);
2733	}
2734      return false;
2735
2736    case VAR_DECL:
2737      /* Assume that accesses to weak vars may trap, unless we know
2738	 they are certainly defined in current TU or in some other
2739	 LTO partition.  */
2740      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2741	{
2742	  varpool_node *node = varpool_node::get (expr);
2743	  if (node)
2744	    node = node->ultimate_alias_target ();
2745	  return !(node && node->in_other_partition);
2746	}
2747      return false;
2748
2749    default:
2750      return false;
2751    }
2752}
2753
2754
2755/* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
2756   an assignment or a conditional) may throw.  */
2757
2758static bool
2759stmt_could_throw_1_p (gimple stmt)
2760{
2761  enum tree_code code = gimple_expr_code (stmt);
2762  bool honor_nans = false;
2763  bool honor_snans = false;
2764  bool fp_operation = false;
2765  bool honor_trapv = false;
2766  tree t;
2767  size_t i;
2768  bool handled, ret;
2769
2770  if (TREE_CODE_CLASS (code) == tcc_comparison
2771      || TREE_CODE_CLASS (code) == tcc_unary
2772      || TREE_CODE_CLASS (code) == tcc_binary
2773      || code == FMA_EXPR)
2774    {
2775      if (is_gimple_assign (stmt)
2776	  && TREE_CODE_CLASS (code) == tcc_comparison)
2777	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2778      else if (gimple_code (stmt) == GIMPLE_COND)
2779	t = TREE_TYPE (gimple_cond_lhs (stmt));
2780      else
2781	t = gimple_expr_type (stmt);
2782      fp_operation = FLOAT_TYPE_P (t);
2783      if (fp_operation)
2784	{
2785	  honor_nans = flag_trapping_math && !flag_finite_math_only;
2786	  honor_snans = flag_signaling_nans != 0;
2787	}
2788      else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2789	honor_trapv = true;
2790    }
2791
2792  /* Check if the main expression may trap.  */
2793  t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2794  ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2795				       honor_nans, honor_snans, t,
2796				       &handled);
2797  if (handled)
2798    return ret;
2799
2800  /* If the expression does not trap, see if any of the individual operands may
2801     trap.  */
2802  for (i = 0; i < gimple_num_ops (stmt); i++)
2803    if (tree_could_trap_p (gimple_op (stmt, i)))
2804      return true;
2805
2806  return false;
2807}
2808
2809
2810/* Return true if statement STMT could throw an exception.  */
2811
2812bool
2813stmt_could_throw_p (gimple stmt)
2814{
2815  if (!flag_exceptions)
2816    return false;
2817
2818  /* The only statements that can throw an exception are assignments,
2819     conditionals, calls, resx, and asms.  */
2820  switch (gimple_code (stmt))
2821    {
2822    case GIMPLE_RESX:
2823      return true;
2824
2825    case GIMPLE_CALL:
2826      return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2827
2828    case GIMPLE_ASSIGN:
2829    case GIMPLE_COND:
2830      if (!cfun->can_throw_non_call_exceptions)
2831        return false;
2832      return stmt_could_throw_1_p (stmt);
2833
2834    case GIMPLE_ASM:
2835      if (!cfun->can_throw_non_call_exceptions)
2836        return false;
2837      return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2838
2839    default:
2840      return false;
2841    }
2842}
2843
2844
2845/* Return true if expression T could throw an exception.  */
2846
2847bool
2848tree_could_throw_p (tree t)
2849{
2850  if (!flag_exceptions)
2851    return false;
2852  if (TREE_CODE (t) == MODIFY_EXPR)
2853    {
2854      if (cfun->can_throw_non_call_exceptions
2855          && tree_could_trap_p (TREE_OPERAND (t, 0)))
2856        return true;
2857      t = TREE_OPERAND (t, 1);
2858    }
2859
2860  if (TREE_CODE (t) == WITH_SIZE_EXPR)
2861    t = TREE_OPERAND (t, 0);
2862  if (TREE_CODE (t) == CALL_EXPR)
2863    return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2864  if (cfun->can_throw_non_call_exceptions)
2865    return tree_could_trap_p (t);
2866  return false;
2867}
2868
2869/* Return true if STMT can throw an exception that is not caught within
2870   the current function (CFUN).  */
2871
2872bool
2873stmt_can_throw_external (gimple stmt)
2874{
2875  int lp_nr;
2876
2877  if (!stmt_could_throw_p (stmt))
2878    return false;
2879
2880  lp_nr = lookup_stmt_eh_lp (stmt);
2881  return lp_nr == 0;
2882}
2883
2884/* Return true if STMT can throw an exception that is caught within
2885   the current function (CFUN).  */
2886
2887bool
2888stmt_can_throw_internal (gimple stmt)
2889{
2890  int lp_nr;
2891
2892  if (!stmt_could_throw_p (stmt))
2893    return false;
2894
2895  lp_nr = lookup_stmt_eh_lp (stmt);
2896  return lp_nr > 0;
2897}
2898
2899/* Given a statement STMT in IFUN, if STMT can no longer throw, then
2900   remove any entry it might have from the EH table.  Return true if
2901   any change was made.  */
2902
2903bool
2904maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2905{
2906  if (stmt_could_throw_p (stmt))
2907    return false;
2908  return remove_stmt_from_eh_lp_fn (ifun, stmt);
2909}
2910
2911/* Likewise, but always use the current function.  */
2912
2913bool
2914maybe_clean_eh_stmt (gimple stmt)
2915{
2916  return maybe_clean_eh_stmt_fn (cfun, stmt);
2917}
2918
2919/* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2920   OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2921   in the table if it should be in there.  Return TRUE if a replacement was
2922   done that may require an EH edge purge.  */
2923
2924bool
2925maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2926{
2927  int lp_nr = lookup_stmt_eh_lp (old_stmt);
2928
2929  if (lp_nr != 0)
2930    {
2931      bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2932
2933      if (new_stmt == old_stmt && new_stmt_could_throw)
2934	return false;
2935
2936      remove_stmt_from_eh_lp (old_stmt);
2937      if (new_stmt_could_throw)
2938	{
2939	  add_stmt_to_eh_lp (new_stmt, lp_nr);
2940	  return false;
2941	}
2942      else
2943	return true;
2944    }
2945
2946  return false;
2947}
2948
2949/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2950   in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
2951   operand is the return value of duplicate_eh_regions.  */
2952
2953bool
2954maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2955			    struct function *old_fun, gimple old_stmt,
2956			    hash_map<void *, void *> *map,
2957			    int default_lp_nr)
2958{
2959  int old_lp_nr, new_lp_nr;
2960
2961  if (!stmt_could_throw_p (new_stmt))
2962    return false;
2963
2964  old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2965  if (old_lp_nr == 0)
2966    {
2967      if (default_lp_nr == 0)
2968	return false;
2969      new_lp_nr = default_lp_nr;
2970    }
2971  else if (old_lp_nr > 0)
2972    {
2973      eh_landing_pad old_lp, new_lp;
2974
2975      old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2976      new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
2977      new_lp_nr = new_lp->index;
2978    }
2979  else
2980    {
2981      eh_region old_r, new_r;
2982
2983      old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2984      new_r = static_cast<eh_region> (*map->get (old_r));
2985      new_lp_nr = -new_r->index;
2986    }
2987
2988  add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2989  return true;
2990}
2991
2992/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2993   and thus no remapping is required.  */
2994
2995bool
2996maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2997{
2998  int lp_nr;
2999
3000  if (!stmt_could_throw_p (new_stmt))
3001    return false;
3002
3003  lp_nr = lookup_stmt_eh_lp (old_stmt);
3004  if (lp_nr == 0)
3005    return false;
3006
3007  add_stmt_to_eh_lp (new_stmt, lp_nr);
3008  return true;
3009}
3010
3011/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
3012   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
3013   this only handles handlers consisting of a single call, as that's the
3014   important case for C++: a destructor call for a particular object showing
3015   up in multiple handlers.  */
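/* E.g. (illustration): two handlers that each consist of the single
   call  A::~A (&a)  -- same callee, same arguments, no LHS and no
   static chain -- compare equal; anything more complex is
   conservatively treated as different.  */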
3016
3017static bool
3018same_handler_p (gimple_seq oneh, gimple_seq twoh)
3019{
3020  gimple_stmt_iterator gsi;
3021  gimple ones, twos;
3022  unsigned int ai;
3023
3024  gsi = gsi_start (oneh);
3025  if (!gsi_one_before_end_p (gsi))
3026    return false;
3027  ones = gsi_stmt (gsi);
3028
3029  gsi = gsi_start (twoh);
3030  if (!gsi_one_before_end_p (gsi))
3031    return false;
3032  twos = gsi_stmt (gsi);
3033
3034  if (!is_gimple_call (ones)
3035      || !is_gimple_call (twos)
3036      || gimple_call_lhs (ones)
3037      || gimple_call_lhs (twos)
3038      || gimple_call_chain (ones)
3039      || gimple_call_chain (twos)
3040      || !gimple_call_same_target_p (ones, twos)
3041      || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3042    return false;
3043
3044  for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3045    if (!operand_equal_p (gimple_call_arg (ones, ai),
3046                          gimple_call_arg (twos, ai), 0))
3047      return false;
3048
3049  return true;
3050}
3051
3052/* Optimize
3053    try { A() } finally { try { ~B() } catch { ~A() } }
3054    try { ... } finally { ~A() }
3055   into
3056    try { A() } catch { ~B() }
3057    try { ~B() ... } finally { ~A() }
3058
3059   This occurs frequently in C++, where A is a local variable and B is a
3060   temporary used in the initializer for A.  */
3061
3062static void
3063optimize_double_finally (gtry *one, gtry *two)
3064{
3065  gimple oneh;
3066  gimple_stmt_iterator gsi;
3067  gimple_seq cleanup;
3068
3069  cleanup = gimple_try_cleanup (one);
3070  gsi = gsi_start (cleanup);
3071  if (!gsi_one_before_end_p (gsi))
3072    return;
3073
3074  oneh = gsi_stmt (gsi);
3075  if (gimple_code (oneh) != GIMPLE_TRY
3076      || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3077    return;
3078
3079  if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3080    {
3081      gimple_seq seq = gimple_try_eval (oneh);
3082
3083      gimple_try_set_cleanup (one, seq);
3084      gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3085      seq = copy_gimple_seq_and_replace_locals (seq);
3086      gimple_seq_add_seq (&seq, gimple_try_eval (two));
3087      gimple_try_set_eval (two, seq);
3088    }
3089}
3090
3091/* Perform EH refactoring optimizations that are simpler to do when code
3092   flow has been lowered but EH structures haven't.  */
3093
3094static void
3095refactor_eh_r (gimple_seq seq)
3096{
3097  gimple_stmt_iterator gsi;
3098  gimple one, two;
3099
3100  one = NULL;
3101  two = NULL;
3102  gsi = gsi_start (seq);
3103  while (1)
3104    {
3105      one = two;
3106      if (gsi_end_p (gsi))
3107	two = NULL;
3108      else
3109	two = gsi_stmt (gsi);
3110      if (one && two)
3111	if (gtry *try_one = dyn_cast <gtry *> (one))
3112	  if (gtry *try_two = dyn_cast <gtry *> (two))
3113	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3114		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3115	      optimize_double_finally (try_one, try_two);
3116      if (one)
3117	switch (gimple_code (one))
3118	  {
3119	  case GIMPLE_TRY:
3120	    refactor_eh_r (gimple_try_eval (one));
3121	    refactor_eh_r (gimple_try_cleanup (one));
3122	    break;
3123	  case GIMPLE_CATCH:
3124	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3125	    break;
3126	  case GIMPLE_EH_FILTER:
3127	    refactor_eh_r (gimple_eh_filter_failure (one));
3128	    break;
3129	  case GIMPLE_EH_ELSE:
3130	    {
3131	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
3132	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3133	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3134	    }
3135	    break;
3136	  default:
3137	    break;
3138	  }
3139      if (two)
3140	gsi_next (&gsi);
3141      else
3142	break;
3143    }
3144}
3145
3146namespace {
3147
3148const pass_data pass_data_refactor_eh =
3149{
3150  GIMPLE_PASS, /* type */
3151  "ehopt", /* name */
3152  OPTGROUP_NONE, /* optinfo_flags */
3153  TV_TREE_EH, /* tv_id */
3154  PROP_gimple_lcf, /* properties_required */
3155  0, /* properties_provided */
3156  0, /* properties_destroyed */
3157  0, /* todo_flags_start */
3158  0, /* todo_flags_finish */
3159};
3160
3161class pass_refactor_eh : public gimple_opt_pass
3162{
3163public:
3164  pass_refactor_eh (gcc::context *ctxt)
3165    : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3166  {}
3167
3168  /* opt_pass methods: */
3169  virtual bool gate (function *) { return flag_exceptions != 0; }
3170  virtual unsigned int execute (function *)
3171    {
3172      refactor_eh_r (gimple_body (current_function_decl));
3173      return 0;
3174    }
3175
3176}; // class pass_refactor_eh
3177
3178} // anon namespace
3179
3180gimple_opt_pass *
3181make_pass_refactor_eh (gcc::context *ctxt)
3182{
3183  return new pass_refactor_eh (ctxt);
3184}
3185
3186/* At the end of gimple optimization, we can lower RESX.  */
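/* A resx resolves in one of three ways (sketch): with no source
   region it degenerates into __builtin_trap; with a destination
   region in this function it becomes a __builtin_eh_copy_values
   (or, for MUST_NOT_THROW, a call to the region's failure decl)
   followed by a normal edge past the landing pad; otherwise the
   exception escapes the function via _Unwind_Resume (or
   __cxa_end_cleanup on ARM EABI).  */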
3187
3188static bool
3189lower_resx (basic_block bb, gresx *stmt,
3190	    hash_map<eh_region, tree> *mnt_map)
3191{
3192  int lp_nr;
3193  eh_region src_r, dst_r;
3194  gimple_stmt_iterator gsi;
3195  gimple x;
3196  tree fn, src_nr;
3197  bool ret = false;
3198
3199  lp_nr = lookup_stmt_eh_lp (stmt);
3200  if (lp_nr != 0)
3201    dst_r = get_eh_region_from_lp_number (lp_nr);
3202  else
3203    dst_r = NULL;
3204
3205  src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3206  gsi = gsi_last_bb (bb);
3207
3208  if (src_r == NULL)
3209    {
3210      /* We can wind up with no source region when pass_cleanup_eh shows
3211	 that there are no entries into an eh region and deletes it, but
3212	 then the block that contains the resx isn't removed.  This can
3213	 happen without optimization when the switch statement created by
3214	 lower_try_finally_switch isn't simplified to remove the eh case.
3215
3216	 Resolve this by expanding the resx node to an abort.  */
3217
3218      fn = builtin_decl_implicit (BUILT_IN_TRAP);
3219      x = gimple_build_call (fn, 0);
3220      gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3221
3222      while (EDGE_COUNT (bb->succs) > 0)
3223	remove_edge (EDGE_SUCC (bb, 0));
3224    }
3225  else if (dst_r)
3226    {
3227      /* When we have a destination region, we resolve this by copying
3228	 the exc_ptr and filter values into place, and changing the edge
3229	 to immediately after the landing pad.  */
3230      edge e;
3231
3232      if (lp_nr < 0)
3233	{
3234	  basic_block new_bb;
3235	  tree lab;
3236
3237	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3238	     the failure decl into a new block, if needed.  */
3239	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3240
3241	  tree *slot = mnt_map->get (dst_r);
3242	  if (slot == NULL)
3243	    {
3244	      gimple_stmt_iterator gsi2;
3245
3246	      new_bb = create_empty_bb (bb);
3247	      add_bb_to_loop (new_bb, bb->loop_father);
3248	      lab = gimple_block_label (new_bb);
3249	      gsi2 = gsi_start_bb (new_bb);
3250
3251	      fn = dst_r->u.must_not_throw.failure_decl;
3252	      x = gimple_build_call (fn, 0);
3253	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3254	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3255
3256	      mnt_map->put (dst_r, lab);
3257	    }
3258	  else
3259	    {
3260	      lab = *slot;
3261	      new_bb = label_to_block (lab);
3262	    }
3263
3264	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
3265	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3266	  e->count = bb->count;
3267	  e->probability = REG_BR_PROB_BASE;
3268	}
3269      else
3270	{
3271	  edge_iterator ei;
3272	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3273
3274	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3275	  src_nr = build_int_cst (integer_type_node, src_r->index);
3276	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
3277	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3278
3279	  /* Update the flags for the outgoing edge.  */
3280	  e = single_succ_edge (bb);
3281	  gcc_assert (e->flags & EDGE_EH);
3282	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3283
3284	  /* If there are no more EH users of the landing pad, delete it.  */
3285	  FOR_EACH_EDGE (e, ei, e->dest->preds)
3286	    if (e->flags & EDGE_EH)
3287	      break;
3288	  if (e == NULL)
3289	    {
3290	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3291	      remove_eh_landing_pad (lp);
3292	    }
3293	}
3294
3295      ret = true;
3296    }
3297  else
3298    {
3299      tree var;
3300
3301      /* When we don't have a destination region, this exception escapes
3302	 up the call chain.  We resolve this by generating a call to the
3303	 _Unwind_Resume library function.  */
3304
3305      /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3306	 with no arguments for C++ and Java.  Check for that.  */
3307      if (src_r->use_cxa_end_cleanup)
3308	{
3309	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3310	  x = gimple_build_call (fn, 0);
3311	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3312	}
3313      else
3314	{
3315	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3316	  src_nr = build_int_cst (integer_type_node, src_r->index);
3317	  x = gimple_build_call (fn, 1, src_nr);
3318	  var = create_tmp_var (ptr_type_node);
3319	  var = make_ssa_name (var, x);
3320	  gimple_call_set_lhs (x, var);
3321	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3322
3323	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3324	  x = gimple_build_call (fn, 1, var);
3325	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3326	}
3327
3328      gcc_assert (EDGE_COUNT (bb->succs) == 0);
3329    }
3330
3331  gsi_remove (&gsi, true);
3332
3333  return ret;
3334}
3335
3336namespace {
3337
3338const pass_data pass_data_lower_resx =
3339{
3340  GIMPLE_PASS, /* type */
3341  "resx", /* name */
3342  OPTGROUP_NONE, /* optinfo_flags */
3343  TV_TREE_EH, /* tv_id */
3344  PROP_gimple_lcf, /* properties_required */
3345  0, /* properties_provided */
3346  0, /* properties_destroyed */
3347  0, /* todo_flags_start */
3348  0, /* todo_flags_finish */
3349};
3350
3351class pass_lower_resx : public gimple_opt_pass
3352{
3353public:
3354  pass_lower_resx (gcc::context *ctxt)
3355    : gimple_opt_pass (pass_data_lower_resx, ctxt)
3356  {}
3357
3358  /* opt_pass methods: */
3359  virtual bool gate (function *) { return flag_exceptions != 0; }
3360  virtual unsigned int execute (function *);
3361
3362}; // class pass_lower_resx
3363
3364unsigned
3365pass_lower_resx::execute (function *fun)
3366{
3367  basic_block bb;
3368  bool dominance_invalidated = false;
3369  bool any_rewritten = false;
3370
3371  hash_map<eh_region, tree> mnt_map;
3372
3373  FOR_EACH_BB_FN (bb, fun)
3374    {
3375      gimple last = last_stmt (bb);
3376      if (last && is_gimple_resx (last))
3377	{
3378	  dominance_invalidated |=
3379	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3380	  any_rewritten = true;
3381	}
3382    }
3383
3384  if (dominance_invalidated)
3385    {
3386      free_dominance_info (CDI_DOMINATORS);
3387      free_dominance_info (CDI_POST_DOMINATORS);
3388    }
3389
3390  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3391}
3392
3393} // anon namespace
3394
3395gimple_opt_pass *
3396make_pass_lower_resx (gcc::context *ctxt)
3397{
3398  return new pass_lower_resx (ctxt);
3399}
3400
3401/* Try to optimize var = {v} {CLOBBER} stmts followed just by
3402   external throw.  */
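/* Illustrative GIMPLE (assumed shape): a landing-pad block such as

       <L>:
       a = {v} {CLOBBER};
       b = {v} {CLOBBER};
       resx 1;   <-- throws out of the function

   can have its clobbers deleted, since nothing after an external
   throw can observe them.  */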
3403
3404static void
3405optimize_clobbers (basic_block bb)
3406{
3407  gimple_stmt_iterator gsi = gsi_last_bb (bb);
3408  bool any_clobbers = false;
3409  bool seen_stack_restore = false;
3410  edge_iterator ei;
3411  edge e;
3412
3413  /* Only optimize anything if the bb contains at least one clobber,
3414     ends with resx (checked by caller), otherwise contains only
3415     debug stmts, labels and at most one __builtin_stack_restore
3416     call, and has an incoming EH edge.  */
3417  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3418    {
3419      gimple stmt = gsi_stmt (gsi);
3420      if (is_gimple_debug (stmt))
3421	continue;
3422      if (gimple_clobber_p (stmt))
3423	{
3424	  any_clobbers = true;
3425	  continue;
3426	}
3427      if (!seen_stack_restore
3428	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3429	{
3430	  seen_stack_restore = true;
3431	  continue;
3432	}
3433      if (gimple_code (stmt) == GIMPLE_LABEL)
3434	break;
3435      return;
3436    }
3437  if (!any_clobbers)
3438    return;
3439  FOR_EACH_EDGE (e, ei, bb->preds)
3440    if (e->flags & EDGE_EH)
3441      break;
3442  if (e == NULL)
3443    return;
3444  gsi = gsi_last_bb (bb);
3445  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3446    {
3447      gimple stmt = gsi_stmt (gsi);
3448      if (!gimple_clobber_p (stmt))
3449	continue;
3450      unlink_stmt_vdef (stmt);
3451      gsi_remove (&gsi, true);
3452      release_defs (stmt);
3453    }
3454}
3455
3456/* Try to sink var = {v} {CLOBBER} stmts followed just by
3457   internal throw to successor BB.  */
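/* Sketch: when BB ends in a resx whose landing pad is in this
   function, clobbers such as  a = {v} {CLOBBER}  are moved past the
   labels of the single EH successor, with the virtual operand chain
   patched up as described below.  */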
3458
3459static int
3460sink_clobbers (basic_block bb)
3461{
3462  edge e;
3463  edge_iterator ei;
3464  gimple_stmt_iterator gsi, dgsi;
3465  basic_block succbb;
3466  bool any_clobbers = false;
3467  unsigned todo = 0;
3468
3469  /* Only optimize if BB has a single EH successor and
3470     all predecessor edges are EH too.  */
3471  if (!single_succ_p (bb)
3472      || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3473    return 0;
3474
3475  FOR_EACH_EDGE (e, ei, bb->preds)
3476    {
3477      if ((e->flags & EDGE_EH) == 0)
3478	return 0;
3479    }
3480
3481  /* And BB contains only CLOBBER stmts before the final
3482     RESX.  */
3483  gsi = gsi_last_bb (bb);
3484  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3485    {
3486      gimple stmt = gsi_stmt (gsi);
3487      if (is_gimple_debug (stmt))
3488	continue;
3489      if (gimple_code (stmt) == GIMPLE_LABEL)
3490	break;
3491      if (!gimple_clobber_p (stmt))
3492	return 0;
3493      any_clobbers = true;
3494    }
3495  if (!any_clobbers)
3496    return 0;
3497
3498  edge succe = single_succ_edge (bb);
3499  succbb = succe->dest;
3500
3501  /* See if there is a virtual PHI node to take an updated virtual
3502     operand from.  */
3503  gphi *vphi = NULL;
3504  tree vuse = NULL_TREE;
3505  for (gphi_iterator gpi = gsi_start_phis (succbb);
3506       !gsi_end_p (gpi); gsi_next (&gpi))
3507    {
3508      tree res = gimple_phi_result (gpi.phi ());
3509      if (virtual_operand_p (res))
3510	{
3511	  vphi = gpi.phi ();
3512	  vuse = res;
3513	  break;
3514	}
3515    }
3516
3517  dgsi = gsi_after_labels (succbb);
3518  gsi = gsi_last_bb (bb);
3519  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3520    {
3521      gimple stmt = gsi_stmt (gsi);
3522      tree lhs;
3523      if (is_gimple_debug (stmt))
3524	continue;
3525      if (gimple_code (stmt) == GIMPLE_LABEL)
3526	break;
3527      lhs = gimple_assign_lhs (stmt);
3528      /* Unfortunately we don't have dominance info updated at this
3529	 point, so checking if
3530	 dominated_by_p (CDI_DOMINATORS, succbb,
3531			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
3532	 would be too costly.  Thus, avoid sinking any clobbers that
3533	 refer to non-(D) SSA_NAMEs.  */
3534      if (TREE_CODE (lhs) == MEM_REF
3535	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3536	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3537	{
3538	  unlink_stmt_vdef (stmt);
3539	  gsi_remove (&gsi, true);
3540	  release_defs (stmt);
3541	  continue;
3542	}
3543
3544      /* As we do not change stmt order when sinking across a
3545         forwarder edge we can keep virtual operands in place.  */
3546      gsi_remove (&gsi, false);
3547      gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3548
3549      /* But adjust virtual operands if we sunk across a PHI node.  */
3550      if (vuse)
3551	{
3552	  gimple use_stmt;
3553	  imm_use_iterator iter;
3554	  use_operand_p use_p;
3555	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3556	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3557	      SET_USE (use_p, gimple_vdef (stmt));
3558	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3559	    {
3560	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3561	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3562	    }
3563	  /* Adjust the incoming virtual operand.  */
3564	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3565	  SET_USE (gimple_vuse_op (stmt), vuse);
3566	}
3567      /* If the successor has multiple predecessors but no virtual
3568         PHI node, arrange for the virtual operands to be renamed.  */
3569      else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3570	       && !single_pred_p (succbb))
3571	{
3572	  /* In this case there will be no use of the VDEF of this stmt.
3573	     ???  Unless this is a secondary opportunity and we have not
3574	     removed unreachable blocks yet, so we cannot assert this.
3575	     Which also means we will end up renaming too many times.  */
3576	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3577	  mark_virtual_operands_for_renaming (cfun);
3578	  todo |= TODO_update_ssa_only_virtuals;
3579	}
3580    }
3581
3582  return todo;
3583}
3584
3585/* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3586   we have found some duplicate labels and removed some edges.  */
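/* Illustration (assumed shape): an ERT_TRY dispatch with two typed
   handlers lowers to roughly

       filter = __builtin_eh_filter (region_nr);
       switch (filter)
         { case 1: goto L1; case 2: goto L2; default: goto Ldef; }

   while a lone catch-all handler needs no switch at all.  */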

static bool
lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
{
  gimple_stmt_iterator gsi;
  int region_nr;
  eh_region r;
  tree filter, fn;
  gimple x;
  bool redirected = false;

  region_nr = gimple_eh_dispatch_region (stmt);
  r = get_eh_region_from_number (region_nr);

  gsi = gsi_last_bb (src);

  switch (r->type)
    {
    case ERT_TRY:
      {
	auto_vec<tree> labels;
	tree default_label = NULL;
	eh_catch c;
	edge_iterator ei;
	edge e;
	hash_set<tree> seen_values;

	/* Collect the labels for a switch.  Zero the post_landing_pad
	   field because we'll no longer have anything keeping these labels
	   in existence and the optimizer will be free to merge these
	   blocks at will.  */
	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	  {
	    tree tp_node, flt_node, lab = c->label;
	    bool have_label = false;

	    c->label = NULL;
	    tp_node = c->type_list;
	    flt_node = c->filter_list;

	    if (tp_node == NULL)
	      {
	        default_label = lab;
		break;
	      }
	    do
	      {
		/* Filter out duplicate labels that arise when this handler
		   is shadowed by an earlier one.  When no labels are
		   attached to the handler anymore, we remove
		   the corresponding edge and then we delete unreachable
		   blocks at the end of this pass.  */
		if (! seen_values.contains (TREE_VALUE (flt_node)))
		  {
		    tree t = build_case_label (TREE_VALUE (flt_node),
					       NULL, lab);
		    labels.safe_push (t);
		    seen_values.add (TREE_VALUE (flt_node));
		    have_label = true;
		  }

		tp_node = TREE_CHAIN (tp_node);
		flt_node = TREE_CHAIN (flt_node);
	      }
	    while (tp_node);
	    if (! have_label)
	      {
	        remove_edge (find_edge (src, label_to_block (lab)));
	        redirected = true;
	      }
	  }

	/* Clean up the edge flags.  */
	FOR_EACH_EDGE (e, ei, src->succs)
	  {
	    if (e->flags & EDGE_FALLTHRU)
	      {
		/* If there was no catch-all, use the fallthru edge.  */
		if (default_label == NULL)
		  default_label = gimple_block_label (e->dest);
		e->flags &= ~EDGE_FALLTHRU;
	      }
	  }
	gcc_assert (default_label != NULL);

	/* Don't generate a switch if there's only a default case.
	   This is common in the form of try { A; } catch (...) { B; }.  */
	if (!labels.exists ())
	  {
	    e = single_succ_edge (src);
	    e->flags |= EDGE_FALLTHRU;
	  }
	else
	  {
	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
							 region_nr));
	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
	    filter = make_ssa_name (filter, x);
	    gimple_call_set_lhs (x, filter);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	    /* Turn the default label into a default case.  */
	    default_label = build_case_label (NULL, NULL, default_label);
	    sort_case_labels (labels);

	    x = gimple_build_switch (filter, default_label, labels);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	  }
      }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      {
	edge b_e = BRANCH_EDGE (src);
	edge f_e = FALLTHRU_EDGE (src);

	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
						     region_nr));
	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
	filter = make_ssa_name (filter, x);
	gimple_call_set_lhs (x, filter);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	r->u.allowed.label = NULL;
	x = gimple_build_cond (EQ_EXPR, filter,
			       build_int_cst (TREE_TYPE (filter),
					      r->u.allowed.filter),
			       NULL_TREE, NULL_TREE);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
	f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
  gsi_remove (&gsi, true);
  return redirected;
}

namespace {

const pass_data pass_data_lower_eh_dispatch =
{
  GIMPLE_PASS, /* type */
  "ehdisp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh_dispatch : public gimple_opt_pass
{
public:
  pass_lower_eh_dispatch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
  virtual unsigned int execute (function *);

}; // class pass_lower_eh_dispatch

unsigned
pass_lower_eh_dispatch::execute (function *fun)
{
  basic_block bb;
  int flags = 0;
  bool redirected = false;

  assign_filter_values ();

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last == NULL)
	continue;
      if (gimple_code (last) == GIMPLE_EH_DISPATCH)
	{
	  redirected |= lower_eh_dispatch (bb,
					   as_a <geh_dispatch *> (last));
	  flags |= TODO_update_ssa_only_virtuals;
	}
      else if (gimple_code (last) == GIMPLE_RESX)
	{
	  if (stmt_can_throw_external (last))
	    optimize_clobbers (bb);
	  else
	    flags |= sink_clobbers (bb);
	}
    }

  if (redirected)
    delete_unreachable_blocks ();
  return flags;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_eh_dispatch (gcc::context *ctxt)
{
  return new pass_lower_eh_dispatch (ctxt);
}

/* Walk statements, see what regions and, optionally, landing pads
   are really referenced.

   Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
   and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.

   Passing NULL for LP_REACHABLEP is valid; in this case only reachable
   regions are marked.

   The caller is responsible for freeing the returned sbitmaps.  */
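
/* A typical use, as in remove_unreachable_handlers_no_lp below:

     sbitmap r_reachable;
     mark_reachable_handlers (&r_reachable, NULL);
     ...
     sbitmap_free (r_reachable);  */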

static void
mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
{
  sbitmap r_reachable, lp_reachable;
  basic_block bb;
  bool mark_landing_pads = (lp_reachablep != NULL);
  gcc_checking_assert (r_reachablep != NULL);

  r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
  bitmap_clear (r_reachable);
  *r_reachablep = r_reachable;

  if (mark_landing_pads)
    {
      lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
      bitmap_clear (lp_reachable);
      *lp_reachablep = lp_reachable;
    }
  else
    lp_reachable = NULL;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (mark_landing_pads)
	    {
	      int lp_nr = lookup_stmt_eh_lp (stmt);

	      /* Negative LP numbers are MUST_NOT_THROW regions which
		 are not considered BB enders.  */
	      if (lp_nr < 0)
		bitmap_set_bit (r_reachable, -lp_nr);

	      /* Positive LP numbers are real landing pads, and BB enders.  */
	      else if (lp_nr > 0)
		{
		  gcc_assert (gsi_one_before_end_p (gsi));
		  eh_region region = get_eh_region_from_lp_number (lp_nr);
		  bitmap_set_bit (r_reachable, region->index);
		  bitmap_set_bit (lp_reachable, lp_nr);
		}
	    }

	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
	  switch (gimple_code (stmt))
	    {
	    case GIMPLE_RESX:
	      bitmap_set_bit (r_reachable,
			      gimple_resx_region (as_a <gresx *> (stmt)));
	      break;
	    case GIMPLE_EH_DISPATCH:
	      bitmap_set_bit (r_reachable,
			      gimple_eh_dispatch_region (
                                as_a <geh_dispatch *> (stmt)));
	      break;
	    case GIMPLE_CALL:
	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
		for (int i = 0; i < 2; ++i)
		  {
		    tree rt = gimple_call_arg (stmt, i);
		    HOST_WIDE_INT ri = tree_to_shwi (rt);

		    gcc_assert (ri == (int)ri);
		    bitmap_set_bit (r_reachable, ri);
		  }
	      break;
	    default:
	      break;
	    }
	}
    }
}

/* Remove unreachable handlers and unreachable landing pads.  */

static void
remove_unreachable_handlers (void)
{
  sbitmap r_reachable, lp_reachable;
  eh_region region;
  eh_landing_pad lp;
  unsigned i;

  mark_reachable_handlers (&r_reachable, &lp_reachable);

  if (dump_file)
    {
      fprintf (dump_file, "Before removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "Reachable regions: ");
      dump_bitmap_file (dump_file, r_reachable);
      fprintf (dump_file, "Reachable landing pads: ");
      dump_bitmap_file (dump_file, lp_reachable);
    }

  if (dump_file)
    {
      FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
	if (region && !bitmap_bit_p (r_reachable, region->index))
	  fprintf (dump_file,
		   "Removing unreachable region %d\n",
		   region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && !bitmap_bit_p (lp_reachable, lp->index))
      {
	if (dump_file)
	  fprintf (dump_file,
		   "Removing unreachable landing pad %d\n",
		   lp->index);
	remove_eh_landing_pad (lp);
      }

  if (dump_file)
    {
      fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "\n\n");
    }

  sbitmap_free (r_reachable);
  sbitmap_free (lp_reachable);

#ifdef ENABLE_CHECKING
  verify_eh_tree (cfun);
#endif
}

/* Remove unreachable handlers if any landing pads have been removed after
   the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */

void
maybe_remove_unreachable_handlers (void)
{
  eh_landing_pad lp;
  unsigned i;

  if (cfun->eh == NULL)
    return;

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && lp->post_landing_pad)
      {
	if (label_to_block (lp->post_landing_pad) == NULL)
	  {
	    remove_unreachable_handlers ();
	    return;
	  }
      }
}

/* Remove regions that do not have landing pads.  This assumes
   that remove_unreachable_handlers has already been run, and
   that we've just manipulated the landing pads since then.

   Preserve regions with landing pads and regions that prevent
   exceptions from propagating further, even if these regions
   are not reachable.  */

static void
remove_unreachable_handlers_no_lp (void)
{
  eh_region region;
  sbitmap r_reachable;
  unsigned i;

  mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
    {
      if (! region)
	continue;

      if (region->landing_pads != NULL
	  || region->type == ERT_MUST_NOT_THROW)
	bitmap_set_bit (r_reachable, region->index);

      if (dump_file
	  && !bitmap_bit_p (r_reachable, region->index))
	fprintf (dump_file,
		 "Removing unreachable region %d\n",
		 region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  sbitmap_free (r_reachable);
}

/* Undo critical edge splitting on an EH landing pad.  Earlier, we
   optimistically split all sorts of edges, including EH edges.  The
   optimization passes in between may not have needed them; if not,
   we should undo the split.

   Recognize this case by having one EH edge incoming to the BB and
   one normal edge outgoing; BB should be empty apart from the
   post_landing_pad label.

   Note that this is slightly different from the empty handler case
   handled by cleanup_empty_eh, in that the actual handler may yet
   have actual code but the landing pad has been separated from the
   handler.  As such, cleanup_empty_eh relies on this transformation
   having been done first.  */
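
/* As a rough sketch, the transformation turns

     <throw bb> --EH--> [BB: post_landing_pad label] --normal--> <handler>

   back into

     <throw bb> --EH--> <handler>

   with the landing pad moving to the handler block.  */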

static bool
unsplit_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  edge e_in, e_out;

  /* Quickly check the edge counts on BB for singularity.  */
  if (!single_pred_p (bb) || !single_succ_p (bb))
    return false;
  e_in = single_pred_edge (bb);
  e_out = single_succ_edge (bb);

  /* Input edge must be EH and output edge must be normal.  */
  if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
    return false;

  /* The block must be empty except for the labels and debug insns.  */
  gsi = gsi_after_labels (bb);
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    return false;

  /* The destination block must not already have a landing pad
     for a different region.  */
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
      tree lab;
      int lp_nr;

      if (!label_stmt)
	break;
      lab = gimple_label_label (label_stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* The new destination block must not already be a destination of
     the source block, lest we merge fallthru and eh edges and get
     all sorts of confused.  */
  if (find_edge (e_in->src, e_out->dest))
    return false;

  /* ??? We can get degenerate phis due to cfg cleanups.  I would have
     thought this should have been cleaned up by a phicprop pass, but
     that doesn't appear to handle virtuals.  Propagate by hand.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    {
      for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
	{
	  gimple use_stmt;
	  gphi *phi = gpi.phi ();
	  tree lhs = gimple_phi_result (phi);
	  tree rhs = gimple_phi_arg_def (phi, 0);
	  use_operand_p use_p;
	  imm_use_iterator iter;

	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, rhs);
	    }

	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;

	  remove_phi_node (&gpi, true);
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
	     lp->index, e_out->dest->index);

  /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
     a successor edge, humor it.  But do the real CFG change with the
     predecessor of E_OUT in order to preserve the ordering of arguments
     to the PHI nodes in E_OUT->DEST.  */
  redirect_eh_edge_1 (e_in, e_out->dest, false);
  redirect_edge_pred (e_out, e_in->src);
  e_out->flags = e_in->flags;
  e_out->probability = e_in->probability;
  e_out->count = e_in->count;
  remove_edge (e_in);

  return true;
}

/* Examine each landing pad block and see if it matches unsplit_eh.  */

static bool
unsplit_all_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= unsplit_eh (lp);

  return changed;
}

/* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
   to OLD_BB to NEW_BB; return true on success, false on failure.

   OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
   PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
   Virtual PHIs may be deleted and marked for renaming.  */

static bool
cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
			     edge old_bb_out, bool change_region)
{
  gphi_iterator ngsi, ogsi;
  edge_iterator ei;
  edge e;
  bitmap ophi_handled;

  /* The destination block must not be a regular successor for any
     of the preds of the landing pad.  Thus, avoid turning
        <..>
	 |  \ EH
	 |  <..>
	 |  /
	<..>
     into
        <..>
	|  | EH
	<..>
     which CFG verification would choke on.  See PR45172 and PR51089.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (find_edge (e->src, new_bb))
      return false;

  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);

  ophi_handled = BITMAP_ALLOC (NULL);

  /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
     for the edges we're going to move.  */
  for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
    {
      gphi *ophi, *nphi = ngsi.phi ();
      tree nresult, nop;

      nresult = gimple_phi_result (nphi);
      nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);

      /* Find the corresponding PHI in OLD_BB so we can forward-propagate
	 the source ssa_name.  */
      ophi = NULL;
      for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
	{
	  ophi = ogsi.phi ();
	  if (gimple_phi_result (ophi) == nop)
	    break;
	  ophi = NULL;
	}

      /* If we did find the corresponding PHI, copy those inputs.  */
      if (ophi)
	{
	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
	  if (!has_single_use (nop))
	    {
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;

	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
		{
		  if (!gimple_debug_bind_p (USE_STMT (use_p))
		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
			  || gimple_bb (USE_STMT (use_p)) != new_bb))
		    goto fail;
		}
	    }
	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    {
	      location_t oloc;
	      tree oop;

	      if ((e->flags & EDGE_EH) == 0)
		continue;
	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
	      redirect_edge_var_map_add (e, nresult, oop, oloc);
	    }
	}
      /* If we didn't find the PHI, whether it's a real variable or a VOP,
	 we know from the fact that OLD_BB is tree_empty_eh_handler_p that
	 the variable is unchanged from input to the block, and we can
	 simply re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
      else
	{
	  location_t nloc
	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    redirect_edge_var_map_add (e, nresult, nop, nloc);
	}
    }

  /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
     we don't know what values from the other edges into NEW_BB to use.  */
  for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
    {
      gphi *ophi = ogsi.phi ();
      tree oresult = gimple_phi_result (ophi);
      if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
	goto fail;
    }

  /* Finally, move the edges and update the PHIs.  */
  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_EH)
      {
	/* ???  CFG manipulation routines do not try to update loop
	   form on edge redirection.  Do so manually here for now.  */
	/* If we redirect a loop entry or latch edge, that will either
	   create a multiple entry loop or rotate the loop.  If the loops
	   merge, we may have created a loop with multiple latches.
	   None of this is easily fixed, thus cancel the affected loop
	   and mark the other loop as possibly having multiple latches.  */
	if (e->dest == e->dest->loop_father->header)
	  {
	    mark_loop_for_removal (e->dest->loop_father);
	    new_bb->loop_father->latch = NULL;
	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
	  }
	redirect_eh_edge_1 (e, new_bb, change_region);
	redirect_edge_succ (e, new_bb);
	flush_pending_stmts (e);
      }
    else
      ei_next (&ei);

  BITMAP_FREE (ophi_handled);
  return true;

 fail:
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);
  BITMAP_FREE (ophi_handled);
  return false;
}

/* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
   old region to NEW_REGION at BB.  */

static void
cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
			  eh_landing_pad lp, eh_region new_region)
{
  gimple_stmt_iterator gsi;
  eh_landing_pad *pp;

  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
    continue;
  *pp = lp->next_lp;

  lp->region = new_region;
  lp->next_lp = new_region->landing_pads;
  new_region->landing_pads = lp;

  /* Delete the RESX that was matched within the empty handler block.  */
  gsi = gsi_last_bb (bb);
  unlink_stmt_vdef (gsi_stmt (gsi));
  gsi_remove (&gsi, true);

  /* Clean up E_OUT for the fallthru.  */
  e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
  e_out->probability = REG_BR_PROB_BASE;
}

/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
   unsplitting than unsplit_eh was prepared to handle, e.g. when
   multiple incoming edges and phis are involved.  */

static bool
cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
{
  gimple_stmt_iterator gsi;
  tree lab;

  /* We really ought not have totally lost everything following
     a landing pad label.  Given that BB is empty, there had better
     be a successor.  */
  gcc_assert (e_out != NULL);

  /* The destination block must not already have a landing pad
     for a different region.  */
  lab = NULL;
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
      int lp_nr;

      if (!stmt)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* Attempt to move the PHIs into the successor block.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unsplit EH landing pad %d to block %i "
		 "(via cleanup_empty_eh).\n",
		 lp->index, e_out->dest->index);
      return true;
    }

  return false;
}

/* Return true if edge E_FIRST is part of an empty infinite loop
   or leads to such a loop through a series of single successor
   empty bbs.  */
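
/* For instance an empty block with a self edge qualifies, as does a
   chain of empty forwarder blocks that re-enters one of its own blocks;
   the walk below marks visited blocks via their AUX field to detect
   such a cycle.  */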

static bool
infinite_empty_loop_p (edge e_first)
{
  bool inf_loop = false;
  edge e;

  if (e_first->dest == e_first->src)
    return true;

  e_first->src->aux = (void *) 1;
  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
    {
      gimple_stmt_iterator gsi;
      if (e->dest->aux)
	{
	  inf_loop = true;
	  break;
	}
      e->dest->aux = (void *) 1;
      gsi = gsi_after_labels (e->dest);
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
	gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	break;
    }
  e_first->src->aux = NULL;
  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
    e->dest->aux = NULL;

  return inf_loop;
}

/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.
   This is similar to jump forwarding, just across EH edges.  */
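
/* For illustration, a landing pad block consisting only of

     <lpN>: resx M;

   merely forwards the exception, so its incoming EH edges can be
   redirected to wherever the RESX itself would have transferred
   control, leaving the block to go unreachable.  */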

static bool
cleanup_empty_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  gimple resx;
  eh_region new_region;
  edge_iterator ei;
  edge e, e_out;
  bool has_non_eh_pred;
  bool ret = false;
  int new_lp_nr;

  /* There can be zero or one edges out of BB.  This is the quickest test.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      e_out = NULL;
      break;
    case 1:
      e_out = single_succ_edge (bb);
      break;
    default:
      return false;
    }

  resx = last_stmt (bb);
  if (resx && is_gimple_resx (resx))
    {
      if (stmt_can_throw_external (resx))
	optimize_clobbers (bb);
      else if (sink_clobbers (bb))
	ret = true;
    }

  gsi = gsi_after_labels (bb);

  /* Make sure to skip debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If the block is totally empty, look for more unsplitting cases.  */
  if (gsi_end_p (gsi))
    {
      /* Bail out for the degenerate case of an infinite loop.  Also bail
	 out if BB has no successors and is totally empty, which can happen
	 e.g. because of an incorrect noreturn attribute.  */
      if (e_out == NULL
	  || infinite_empty_loop_p (e_out))
	return ret;

      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
    }

  /* The block should consist only of a single RESX statement, modulo a
     preceding call to __builtin_stack_restore if there is no outgoing
     edge, since the call can be eliminated in this case.  */
  resx = gsi_stmt (gsi);
  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
    {
      gsi_next (&gsi);
      resx = gsi_stmt (gsi);
    }
  if (!is_gimple_resx (resx))
    return ret;
  gcc_assert (gsi_one_before_end_p (gsi));

  /* Determine if there are non-EH edges, or resx edges into the handler.  */
  has_non_eh_pred = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH))
      has_non_eh_pred = true;

  /* Find the handler that's outer of the empty handler by looking at
     where the RESX instruction was vectored.  */
  new_lp_nr = lookup_stmt_eh_lp (resx);
  new_region = get_eh_region_from_lp_number (new_lp_nr);

  /* If there's no destination region within the current function,
     redirection is trivial via removing the throwing statements from
     the EH region, removing the EH edges, and allowing the block
     to go unreachable.  */
  if (new_region == NULL)
    {
      gcc_assert (e_out == NULL);
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* If the destination region is a MUST_NOT_THROW, allow the runtime
     to handle the abort and allow the blocks to go unreachable.  */
  if (new_region->type == ERT_MUST_NOT_THROW)
    {
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    add_stmt_to_eh_lp (stmt, new_lp_nr);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* Try to redirect the EH edges and merge the PHIs into the destination
     landing pad block.  If the merge succeeds, we'll already have redirected
     all the EH edges.  The handler itself will go unreachable if there were
     no normal edges.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
    goto succeed;

  /* Finally, if all input edges are EH edges, then we can (potentially)
     reduce the number of transfers from the runtime by moving the landing
     pad from the original region to the new region.  This is a win when
     we remove the last CLEANUP region along a particular exception
     propagation path.  Since nothing changes except for the region with
     which the landing pad is associated, the PHI nodes do not need to be
     adjusted at all.  */
  if (!has_non_eh_pred)
    {
      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
		 lp->index, new_region->index);

      /* ??? The CFG didn't change, but we may have rendered the
	 old EH region unreachable.  Trigger a cleanup there.  */
      return true;
    }

  return ret;

 succeed:
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
  remove_eh_landing_pad (lp);
  return true;
}

/* Do a post-order traversal of the EH region tree.  Examine each
   post_landing_pad block and see if we can eliminate it as empty.  */

static bool
cleanup_all_empty_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= cleanup_empty_eh (lp);

  return changed;
}

/* Perform cleanups and lowering of exception handling
    1) Cleanup regions whose handlers do nothing are optimized out.
    2) MUST_NOT_THROW regions that became dead because of 1) are optimized out.
    3) Info about regions containing instructions, and about regions
       reachable via local EH edges, is collected.
    4) The EH tree is pruned for regions that are no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
	 Unify those that have the same failure decl and locus.
*/
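
/* For example (illustrative only), inlining an empty destructor can
   leave a cleanup region whose handler does nothing; 1) removes it,
   and a MUST_NOT_THROW region that only guarded that handler then
   becomes dead and is removed by 2).  */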

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing due to all regions
     becoming unreachable.  */
  if (cfun->eh->region_tree)
    {
      bool changed = false;

      if (optimize)
	changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
	{
	  free_dominance_info (CDI_DOMINATORS);
	  free_dominance_info (CDI_POST_DOMINATORS);

          /* We delayed all basic block deletion, as we may have performed
	     cleanups on EH edges while non-EH edges were still present.  */
	  delete_unreachable_blocks ();

	  /* We manipulated the landing pads.  Remove any region that no
	     longer has a landing pad.  */
	  remove_unreachable_handlers_no_lp ();

	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	}
    }

  return 0;
}

namespace {

const pass_data pass_data_cleanup_eh =
{
  GIMPLE_PASS, /* type */
  "ehcleanup", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cleanup_eh : public gimple_opt_pass
{
public:
  pass_cleanup_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
  virtual bool gate (function *fun)
    {
      return fun->eh != NULL && fun->eh->region_tree != NULL;
    }

  virtual unsigned int execute (function *);

}; // class pass_cleanup_eh

unsigned int
pass_cleanup_eh::execute (function *fun)
{
  int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine
     clear it.  This exposes cross-language inlining opportunities
     and avoids references to a never defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (fun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_eh (gcc::context *ctxt)
{
  return new pass_cleanup_eh (ctxt);
}

/* Verify that the BB containing STMT as its last statement has precisely
   the edge that make_eh_edges would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
	{
	  if (eh_edge)
	    {
	      error ("BB %i has multiple EH edges", bb->index);
	      return true;
	    }
	  else
	    eh_edge = e;
	}
    }

  if (lp == NULL)
    {
      if (eh_edge)
	{
	  error ("BB %i can not throw but has an EH edge", bb->index);
	  return true;
	}
      return false;
    }

  if (!stmt_could_throw_p (stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}

/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (geh_dispatch *stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  e = find_edge (src, dst);
	  if (e == NULL)
	    {
	      error ("BB %i is missing an edge", src->index);
	      return true;
	    }
	  e->aux = (void *)e;

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    {
	      want_fallthru = false;
	      break;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
	{
	  error ("BB %i is missing an edge", src->index);
	  return true;
	}
      e->aux = (void *)e;
      break;

    default:
      gcc_unreachable ();
    }

  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  if (fall_edge != NULL)
	    {
	      error ("BB %i too many fallthru edges", src->index);
	      return true;
	    }
	  fall_edge = e;
	}
      else if (e->aux)
	e->aux = NULL;
      else
	{
	  error ("BB %i has incorrect edge", src->index);
	  return true;
	}
    }
  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}
