/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "hashtab.h"
#include "hard-reg-set.h"
#include "function.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "except.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "target.h"
#include "cfgloop.h"
#include "gimple-low.h"

/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This is a structure to do this.  */
typedef union { tree *tp; tree t; gimple g; } treemple;
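
/* For instance, the finally tree built below keys one hash table both by
   LABEL_DECLs (trees) and by GIMPLE_TRY statements (gimples):

	treemple key;
	key.t = label;      - when recording a GIMPLE_LABEL's LABEL_DECL
	key.g = try_stmt;   - when recording a GIMPLE_TRY_FINALLY itself

   Either member yields the same underlying pointer, which is what the
   hash and equality functions use.  */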

/* Misc functions used in this file.  */

/* Remember and look up EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
	lp = gen_eh_landing_pad (region);
      else
	gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */
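
/* For example, a lookup result of 2 means the statement may throw and
   unwinds to landing pad 2; -3 means it lies within MUST_NOT_THROW
   region 3; and 0 means the statement was never recorded, i.e. it is
   not known to throw.  */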

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
  return lp_nr ? *lp_nr : 0;
}

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */
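
/* For example, in

	try { goto out; } finally { cleanup (); }
      out:;

   the label OUT is recorded with the enclosing (outer) node as its
   parent, so walking up from OUT never reaches this GIMPLE_TRY_FINALLY
   and outside_finally_tree reports that the goto escapes it.  */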

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gtry *parent;
};

/* Hashtable helpers.  */

struct finally_tree_hasher : typed_free_remove <finally_tree_node>
{
  typedef finally_tree_node value_type;
  typedef finally_tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
finally_tree_hasher::hash (const value_type *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;

static void
record_in_finally_tree (treemple child, gtry *parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple stmt, gtry *region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gtry *region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple stmt, gtry *region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (as_a <glabel *> (stmt));
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt),
				  as_a <gtry *> (stmt));
	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (
				 as_a <gcatch *> (stmt)),
			      region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
	return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */
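
/* For example, for

	try { return x; } finally { cleanup (); }

   the queued GIMPLE_RETURN is replaced by REPL_STMT, a goto to a label
   at the head of the lowered finally block, while CONT_STMT holds the
   original return so it can be re-emitted at the point where the
   finally block transfers control onward.  */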

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gtry *try_finally_expr;
  gtry *top_p;

  /* While lowering a top_p, it is usually expanded into multiple statements;
     the following field stores them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
	if (tf->goto_queue[i].stmt.g == stmt.g)
	  return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
	{
	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
						  &tf->goto_queue[i]);
	  gcc_assert (!existed);
	}
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
				gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with GSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
		      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
	{
	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
	  gsi_remove (gsi, false);
	  return;
	}
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
				      as_a <gcatch *> (stmt)),
				    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
				      tf);
	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
				      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added; IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
		      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
         = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}

/* Record the label LABEL in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
			    location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
	record_in_goto_queue_label (tf, new_stmt,
				    gimple_cond_true_label (cond_stmt),
				    EXPR_LOCATION (*new_stmt.tp));
	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
	record_in_goto_queue_label (tf, new_stmt,
				    gimple_cond_false_label (cond_stmt),
				    EXPR_LOCATION (*new_stmt.tp));
      }
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
				  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}


#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state,
			     gswitch *switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a GIMPLE_RETURN pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

	int x;
	int foo (void)
	{
	  x = 0;
	  try {
	    return x;
	  } finally {
	    x++;
	  }
	}

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
		     struct leh_tf_state *tf)
{
  ggoto *x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  glabel *x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gresx *x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  geh_dispatch *x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
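      /* bitmap_set_bit returns true only when the bit was newly set, so
	 the walk stops as soon as it reaches a region already marked,
	 whose outer regions must have been marked as well.  */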
      if (region->type == ERT_MUST_NOT_THROW)
	break;
      region = region->outer;
      if (region == NULL)
	break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gtry *tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
	over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
			     location_t loc)
{
  gtry *region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, loc);
	  gimple_set_block (stmt, block);
	}
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline geh_else *
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <geh_else *> (x);
    }
  return NULL;
}

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First, we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */
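
/* As a sketch of that first option: the exception path receives its own
   copy of FINALLY_BLOCK, rewritten below as roughly

	try { finally_block; } catch { must_not_throw (protect_cleanup_actions); }

   where the catch clause is the GIMPLE_EH_MUST_NOT_THROW node built by
   gimple_build_eh_must_not_throw.  */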

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
			       struct leh_state *this_state,
			       struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x;
  geh_mnt *eh_mnt;
  gtry *try_stmt;
  geh_else *eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
	gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  eh_mnt = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  try_stmt = gimple_build_try (finally, gimple_seq_alloc_with_stmt (eh_mnt),
			       GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, try_stmt);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */
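
/* For example, the finally block of

	try { body; } finally { cleanup (); abort (); }

   never falls through, so every incoming edge (fallthru, goto, return,
   and, EH_ELSE aside, exception) can simply be funneled to a label at
   the head of a single emitted copy of it.  */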

static void
lower_try_finally_nofallthru (struct leh_state *state,
			      struct leh_tf_state *tf)
{
  tree lab;
  gimple x;
  geh_else *eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	}
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  emit_post_landing_pad (&eh_seq, tf->region);

	  x = gimple_build_goto (lab);
	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
	  gimple_seq_add_stmt (&eh_seq, x);
	}
    }
}

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */
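
/* For example, assuming the cleanup itself cannot throw,

	try { return x; } finally { cleanup (); }

   has the return as its only exit, so the finally block is emitted
   exactly once and the original return is re-emitted after it.  */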

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  geh_else *eh_else;
  glabel *label_stmt;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
	finally = gimple_eh_else_e_body (eh_else);
      else
	finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
	  gimple_set_block (stmt, block);
	}
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
	 the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
	{
	  /* Reachable by goto to fallthru label only.  Redirect it
	     to the new label (already created, sadly), and do not
	     emit the final branch out, or the fallthru label.  */
	  tf->fallthru_label = NULL;
	  return;
	}
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */
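
/* For example, with two escaping destinations,

	try { if (p) goto A; else goto B; } finally { cleanup (); }

   becomes roughly

	if (p) goto LA; else goto LB;
	LA: cleanup (); goto A;
	LB: cleanup (); goto B;

   at the cost of one copy of the finally block per destination.  */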

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x;
  geh_else *eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
	 since it is only emitted once.  */
      if (eh_else)
	seq = gimple_eh_else_e_body (eh_else);
      else
	seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
	struct goto_queue_node *q;
	tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
	{
	  index = q->index < 0 ? return_index : q->index;

	  if (!labels[index].q)
	    labels[index].q = q;
	}

      for (index = 0; index < return_index + 1; index++)
	{
	  tree lab;

	  q = labels[index].q;
	  if (! q)
	    continue;

	  lab = labels[index].label
	    = create_artificial_label (tf_loc);

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);

	  x = gimple_build_label (lab);
	  gimple_seq_add_stmt (&new_stmt, x);

	  seq = lower_try_finally_dup_block (finally, state, q->location);
	  lower_eh_constructs_1 (state, &seq);
	  gimple_seq_add_seq (&new_stmt, seq);

	  gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
	  maybe_record_in_goto_queue (state, q->cont_stmt);
	}

      for (q = tf->goto_queue; q < qe; q++)
	{
	  tree lab;

	  index = q->index < 0 ? return_index : q->index;

	  if (labels[index].q == q)
	    continue;

	  lab = labels[index].label;

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);
	}

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */
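
/* Continuing the example above, the switch variant instead produces
   roughly

	if (p) { finally_tmp = 0; goto finally_label; }
	else { finally_tmp = 1; goto finally_label; }
	finally_label:
	cleanup ();
	switch (finally_tmp) { case 0: goto A; case 1: goto B; }

   so the finally block is emitted only once.  */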

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x;
  geh_else *eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node,
					      fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	  emit_resx (&eh_seq, tf->region);
	}

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed on entry
     through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node,
						  return_index));
	  gimple_seq_add_stmt (&mod, x);
	  do_return_redirection (q, finally_label, mod);
	  switch_id = return_index;
	}
      else
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node, q->index));
	  gimple_seq_add_stmt (&mod, x);
	  do_goto_redirection (q, finally_label, mod, tf);
	  switch_id = q->index;
	}

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
	  tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
				       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = new hash_map<tree, gimple>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
				     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}

/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */
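
/* For instance, a 10-insn finally block with 4 destinations gives
   f_estimate = (10 + 1) * 4 = 44 against sw_estimate = 10 + 2 * 4 = 18,
   so when optimizing for size the switch form is chosen; at -O2 the
   copy form still wins because f_estimate < 100.  */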
1610
1611static bool
1612decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1613{
1614  int f_estimate, sw_estimate;
1615  geh_else *eh_else;
1616
1617  /* If there's an EH_ELSE involved, the exception path is separate
1618     and really doesn't come into play for this computation.  */
1619  eh_else = get_eh_else (finally);
1620  if (eh_else)
1621    {
1622      ndests -= may_throw;
1623      finally = gimple_eh_else_n_body (eh_else);
1624    }
1625
1626  if (!optimize)
1627    {
1628      gimple_stmt_iterator gsi;
1629
1630      if (ndests == 1)
1631        return true;
1632
1633      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1634	{
1635	  gimple stmt = gsi_stmt (gsi);
1636	  if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
1637	    return false;
1638	}
1639      return true;
1640    }
1641
1642  /* Finally estimate N times, plus N gotos.  */
1643  f_estimate = count_insns_seq (finally, &eni_size_weights);
1644  f_estimate = (f_estimate + 1) * ndests;
1645
1646  /* Switch statement (cost 10), N variable assignments, N gotos.  */
1647  sw_estimate = 10 + 2 * ndests;
1648
1649  /* Optimize for size clearly wants our best guess.  */
1650  if (optimize_function_for_size_p (cfun))
1651    return f_estimate < sw_estimate;
1652
1653  /* ??? These numbers are completely made up so far.  */
1654  if (optimize > 1)
1655    return f_estimate < 100 || f_estimate < sw_estimate * 2;
1656  else
1657    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1658}
1659
1660/* REG is the enclosing region for a possible cleanup region, or the region
1661   itself.  Returns TRUE if such a region would be unreachable.
1662
1663   Cleanup regions within a must-not-throw region aren't actually reachable
1664   even if there are throwing stmts within them, because the personality
1665   routine will call terminate before unwinding.  */
1666
1667static bool
1668cleanup_is_dead_in (eh_region reg)
1669{
1670  while (reg && reg->type == ERT_CLEANUP)
1671    reg = reg->outer;
1672  return (reg && reg->type == ERT_MUST_NOT_THROW);
1673}
1674
1675/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY nodes
1676   to a sequence of labels and blocks, plus the exception region trees
1677   that record all the magic.  This is complicated by the need to
1678   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gtry *tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block may fall through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
				    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    delete this_tf.goto_queue_map;

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
	eh_seq = old_eh_seq;
      else
	{
	  gimple_seq new_eh_seq = eh_seq;
	  eh_seq = old_eh_seq;
	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
	}
    }

  return this_tf.top_p_seq;
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */
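
/* Sketch only (labels and region numbers invented): for

       try { A; } catch (T) { H; }

   the cleanup sequence of the try is rewritten to roughly

       eh_dispatch <R>;
       resx <R>;
       L1: H; goto out;

   and frob_into_branch_around wires the normal path around it to OUT;
   make_eh_dispatch_edges later adds the edge from the dispatch to L1.  */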

static gimple_seq
lower_catch (struct leh_state *state, gtry *tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq, cleanup;
  gimple x;
  location_t try_catch_loc = gimple_location (tp);

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  emit_eh_dispatch (&new_seq, try_region);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = try_region;

  /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
     itself, so that e.g. for coverage purposes the nested cleanups don't
     appear before the cleanup body.  See PR64634 for details.  */
  gimple_seq old_eh_seq = eh_seq;
  eh_seq = NULL;

  out_label = NULL;
  cleanup = gimple_try_cleanup (tp);
  for (gsi = gsi_start (cleanup);
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gcatch *catch_stmt;
      gimple_seq handler;

      catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
      c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));

      handler = gimple_catch_handler (catch_stmt);
      lower_eh_constructs_1 (&this_state, &handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
	{
	  if (!out_label)
	    out_label = create_artificial_label (try_catch_loc);

	  x = gimple_build_goto (out_label);
	  gimple_seq_add_stmt (&new_seq, x);
	}
      if (!c->type_list)
	break;
    }

  gimple_try_set_cleanup (tp, new_seq);

  gimple_seq new_eh_seq = eh_seq;
  eh_seq = old_eh_seq;
  gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
  gimple_seq_add_seq (&eh_seq, new_eh_seq);
  return ret_seq;
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
   region trees that record all the magic.  */
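
/* Sketch only: for a C++98 exception specification such as

       void f () throw (T) { A; }

   the front end emits a GIMPLE_EH_FILTER whose failure sequence
   typically calls __cxa_call_unexpected; here the try's cleanup
   becomes roughly

       eh_dispatch <R>;
       resx <R>;
       Lfail: <failure sequence>;

   with the dispatch branching to LFAIL when the thrown type does not
   match the allowed list.  */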

static gimple_seq
lower_eh_filter (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  gimple inner, x;
  gimple_seq new_seq;

  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));

  if (flag_exceptions)
    {
      this_region = gen_eh_region_allowed (state->cur_region,
				           gimple_eh_filter_types (inner));
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  this_state.cur_region = state->cur_region;
  this_state.ehp_region = this_region;

  emit_eh_dispatch (&new_seq, this_region);
  emit_resx (&new_seq, this_region);

  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
  x = gimple_build_label (this_region->u.allowed.label);
  gimple_seq_add_stmt (&new_seq, x);

  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, this_region, NULL);
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */
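
/* Sketch only: for a region that must not throw, e.g. a C++
   destructor run during unwinding, all we record is the failure
   callback supplied by the front end (typically std::terminate for
   C++) and its location; lower_resx later expands a resume into such
   a region to a direct call of that failure decl.  */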

static gimple_seq
lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;

  if (flag_exceptions)
    {
      gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
      eh_region this_region;

      this_region = gen_eh_region_must_not_throw (state->cur_region);
      this_region->u.must_not_throw.failure_decl
	= gimple_eh_must_not_throw_fndecl (
	    as_a <geh_mnt *> (inner));
      this_region->u.must_not_throw.failure_loc
	= LOCATION_LOCUS (gimple_location (tp));

      /* In order to get mangling applied to this decl, we must mark it
	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
	 needs to happen.  */
      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;

      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  return gimple_try_eval (tp);
}

/* Implement a cleanup expression.  This is similar to try-finally,
   except that we only execute the cleanup block for exception edges.  */
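
/* Sketch only (labels invented): for

       TRY A; CLEANUP C;

   where C must run only when A throws, the result is roughly

       A;			// normal path skips C
       lp: C; resx;		// exception path

   with honor_protect_cleanup_actions emitting the EH-path copy.  */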

static gimple_seq
lower_cleanup (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  struct leh_tf_state fake_tf;
  gimple_seq result;
  bool cleanup_dead = cleanup_is_dead_in (state->cur_region);

  if (flag_exceptions && !cleanup_dead)
    {
      this_region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  /* Build enough of a try-finally state so that we can reuse
     honor_protect_cleanup_actions.  */
  memset (&fake_tf, 0, sizeof (fake_tf));
  fake_tf.top_p = fake_tf.try_finally_expr = tp;
  fake_tf.outer = state;
  fake_tf.region = this_region;
  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
  fake_tf.may_throw = true;

  honor_protect_cleanup_actions (state, NULL, &fake_tf);

  if (fake_tf.may_throw)
    {
      /* In this case honor_protect_cleanup_actions had nothing to do,
	 and we should process this normally.  */
      lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
      result = frob_into_branch_around (tp, this_region,
                                        fake_tf.fallthru_label);
    }
  else
    {
      /* In this case honor_protect_cleanup_actions did nearly all of
	 the work.  All we have left is to append the fallthru_label.  */

      result = gimple_try_eval (tp);
      if (fake_tf.fallthru_label)
	{
	  gimple x = gimple_build_label (fake_tf.fallthru_label);
	  gimple_seq_add_stmt (&result, x);
	}
    }
  return result;
}

/* Main loop for lowering EH constructs.  Also moves GSI to the next
   statement.  */

static void
lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
{
  gimple_seq replace;
  gimple x;
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fndecl = gimple_call_fndecl (stmt);
	tree rhs, lhs;

	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_EH_POINTER:
	      /* The front end may have generated a call to
		 __builtin_eh_pointer (0) within a catch region.  Replace
		 this zero argument with the current catch region number.  */
	      if (state->ehp_region)
		{
		  tree nr = build_int_cst (integer_type_node,
					   state->ehp_region->index);
		  gimple_call_set_arg (stmt, 0, nr);
		}
	      else
		{
		  /* The user has done something silly.  Remove it.  */
		  rhs = null_pointer_node;
		  goto do_replace;
		}
	      break;

	    case BUILT_IN_EH_FILTER:
	      /* ??? This should never appear, but since it's a builtin it
		 is accessible to abuse by users.  Just remove it and
		 replace the use with the arbitrary value zero.  */
	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
	    do_replace:
	      lhs = gimple_call_lhs (stmt);
	      x = gimple_build_assign (lhs, rhs);
	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
	      /* FALLTHRU */

	    case BUILT_IN_EH_COPY_VALUES:
	      /* Likewise this should not appear.  Remove it.  */
	      gsi_remove (gsi, true);
	      return;

	    default:
	      break;
	    }
      }
      /* FALLTHRU */

    case GIMPLE_ASSIGN:
      /* If the stmt can throw, use a new temporary for the assignment
         to the LHS.  This makes sure the old value of the LHS is
	 available on the EH edge.  Only do so for statements that
	 potentially fall through (e.g. no noreturn calls), otherwise
	 this new assignment might create fake fallthru regions.  */
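      /* For example (a sketch; names invented), given a potentially
	 throwing division

	     x = a / b;

	 we rewrite it as

	     tmp = a / b;
	     x = tmp;

	 so that X still holds its previous value on the EH edge.  */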
      if (stmt_could_throw_p (stmt)
	  && gimple_has_lhs (stmt)
	  && gimple_stmt_may_fallthru (stmt)
	  && !tree_could_throw_p (gimple_get_lhs (stmt))
	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
	{
	  tree lhs = gimple_get_lhs (stmt);
	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
	  gimple s = gimple_build_assign (lhs, tmp);
	  gimple_set_location (s, gimple_location (stmt));
	  gimple_set_block (s, gimple_block (stmt));
	  gimple_set_lhs (stmt, tmp);
	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
	    DECL_GIMPLE_REG_P (tmp) = 1;
	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
	}
      /* Look for things that can throw exceptions, and record them.  */
      if (state->cur_region && stmt_could_throw_p (stmt))
	{
	  record_stmt_eh_region (state->cur_region, stmt);
	  note_eh_region_may_contain_throw (state->cur_region);
	}
      break;

    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      maybe_record_in_goto_queue (state, stmt);
      break;

    case GIMPLE_SWITCH:
      verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
      break;

    case GIMPLE_TRY:
      {
	gtry *try_stmt = as_a <gtry *> (stmt);
	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
	  replace = lower_try_finally (state, try_stmt);
	else
	  {
	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
	    if (!x)
	      {
		replace = gimple_try_eval (try_stmt);
		lower_eh_constructs_1 (state, &replace);
	      }
	    else
	      switch (gimple_code (x))
		{
		case GIMPLE_CATCH:
		  replace = lower_catch (state, try_stmt);
		  break;
		case GIMPLE_EH_FILTER:
		  replace = lower_eh_filter (state, try_stmt);
		  break;
		case GIMPLE_EH_MUST_NOT_THROW:
		  replace = lower_eh_must_not_throw (state, try_stmt);
		  break;
		case GIMPLE_EH_ELSE:
		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
		  gcc_unreachable ();
		default:
		  replace = lower_cleanup (state, try_stmt);
		  break;
		}
	  }
      }

      /* Remove the old stmt and insert the transformed sequence
	 instead.  */
      gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
      gsi_remove (gsi, true);

      /* Return since we don't want gsi_next ().  */
      return;

    case GIMPLE_EH_ELSE:
      /* We should be eliminating this in lower_try_finally et al.  */
      gcc_unreachable ();

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }

  gsi_next (gsi);
}

/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2.  */

static void
lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
{
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
    lower_eh_constructs_2 (state, &gsi);
}

namespace {

const pass_data pass_data_lower_eh =
{
  GIMPLE_PASS, /* type */
  "eh", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  PROP_gimple_leh, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh : public gimple_opt_pass
{
public:
  pass_lower_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_lower_eh

unsigned int
pass_lower_eh::execute (function *fun)
{
  struct leh_state null_state;
  gimple_seq bodyp;

  bodyp = gimple_body (current_function_decl);
  if (bodyp == NULL)
    return 0;

  finally_tree = new hash_table<finally_tree_hasher> (31);
  eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
  memset (&null_state, 0, sizeof (null_state));

  collect_finally_tree_1 (bodyp, NULL);
  lower_eh_constructs_1 (&null_state, &bodyp);
  gimple_set_body (current_function_decl, bodyp);

  /* We assume there's a return statement, or something, at the end of
     the function, and thus plopping the EH sequence afterward won't
     change anything.  */
  gcc_assert (!gimple_seq_may_fallthru (bodyp));
  gimple_seq_add_seq (&bodyp, eh_seq);

  /* We assume that since BODYP already existed, adding EH_SEQ to it
     didn't change its value, and we don't have to re-set the function.  */
  gcc_assert (bodyp == gimple_body (current_function_decl));

  delete finally_tree;
  finally_tree = NULL;
  BITMAP_FREE (eh_region_may_contain_throw_map);
  eh_seq = NULL;

  /* If this function needs a language specific EH personality routine
     and the frontend didn't already set one, do so now.  */
  if (function_needs_eh_personality (fun) == eh_personality_lang
      && !DECL_FUNCTION_PERSONALITY (current_function_decl))
    DECL_FUNCTION_PERSONALITY (current_function_decl)
      = lang_hooks.eh_personality ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_eh (gcc::context *ctxt)
{
  return new pass_lower_eh (ctxt);
}

/* Create the multiple edges from an EH_DISPATCH statement to all of
   the possible handlers for its EH region.  Return true if there's
   no fallthru edge; false if there is.  */

bool
make_eh_dispatch_edges (geh_dispatch *stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  make_edge (src, dst, 0);

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    return false;
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      make_edge (src, dst, 0);
      break;

    default:
      gcc_unreachable ();
    }

  return true;
}

/* Create the single EH edge from STMT to its nearest landing pad,
   if there is such a landing pad within the current function.  */

void
make_eh_edges (gimple stmt)
{
  basic_block src, dst;
  eh_landing_pad lp;
  int lp_nr;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr <= 0)
    return;

  lp = get_eh_landing_pad_from_number (lp_nr);
  gcc_assert (lp != NULL);

  src = gimple_bb (stmt);
  dst = label_to_block (lp->post_landing_pad);
  make_edge (src, dst, EDGE_EH);
}

/* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
   do not actually perform the final edge redirection.

   CHANGE_REGION is true when we're being called from cleanup_empty_eh and
   we intend to change the destination EH region as well; this means
   EH_LANDING_PAD_NR must already be set on the destination block label.
   If false, we're being called from generic cfg manipulation code and we
   should preserve our place within the region tree.  */

static void
redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
{
  eh_landing_pad old_lp, new_lp;
  basic_block old_bb;
  gimple throw_stmt;
  int old_lp_nr, new_lp_nr;
  tree old_label, new_label;
  edge_iterator ei;
  edge e;

  old_bb = edge_in->dest;
  old_label = gimple_block_label (old_bb);
  old_lp_nr = EH_LANDING_PAD_NR (old_label);
  gcc_assert (old_lp_nr > 0);
  old_lp = get_eh_landing_pad_from_number (old_lp_nr);

  throw_stmt = last_stmt (edge_in->src);
  gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);

  new_label = gimple_block_label (new_bb);

  /* Look for an existing region that might be using NEW_BB already.  */
  new_lp_nr = EH_LANDING_PAD_NR (new_label);
  if (new_lp_nr)
    {
      new_lp = get_eh_landing_pad_from_number (new_lp_nr);
      gcc_assert (new_lp);

      /* Unless CHANGE_REGION is true, the new and old landing pad
	 had better be associated with the same EH region.  */
      gcc_assert (change_region || new_lp->region == old_lp->region);
    }
  else
    {
      new_lp = NULL;
      gcc_assert (!change_region);
    }

  /* Notice when we redirect the last EH edge away from OLD_BB.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (e != edge_in && (e->flags & EDGE_EH))
      break;

  if (new_lp)
    {
      /* NEW_LP already exists.  If there are still edges into OLD_LP,
	 there's nothing to do with the EH tree.  If there are no more
	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
	 If CHANGE_REGION is true, then our caller is expecting to remove
	 the landing pad.  */
      if (e == NULL && !change_region)
	remove_eh_landing_pad (old_lp);
    }
  else
    {
      /* No correct landing pad exists.  If there are no more edges
	 into OLD_LP, then we can simply re-use the existing landing pad.
	 Otherwise, we have to create a new landing pad.  */
      if (e == NULL)
	{
	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
	  new_lp = old_lp;
	}
      else
	new_lp = gen_eh_landing_pad (old_lp->region);
      new_lp->post_landing_pad = new_label;
      EH_LANDING_PAD_NR (new_label) = new_lp->index;
    }

  /* Maybe move the throwing statement to the new region.  */
  if (old_lp != new_lp)
    {
      remove_stmt_from_eh_lp (throw_stmt);
      add_stmt_to_eh_lp (throw_stmt, new_lp->index);
    }
}

/* Redirect EH edge EDGE_IN to NEW_BB.  */

edge
redirect_eh_edge (edge edge_in, basic_block new_bb)
{
  redirect_eh_edge_1 (edge_in, new_bb, false);
  return ssa_redirect_edge (edge_in, new_bb);
}

/* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
   labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
   The actual edge update will happen in the caller.  */

void
redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
{
  tree new_lab = gimple_block_label (new_bb);
  bool any_changed = false;
  basic_block old_bb;
  eh_region r;
  eh_catch c;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  old_bb = label_to_block (c->label);
	  if (old_bb == e->dest)
	    {
	      c->label = new_lab;
	      any_changed = true;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      old_bb = label_to_block (r->u.allowed.label);
      gcc_assert (old_bb == e->dest);
      r->u.allowed.label = new_lab;
      any_changed = true;
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (any_changed);
}

/* Helper function for operation_could_trap_p and stmt_could_throw_p.  */

bool
operation_could_trap_helper_p (enum tree_code op,
			       bool fp_operation,
			       bool honor_trapv,
			       bool honor_nans,
			       bool honor_snans,
			       tree divisor,
			       bool *handled)
{
  *handled = true;
  switch (op)
    {
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      if (honor_snans || honor_trapv)
	return true;
      if (fp_operation)
	return flag_trapping_math;
      if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
        return true;
      return false;

    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LTGT_EXPR:
      /* Some floating point comparisons may trap.  */
      return honor_nans;

    case EQ_EXPR:
    case NE_EXPR:
    case UNORDERED_EXPR:
    case ORDERED_EXPR:
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
      return honor_snans;

    case NEGATE_EXPR:
    case ABS_EXPR:
    case CONJ_EXPR:
      /* These operations don't trap with floating point.  */
      if (honor_trapv)
	return true;
      return false;

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;
      if (honor_trapv)
	return true;
      return false;

    case COMPLEX_EXPR:
    case CONSTRUCTOR:
      /* Constructing an object cannot trap.  */
      return false;

    default:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;

      *handled = false;
      return false;
    }
}

/* Return true if operation OP may trap.  FP_OPERATION is true if OP is
   applied to floating-point values.  HONOR_TRAPV is true if OP is applied
   to integer operands whose type has trapping overflow.  If OP is a
   division operator, DIVISOR contains the value of the divisor.  */
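
/* For example (sketch; DIV stands for the divisor operand):

       operation_could_trap_p (TRUNC_DIV_EXPR, false, false, div)

   returns true unless DIV is a nonzero integer constant, while

       operation_could_trap_p (PLUS_EXPR, true, false, NULL_TREE)

   returns true only when trapping FP math is in effect.  */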

bool
operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
			tree divisor)
{
  bool honor_nans = (fp_operation && flag_trapping_math
		     && !flag_finite_math_only);
  bool honor_snans = fp_operation && flag_signaling_nans != 0;
  bool handled;

  if (TREE_CODE_CLASS (op) != tcc_comparison
      && TREE_CODE_CLASS (op) != tcc_unary
      && TREE_CODE_CLASS (op) != tcc_binary)
    return false;

  return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
					honor_nans, honor_snans, divisor,
					&handled);
}


/* Returns true if it is possible to prove that the index of
   an array access REF (an ARRAY_REF expression) falls into the
   array bounds.  */
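
/* For example, given 'int a[10]', the access a[4] can be proven in
   bounds (constant index, constant bounds), whereas a[i] with a
   variable index cannot and is conservatively rejected.  */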

static bool
in_array_bounds_p (tree ref)
{
  tree idx = TREE_OPERAND (ref, 1);
  tree min, max;

  if (TREE_CODE (idx) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (idx, min)
      || tree_int_cst_lt (max, idx))
    return false;

  return true;
}

/* Returns true if it is possible to prove that the range of
   an array access REF (an ARRAY_RANGE_REF expression) falls
   into the array bounds.  */

static bool
range_in_array_bounds_p (tree ref)
{
  tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
  tree range_min, range_max, min, max;

  range_min = TYPE_MIN_VALUE (domain_type);
  range_max = TYPE_MAX_VALUE (domain_type);
  if (!range_min
      || !range_max
      || TREE_CODE (range_min) != INTEGER_CST
      || TREE_CODE (range_max) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (range_min, min)
      || tree_int_cst_lt (max, range_max))
    return false;

  return true;
}

/* Return true if EXPR can trap, as in dereferencing an invalid pointer
   location or floating point arithmetic.  Cf. the RTL version, may_trap_p.
   This routine expects only GIMPLE lhs or rhs input.  */
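
/* A few examples (sketch): '*p' traps unless TREE_THIS_NOTRAP is set;
   'a[4]' on 'int a[10]' does not trap; 'x / y' traps unless Y is a
   nonzero constant; a call to a weak, possibly undefined function is
   assumed to trap.  */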

bool
tree_could_trap_p (tree expr)
{
  enum tree_code code;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t, base, div = NULL_TREE;

  if (!expr)
    return false;

  code = TREE_CODE (expr);
  t = TREE_TYPE (expr);

  if (t)
    {
      if (COMPARISON_CLASS_P (expr))
	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
      else
	fp_operation = FLOAT_TYPE_P (t);
      honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
    }

  if (TREE_CODE_CLASS (code) == tcc_binary)
    div = TREE_OPERAND (expr, 1);
  if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
    return true;

 restart:
  switch (code)
    {
    case COMPONENT_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case BIT_FIELD_REF:
    case VIEW_CONVERT_EXPR:
    case WITH_SIZE_EXPR:
      expr = TREE_OPERAND (expr, 0);
      code = TREE_CODE (expr);
      goto restart;

    case ARRAY_RANGE_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !range_in_array_bounds_p (expr);

    case ARRAY_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !in_array_bounds_p (expr);

    case TARGET_MEM_REF:
    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      /* We cannot prove that the access is in-bounds when we have
         variable-index TARGET_MEM_REFs.  */
      if (code == TARGET_MEM_REF
	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
	return true;
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	{
	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
	  offset_int off = mem_ref_offset (expr);
	  if (wi::neg_p (off, SIGNED))
	    return true;
	  if (TREE_CODE (base) == STRING_CST)
	    return wi::leu_p (TREE_STRING_LENGTH (base), off);
	  else if (DECL_SIZE_UNIT (base) == NULL_TREE
		   || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
		   || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
	    return true;
	  /* Now we are sure the first byte of the access is inside
	     the object.  */
	  return false;
	}
      return true;

    case INDIRECT_REF:
      return !TREE_THIS_NOTRAP (expr);

    case ASM_EXPR:
      return TREE_THIS_VOLATILE (expr);

    case CALL_EXPR:
      t = get_callee_fndecl (expr);
      /* Assume that calls to weak functions may trap.  */
      if (!t || !DECL_P (t))
	return true;
      if (DECL_WEAK (t))
	return tree_could_trap_p (t);
      return false;

    case FUNCTION_DECL:
      /* Assume that accesses to weak functions may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
	{
	  cgraph_node *node = cgraph_node::get (expr);
	  if (node)
	    node = node->function_symbol ();
	  return !(node && node->in_other_partition);
	}
      return false;

    case VAR_DECL:
      /* Assume that accesses to weak vars may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
	{
	  varpool_node *node = varpool_node::get (expr);
	  if (node)
	    node = node->ultimate_alias_target ();
	  return !(node && node->in_other_partition);
	}
      return false;

    default:
      return false;
    }
}


/* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
   an assignment or a conditional) may throw.  */

static bool
stmt_could_throw_1_p (gimple stmt)
{
  enum tree_code code = gimple_expr_code (stmt);
  bool honor_nans = false;
  bool honor_snans = false;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t;
  size_t i;
  bool handled, ret;

  if (TREE_CODE_CLASS (code) == tcc_comparison
      || TREE_CODE_CLASS (code) == tcc_unary
      || TREE_CODE_CLASS (code) == tcc_binary)
    {
      if (is_gimple_assign (stmt)
	  && TREE_CODE_CLASS (code) == tcc_comparison)
	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
      else if (gimple_code (stmt) == GIMPLE_COND)
	t = TREE_TYPE (gimple_cond_lhs (stmt));
      else
	t = gimple_expr_type (stmt);
      fp_operation = FLOAT_TYPE_P (t);
      if (fp_operation)
	{
	  honor_nans = flag_trapping_math && !flag_finite_math_only;
	  honor_snans = flag_signaling_nans != 0;
	}
      else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
	honor_trapv = true;
    }

  /* Check if the main expression may trap.  */
  t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
  ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
				       honor_nans, honor_snans, t,
				       &handled);
  if (handled)
    return ret;

  /* If the expression does not trap, see if any of the individual operands may
     trap.  */
  for (i = 0; i < gimple_num_ops (stmt); i++)
    if (tree_could_trap_p (gimple_op (stmt, i)))
      return true;

  return false;
}


/* Return true if statement STMT could throw an exception.  */

bool
stmt_could_throw_p (gimple stmt)
{
  if (!flag_exceptions)
    return false;

  /* The only statements that can throw an exception are assignments,
     conditionals, calls, resx, and asms.  */
  switch (gimple_code (stmt))
    {
    case GIMPLE_RESX:
      return true;

    case GIMPLE_CALL:
      return !gimple_call_nothrow_p (as_a <gcall *> (stmt));

    case GIMPLE_ASSIGN:
    case GIMPLE_COND:
      if (!cfun->can_throw_non_call_exceptions)
        return false;
      return stmt_could_throw_1_p (stmt);

    case GIMPLE_ASM:
      if (!cfun->can_throw_non_call_exceptions)
        return false;
      return gimple_asm_volatile_p (as_a <gasm *> (stmt));

    default:
      return false;
    }
}


/* Return true if expression T could throw an exception.  */

bool
tree_could_throw_p (tree t)
{
  if (!flag_exceptions)
    return false;
  if (TREE_CODE (t) == MODIFY_EXPR)
    {
      if (cfun->can_throw_non_call_exceptions
          && tree_could_trap_p (TREE_OPERAND (t, 0)))
        return true;
      t = TREE_OPERAND (t, 1);
    }

  if (TREE_CODE (t) == WITH_SIZE_EXPR)
    t = TREE_OPERAND (t, 0);
  if (TREE_CODE (t) == CALL_EXPR)
    return (call_expr_flags (t) & ECF_NOTHROW) == 0;
  if (cfun->can_throw_non_call_exceptions)
    return tree_could_trap_p (t);
  return false;
}

/* Return true if STMT can throw an exception that is not caught within
   the current function (CFUN).  */

bool
stmt_can_throw_external (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr == 0;
}

/* Return true if STMT can throw an exception that is caught within
   the current function (CFUN).  */

bool
stmt_can_throw_internal (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr > 0;
}

/* Given a statement STMT in IFUN, if STMT can no longer throw, then
   remove any entry it might have from the EH table.  Return true if
   any change was made.  */

bool
maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
{
  if (stmt_could_throw_p (stmt))
    return false;
  return remove_stmt_from_eh_lp_fn (ifun, stmt);
}

/* Likewise, but always use the current function.  */

bool
maybe_clean_eh_stmt (gimple stmt)
{
  return maybe_clean_eh_stmt_fn (cfun, stmt);
}

/* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
   OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
   in the table if it should be in there.  Return TRUE if a replacement was
   done that may require an EH edge purge.  */

bool
maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
{
  int lp_nr = lookup_stmt_eh_lp (old_stmt);

  if (lp_nr != 0)
    {
      bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);

      if (new_stmt == old_stmt && new_stmt_could_throw)
	return false;

      remove_stmt_from_eh_lp (old_stmt);
      if (new_stmt_could_throw)
	{
	  add_stmt_to_eh_lp (new_stmt, lp_nr);
	  return false;
	}
      else
	return true;
    }

  return false;
}

/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
   in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
   operand is the return value of duplicate_eh_regions.  */

bool
maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
			    struct function *old_fun, gimple old_stmt,
			    hash_map<void *, void *> *map,
			    int default_lp_nr)
{
  int old_lp_nr, new_lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
  if (old_lp_nr == 0)
    {
      if (default_lp_nr == 0)
	return false;
      new_lp_nr = default_lp_nr;
    }
  else if (old_lp_nr > 0)
    {
      eh_landing_pad old_lp, new_lp;

      old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
      new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
      new_lp_nr = new_lp->index;
    }
  else
    {
      eh_region old_r, new_r;

      old_r = (*old_fun->eh->region_array)[-old_lp_nr];
      new_r = static_cast<eh_region> (*map->get (old_r));
      new_lp_nr = -new_r->index;
    }

  add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
  return true;
}

/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
   and thus no remapping is required.  */

bool
maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (old_stmt);
  if (lp_nr == 0)
    return false;

  add_stmt_to_eh_lp (new_stmt, lp_nr);
  return true;
}

/* Returns TRUE if ONEH and TWOH are exception handlers (gimple_try_cleanup of
   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
   this only handles handlers consisting of a single call, as that's the
   important case for C++: a destructor call for a particular object showing
   up in multiple handlers.  */

static bool
same_handler_p (gimple_seq oneh, gimple_seq twoh)
{
  gimple_stmt_iterator gsi;
  gimple ones, twos;
  unsigned int ai;

  gsi = gsi_start (oneh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  ones = gsi_stmt (gsi);

  gsi = gsi_start (twoh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  twos = gsi_stmt (gsi);

  if (!is_gimple_call (ones)
      || !is_gimple_call (twos)
      || gimple_call_lhs (ones)
      || gimple_call_lhs (twos)
      || gimple_call_chain (ones)
      || gimple_call_chain (twos)
      || !gimple_call_same_target_p (ones, twos)
      || gimple_call_num_args (ones) != gimple_call_num_args (twos))
    return false;

  for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
    if (!operand_equal_p (gimple_call_arg (ones, ai),
                          gimple_call_arg (twos, ai), 0))
      return false;

  return true;
}

/* Optimize
    try { A() } finally { try { ~B() } catch { ~A() } }
    try { ... } finally { ~A() }
   into
    try { A() } catch { ~B() }
    try { ~B() ... } finally { ~A() }

   This occurs frequently in C++, where A is a local variable and B is a
   temporary used in the initializer for A.  */

static void
optimize_double_finally (gtry *one, gtry *two)
{
  gimple oneh;
  gimple_stmt_iterator gsi;
  gimple_seq cleanup;

  cleanup = gimple_try_cleanup (one);
  gsi = gsi_start (cleanup);
  if (!gsi_one_before_end_p (gsi))
    return;

  oneh = gsi_stmt (gsi);
  if (gimple_code (oneh) != GIMPLE_TRY
      || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
    return;

  if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
    {
      gimple_seq seq = gimple_try_eval (oneh);

      gimple_try_set_cleanup (one, seq);
      gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
      seq = copy_gimple_seq_and_replace_locals (seq);
      gimple_seq_add_seq (&seq, gimple_try_eval (two));
      gimple_try_set_eval (two, seq);
    }
}

/* Perform EH refactoring optimizations that are simpler to do when code
   flow has been lowered but EH structures haven't.  */

static void
refactor_eh_r (gimple_seq seq)
{
  gimple_stmt_iterator gsi;
  gimple one, two;

  one = NULL;
  two = NULL;
  gsi = gsi_start (seq);
  while (1)
    {
      one = two;
      if (gsi_end_p (gsi))
	two = NULL;
      else
	two = gsi_stmt (gsi);
      if (one && two)
	if (gtry *try_one = dyn_cast <gtry *> (one))
	  if (gtry *try_two = dyn_cast <gtry *> (two))
	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
	      optimize_double_finally (try_one, try_two);
      if (one)
	switch (gimple_code (one))
	  {
	  case GIMPLE_TRY:
	    refactor_eh_r (gimple_try_eval (one));
	    refactor_eh_r (gimple_try_cleanup (one));
	    break;
	  case GIMPLE_CATCH:
	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
	    break;
	  case GIMPLE_EH_FILTER:
	    refactor_eh_r (gimple_eh_filter_failure (one));
	    break;
	  case GIMPLE_EH_ELSE:
	    {
	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
	    }
	    break;
	  default:
	    break;
	  }
      if (two)
	gsi_next (&gsi);
      else
	break;
    }
}

namespace {

const pass_data pass_data_refactor_eh =
{
  GIMPLE_PASS, /* type */
  "ehopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_refactor_eh : public gimple_opt_pass
{
public:
  pass_refactor_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_refactor_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *)
    {
      refactor_eh_r (gimple_body (current_function_decl));
      return 0;
    }

}; // class pass_refactor_eh

} // anon namespace

gimple_opt_pass *
make_pass_refactor_eh (gcc::context *ctxt)
{
  return new pass_refactor_eh (ctxt);
}

/* At the end of gimple optimization, we can lower RESX.  */
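
/* Sketch of the escaping case (names as used below): a resx whose
   exception leaves the function is expanded to

       ptr = __builtin_eh_pointer (src_nr);
       _Unwind_Resume (ptr);

   (or to a call of __cxa_end_cleanup on ARM EABI), while a resx with
   a known destination region copies the exception values into place
   and turns the EH edge into a fallthru edge.  */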

static bool
lower_resx (basic_block bb, gresx *stmt,
	    hash_map<eh_region, tree> *mnt_map)
{
  int lp_nr;
  eh_region src_r, dst_r;
  gimple_stmt_iterator gsi;
  gimple x;
  tree fn, src_nr;
  bool ret = false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0)
    dst_r = get_eh_region_from_lp_number (lp_nr);
  else
    dst_r = NULL;

  src_r = get_eh_region_from_number (gimple_resx_region (stmt));
  gsi = gsi_last_bb (bb);

  if (src_r == NULL)
    {
      /* We can wind up with no source region when pass_cleanup_eh shows
	 that there are no entries into an eh region and deletes it, but
	 then the block that contains the resx isn't removed.  This can
	 happen without optimization when the switch statement created by
	 lower_try_finally_switch isn't simplified to remove the eh case.

	 Resolve this by expanding the resx node to an abort.  */

      fn = builtin_decl_implicit (BUILT_IN_TRAP);
      x = gimple_build_call (fn, 0);
      gsi_insert_before (&gsi, x, GSI_SAME_STMT);

      while (EDGE_COUNT (bb->succs) > 0)
	remove_edge (EDGE_SUCC (bb, 0));
    }
  else if (dst_r)
    {
      /* When we have a destination region, we resolve this by copying
	 the exception pointer and filter values into place, and changing
	 the edge to immediately after the landing pad.  */
      edge e;

      if (lp_nr < 0)
	{
	  basic_block new_bb;
	  tree lab;

	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
	     the failure decl into a new block, if needed.  */
	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);

	  tree *slot = mnt_map->get (dst_r);
	  if (slot == NULL)
	    {
	      gimple_stmt_iterator gsi2;

	      new_bb = create_empty_bb (bb);
	      add_bb_to_loop (new_bb, bb->loop_father);
	      lab = gimple_block_label (new_bb);
	      gsi2 = gsi_start_bb (new_bb);

	      fn = dst_r->u.must_not_throw.failure_decl;
	      x = gimple_build_call (fn, 0);
	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);

	      mnt_map->put (dst_r, lab);
	    }
	  else
	    {
	      lab = *slot;
	      new_bb = label_to_block (lab);
	    }

	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
	  e->count = bb->count;
	  e->probability = REG_BR_PROB_BASE;
	}
      else
	{
	  edge_iterator ei;
	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);

	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  /* Update the flags for the outgoing edge.  */
	  e = single_succ_edge (bb);
	  gcc_assert (e->flags & EDGE_EH);
	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;

	  /* If there are no more EH users of the landing pad, delete it.  */
	  FOR_EACH_EDGE (e, ei, e->dest->preds)
	    if (e->flags & EDGE_EH)
	      break;
	  if (e == NULL)
	    {
	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
	      remove_eh_landing_pad (lp);
	    }
	}

      ret = true;
    }
  else
    {
      tree var;

      /* When we don't have a destination region, this exception escapes
	 up the call chain.  We resolve this by generating a call to the
	 _Unwind_Resume library function.  */

      /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
	 with no arguments for C++ and Java.  Check for that.  */
      if (src_r->use_cxa_end_cleanup)
	{
	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
	  x = gimple_build_call (fn, 0);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}
      else
	{
	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 1, src_nr);
	  var = create_tmp_var (ptr_type_node);
	  var = make_ssa_name (var, x);
	  gimple_call_set_lhs (x, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
	  x = gimple_build_call (fn, 1, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}

      gcc_assert (EDGE_COUNT (bb->succs) == 0);
    }

  gsi_remove (&gsi, true);

  return ret;
}

namespace {

const pass_data pass_data_lower_resx =
{
  GIMPLE_PASS, /* type */
  "resx", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_resx : public gimple_opt_pass
{
public:
  pass_lower_resx (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_resx, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lower_resx

unsigned
pass_lower_resx::execute (function *fun)
{
  basic_block bb;
  bool dominance_invalidated = false;
  bool any_rewritten = false;

  hash_map<eh_region, tree> mnt_map;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last && is_gimple_resx (last))
	{
	  dominance_invalidated |=
	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
	  any_rewritten = true;
	}
    }

  if (dominance_invalidated)
    {
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
    }

  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_lower_resx (gcc::context *ctxt)
{
  return new pass_lower_resx (ctxt);
}

/* Try to optimize var = {v} {CLOBBER} stmts followed just by
   external throw.  */
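
/* For example (sketch; label and region invented), in a block like

       lp: x = {v} {CLOBBER}; resx <R>;	// R escapes the function

   the clobber is dead because the exception leaves the function
   anyway, so it can simply be removed.  */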

static void
optimize_clobbers (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  bool any_clobbers = false;
  bool seen_stack_restore = false;
  edge_iterator ei;
  edge e;

  /* Only optimize anything if the bb contains at least one clobber,
     ends with a resx (checked by the caller), otherwise contains only
     debug stmts, labels and at most one __builtin_stack_restore call,
     and has an incoming EH edge.  */
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_clobber_p (stmt))
	{
	  any_clobbers = true;
	  continue;
	}
      if (!seen_stack_restore
	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
	{
	  seen_stack_restore = true;
	  continue;
	}
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      return;
    }
  if (!any_clobbers)
    return;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_EH)
      break;
  if (e == NULL)
    return;
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!gimple_clobber_p (stmt))
	continue;
      unlink_stmt_vdef (stmt);
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}

/* Try to sink var = {v} {CLOBBER} stmts followed just by
   internal throw to successor BB.  */

static int
sink_clobbers (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi, dgsi;
  basic_block succbb;
  bool any_clobbers = false;
  unsigned todo = 0;

  /* Only optimize if BB has a single EH successor and
     all predecessor edges are EH too.  */
  if (!single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
    return 0;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if ((e->flags & EDGE_EH) == 0)
	return 0;
    }

  /* And BB contains only CLOBBER stmts before the final
     RESX.  */
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      if (!gimple_clobber_p (stmt))
	return 0;
      any_clobbers = true;
    }
  if (!any_clobbers)
    return 0;

  edge succe = single_succ_edge (bb);
  succbb = succe->dest;

  /* See if there is a virtual PHI node to take an updated virtual
     operand from.  */
  gphi *vphi = NULL;
  tree vuse = NULL_TREE;
  for (gphi_iterator gpi = gsi_start_phis (succbb);
       !gsi_end_p (gpi); gsi_next (&gpi))
    {
      tree res = gimple_phi_result (gpi.phi ());
      if (virtual_operand_p (res))
	{
	  vphi = gpi.phi ();
	  vuse = res;
	  break;
	}
    }

  dgsi = gsi_after_labels (succbb);
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      lhs = gimple_assign_lhs (stmt);
      /* Unfortunately we don't have dominance info updated at this
	 point, so checking if
	 dominated_by_p (CDI_DOMINATORS, succbb,
			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
	 would be too costly.  Thus, avoid sinking any clobbers that
	 refer to non-(D) SSA_NAMEs.  */
      if (TREE_CODE (lhs) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	  continue;
	}

      /* As we do not change stmt order when sinking across a
         forwarder edge we can keep virtual operands in place.  */
      gsi_remove (&gsi, false);
      gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);

      /* But adjust virtual operands if we sunk across a PHI node.  */
      if (vuse)
	{
	  gimple use_stmt;
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	      SET_USE (use_p, gimple_vdef (stmt));
	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
	    {
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
	    }
	  /* Adjust the incoming virtual operand.  */
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
	  SET_USE (gimple_vuse_op (stmt), vuse);
	}
      /* If there is no virtual PHI node but SUCCBB has multiple
         predecessors, arrange for the virtual operands to be renamed.  */
      else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
	       && !single_pred_p (succbb))
	{
	  /* In this case there will be no use of the VDEF of this stmt.
	     ???  Unless this is a secondary opportunity and we have not
	     removed unreachable blocks yet, so we cannot assert this.
	     Which also means we will end up renaming too many times.  */
	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
	  mark_virtual_operands_for_renaming (cfun);
	  todo |= TODO_update_ssa_only_virtuals;
	}
    }

  return todo;
}

/* At the end of inlining, we can lower EH_DISPATCH.  Return true when
   we have found some duplicate labels and removed some edges.  */
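
/* Sketch of the ERT_TRY case (labels and filter values invented):
   an eh_dispatch for two handlers is rewritten to roughly

       filter = __builtin_eh_filter (region_nr);
       switch (filter)
	 {
	 case 1: goto L1;
	 case 2: goto L2;
	 default: goto Lfallthru;
	 }

   with the labels of shadowed (duplicate) handlers dropped and their
   edges removed.  */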
3585
3586static bool
3587lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3588{
3589  gimple_stmt_iterator gsi;
3590  int region_nr;
3591  eh_region r;
3592  tree filter, fn;
3593  gimple x;
3594  bool redirected = false;
3595
3596  region_nr = gimple_eh_dispatch_region (stmt);
3597  r = get_eh_region_from_number (region_nr);
3598
3599  gsi = gsi_last_bb (src);
3600
3601  switch (r->type)
3602    {
3603    case ERT_TRY:
3604      {
3605	auto_vec<tree> labels;
3606	tree default_label = NULL;
3607	eh_catch c;
3608	edge_iterator ei;
3609	edge e;
3610	hash_set<tree> seen_values;
3611
	/* Collect the labels for a switch.  Zero each handler's label
	   field because we'll no longer have anything keeping these
	   labels in existence and the optimizer will be free to merge
	   these blocks at will.  */
3616	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3617	  {
3618	    tree tp_node, flt_node, lab = c->label;
3619	    bool have_label = false;
3620
3621	    c->label = NULL;
3622	    tp_node = c->type_list;
3623	    flt_node = c->filter_list;
3624
3625	    if (tp_node == NULL)
3626	      {
3627	        default_label = lab;
3628		break;
3629	      }
3630	    do
3631	      {
3632		/* Filter out duplicate labels that arise when this handler
3633		   is shadowed by an earlier one.  When no labels are
3634		   attached to the handler anymore, we remove
3635		   the corresponding edge and then we delete unreachable
3636		   blocks at the end of this pass.  */
3637		if (! seen_values.contains (TREE_VALUE (flt_node)))
3638		  {
3639		    tree t = build_case_label (TREE_VALUE (flt_node),
3640					       NULL, lab);
3641		    labels.safe_push (t);
3642		    seen_values.add (TREE_VALUE (flt_node));
3643		    have_label = true;
3644		  }
3645
3646		tp_node = TREE_CHAIN (tp_node);
3647		flt_node = TREE_CHAIN (flt_node);
3648	      }
3649	    while (tp_node);
3650	    if (! have_label)
3651	      {
3652	        remove_edge (find_edge (src, label_to_block (lab)));
3653	        redirected = true;
3654	      }
3655	  }
3656
3657	/* Clean up the edge flags.  */
3658	FOR_EACH_EDGE (e, ei, src->succs)
3659	  {
3660	    if (e->flags & EDGE_FALLTHRU)
3661	      {
3662		/* If there was no catch-all, use the fallthru edge.  */
3663		if (default_label == NULL)
3664		  default_label = gimple_block_label (e->dest);
3665		e->flags &= ~EDGE_FALLTHRU;
3666	      }
3667	  }
3668	gcc_assert (default_label != NULL);
3669
3670	/* Don't generate a switch if there's only a default case.
3671	   This is common in the form of try { A; } catch (...) { B; }.  */
3672	if (!labels.exists ())
3673	  {
3674	    e = single_succ_edge (src);
3675	    e->flags |= EDGE_FALLTHRU;
3676	  }
3677	else
3678	  {
3679	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3680	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3681							 region_nr));
3682	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3683	    filter = make_ssa_name (filter, x);
3684	    gimple_call_set_lhs (x, filter);
3685	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3686
3687	    /* Turn the default label into a default case.  */
3688	    default_label = build_case_label (NULL, NULL, default_label);
3689	    sort_case_labels (labels);
3690
3691	    x = gimple_build_switch (filter, default_label, labels);
3692	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3693	  }
3694      }
3695      break;
3696
3697    case ERT_ALLOWED_EXCEPTIONS:
3698      {
3699	edge b_e = BRANCH_EDGE (src);
3700	edge f_e = FALLTHRU_EDGE (src);
3701
3702	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3703	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3704						     region_nr));
3705	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3706	filter = make_ssa_name (filter, x);
3707	gimple_call_set_lhs (x, filter);
3708	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3709
3710	r->u.allowed.label = NULL;
3711	x = gimple_build_cond (EQ_EXPR, filter,
3712			       build_int_cst (TREE_TYPE (filter),
3713					      r->u.allowed.filter),
3714			       NULL_TREE, NULL_TREE);
3715	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3716
	b_e->flags |= EDGE_TRUE_VALUE;
	f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3719      }
3720      break;
3721
3722    default:
3723      gcc_unreachable ();
3724    }
3725
3726  /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
3727  gsi_remove (&gsi, true);
3728  return redirected;
3729}
3730
3731namespace {
3732
3733const pass_data pass_data_lower_eh_dispatch =
3734{
3735  GIMPLE_PASS, /* type */
3736  "ehdisp", /* name */
3737  OPTGROUP_NONE, /* optinfo_flags */
3738  TV_TREE_EH, /* tv_id */
3739  PROP_gimple_lcf, /* properties_required */
3740  0, /* properties_provided */
3741  0, /* properties_destroyed */
3742  0, /* todo_flags_start */
3743  0, /* todo_flags_finish */
3744};
3745
3746class pass_lower_eh_dispatch : public gimple_opt_pass
3747{
3748public:
3749  pass_lower_eh_dispatch (gcc::context *ctxt)
3750    : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3751  {}
3752
3753  /* opt_pass methods: */
3754  virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3755  virtual unsigned int execute (function *);
3756
3757}; // class pass_lower_eh_dispatch
3758
unsigned int
3760pass_lower_eh_dispatch::execute (function *fun)
3761{
3762  basic_block bb;
3763  int flags = 0;
3764  bool redirected = false;
3765
3766  assign_filter_values ();
3767
3768  FOR_EACH_BB_FN (bb, fun)
3769    {
3770      gimple last = last_stmt (bb);
3771      if (last == NULL)
3772	continue;
3773      if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3774	{
3775	  redirected |= lower_eh_dispatch (bb,
3776					   as_a <geh_dispatch *> (last));
3777	  flags |= TODO_update_ssa_only_virtuals;
3778	}
3779      else if (gimple_code (last) == GIMPLE_RESX)
3780	{
3781	  if (stmt_can_throw_external (last))
3782	    optimize_clobbers (bb);
3783	  else
3784	    flags |= sink_clobbers (bb);
3785	}
3786    }
3787
3788  if (redirected)
3789    delete_unreachable_blocks ();
3790  return flags;
3791}
3792
3793} // anon namespace
3794
3795gimple_opt_pass *
3796make_pass_lower_eh_dispatch (gcc::context *ctxt)
3797{
3798  return new pass_lower_eh_dispatch (ctxt);
3799}
3800
3801/* Walk statements, see what regions and, optionally, landing pads
3802   are really referenced.
3803
   Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
   and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.

   Passing NULL for LP_REACHABLEP is valid; in this case only reachable
   regions are marked.
3809
3810   The caller is responsible for freeing the returned sbitmaps.  */
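/* A typical use, mirroring remove_unreachable_handlers below:

     sbitmap r_reachable, lp_reachable;
     mark_reachable_handlers (&r_reachable, &lp_reachable);
     ... query bitmap_bit_p (r_reachable, region->index) and the like ...
     sbitmap_free (r_reachable);
     sbitmap_free (lp_reachable);  */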
3811
3812static void
3813mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3814{
3815  sbitmap r_reachable, lp_reachable;
3816  basic_block bb;
3817  bool mark_landing_pads = (lp_reachablep != NULL);
3818  gcc_checking_assert (r_reachablep != NULL);
3819
3820  r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3821  bitmap_clear (r_reachable);
3822  *r_reachablep = r_reachable;
3823
3824  if (mark_landing_pads)
3825    {
3826      lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3827      bitmap_clear (lp_reachable);
3828      *lp_reachablep = lp_reachable;
3829    }
3830  else
3831    lp_reachable = NULL;
3832
3833  FOR_EACH_BB_FN (bb, cfun)
3834    {
3835      gimple_stmt_iterator gsi;
3836
3837      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3838	{
3839	  gimple stmt = gsi_stmt (gsi);
3840
3841	  if (mark_landing_pads)
3842	    {
3843	      int lp_nr = lookup_stmt_eh_lp (stmt);
3844
3845	      /* Negative LP numbers are MUST_NOT_THROW regions which
3846		 are not considered BB enders.  */
3847	      if (lp_nr < 0)
3848		bitmap_set_bit (r_reachable, -lp_nr);
3849
3850	      /* Positive LP numbers are real landing pads, and BB enders.  */
3851	      else if (lp_nr > 0)
3852		{
3853		  gcc_assert (gsi_one_before_end_p (gsi));
3854		  eh_region region = get_eh_region_from_lp_number (lp_nr);
3855		  bitmap_set_bit (r_reachable, region->index);
3856		  bitmap_set_bit (lp_reachable, lp_nr);
3857		}
3858	    }
3859
3860	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
3861	  switch (gimple_code (stmt))
3862	    {
3863	    case GIMPLE_RESX:
3864	      bitmap_set_bit (r_reachable,
3865			      gimple_resx_region (as_a <gresx *> (stmt)));
3866	      break;
3867	    case GIMPLE_EH_DISPATCH:
3868	      bitmap_set_bit (r_reachable,
3869			      gimple_eh_dispatch_region (
3870                                as_a <geh_dispatch *> (stmt)));
3871	      break;
3872	    case GIMPLE_CALL:
3873	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
3874		for (int i = 0; i < 2; ++i)
3875		  {
3876		    tree rt = gimple_call_arg (stmt, i);
3877		    HOST_WIDE_INT ri = tree_to_shwi (rt);
3878
		    gcc_assert (ri == (int)ri);
3880		    bitmap_set_bit (r_reachable, ri);
3881		  }
3882	      break;
3883	    default:
3884	      break;
3885	    }
3886	}
3887    }
3888}
3889
3890/* Remove unreachable handlers and unreachable landing pads.  */
3891
3892static void
3893remove_unreachable_handlers (void)
3894{
3895  sbitmap r_reachable, lp_reachable;
3896  eh_region region;
3897  eh_landing_pad lp;
3898  unsigned i;
3899
3900  mark_reachable_handlers (&r_reachable, &lp_reachable);
3901
3902  if (dump_file)
3903    {
3904      fprintf (dump_file, "Before removal of unreachable regions:\n");
3905      dump_eh_tree (dump_file, cfun);
3906      fprintf (dump_file, "Reachable regions: ");
3907      dump_bitmap_file (dump_file, r_reachable);
3908      fprintf (dump_file, "Reachable landing pads: ");
3909      dump_bitmap_file (dump_file, lp_reachable);
3910    }
3911
3912  if (dump_file)
3913    {
3914      FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3915	if (region && !bitmap_bit_p (r_reachable, region->index))
3916	  fprintf (dump_file,
3917		   "Removing unreachable region %d\n",
3918		   region->index);
3919    }
3920
3921  remove_unreachable_eh_regions (r_reachable);
3922
3923  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3924    if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3925      {
3926	if (dump_file)
3927	  fprintf (dump_file,
3928		   "Removing unreachable landing pad %d\n",
3929		   lp->index);
3930	remove_eh_landing_pad (lp);
3931      }
3932
3933  if (dump_file)
3934    {
3935      fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3936      dump_eh_tree (dump_file, cfun);
3937      fprintf (dump_file, "\n\n");
3938    }
3939
3940  sbitmap_free (r_reachable);
3941  sbitmap_free (lp_reachable);
3942
3943#ifdef ENABLE_CHECKING
3944  verify_eh_tree (cfun);
3945#endif
3946}
3947
/* Remove unreachable handlers if any landing pads have been removed
   after the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
3950
3951void
3952maybe_remove_unreachable_handlers (void)
3953{
3954  eh_landing_pad lp;
3955  unsigned i;
3956
3957  if (cfun->eh == NULL)
3958    return;
3959
3960  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3961    if (lp && lp->post_landing_pad)
3962      {
3963	if (label_to_block (lp->post_landing_pad) == NULL)
3964	  {
3965	    remove_unreachable_handlers ();
3966	    return;
3967	  }
3968      }
3969}
3970
3971/* Remove regions that do not have landing pads.  This assumes
3972   that remove_unreachable_handlers has already been run, and
3973   that we've just manipulated the landing pads since then.
3974
3975   Preserve regions with landing pads and regions that prevent
3976   exceptions from propagating further, even if these regions
3977   are not reachable.  */
3978
3979static void
3980remove_unreachable_handlers_no_lp (void)
3981{
3982  eh_region region;
3983  sbitmap r_reachable;
3984  unsigned i;
3985
3986  mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3987
3988  FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3989    {
3990      if (! region)
3991	continue;
3992
3993      if (region->landing_pads != NULL
3994	  || region->type == ERT_MUST_NOT_THROW)
3995	bitmap_set_bit (r_reachable, region->index);
3996
3997      if (dump_file
3998	  && !bitmap_bit_p (r_reachable, region->index))
3999	fprintf (dump_file,
4000		 "Removing unreachable region %d\n",
4001		 region->index);
4002    }
4003
4004  remove_unreachable_eh_regions (r_reachable);
4005
4006  sbitmap_free (r_reachable);
4007}
4008
4009/* Undo critical edge splitting on an EH landing pad.  Earlier, we
   optimistically split all sorts of edges, including EH edges.  The
4011   optimization passes in between may not have needed them; if not,
4012   we should undo the split.
4013
4014   Recognize this case by having one EH edge incoming to the BB and
4015   one normal edge outgoing; BB should be empty apart from the
4016   post_landing_pad label.
4017
   Note that this is slightly different from the empty handler case
   handled by cleanup_empty_eh, in that the handler may still contain
   code, but the landing pad has been separated from the handler.
   As such, cleanup_empty_eh relies on this transformation having
   been done first.  */
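/* Pictorially (a sketch of the CFG shape being recognized):

       <throwing stmt>
	    | EH
       <post landing pad>   (empty but for labels and debug stmts;
	    |		      one EH predecessor, one normal successor)
       <handler code>

   After unsplitting, the EH edge runs directly from the throwing
   statement's block to the handler block.  */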
4023
4024static bool
4025unsplit_eh (eh_landing_pad lp)
4026{
4027  basic_block bb = label_to_block (lp->post_landing_pad);
4028  gimple_stmt_iterator gsi;
4029  edge e_in, e_out;
4030
4031  /* Quickly check the edge counts on BB for singularity.  */
4032  if (!single_pred_p (bb) || !single_succ_p (bb))
4033    return false;
4034  e_in = single_pred_edge (bb);
4035  e_out = single_succ_edge (bb);
4036
4037  /* Input edge must be EH and output edge must be normal.  */
4038  if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4039    return false;
4040
4041  /* The block must be empty except for the labels and debug insns.  */
4042  gsi = gsi_after_labels (bb);
4043  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4044    gsi_next_nondebug (&gsi);
4045  if (!gsi_end_p (gsi))
4046    return false;
4047
4048  /* The destination block must not already have a landing pad
4049     for a different region.  */
4050  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4051    {
4052      glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4053      tree lab;
4054      int lp_nr;
4055
4056      if (!label_stmt)
4057	break;
4058      lab = gimple_label_label (label_stmt);
4059      lp_nr = EH_LANDING_PAD_NR (lab);
4060      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4061	return false;
4062    }
4063
  /* The new destination block must not already be a destination of
     the source block, lest we merge fallthru and EH edges and end up
     with all sorts of confusion.  */
4067  if (find_edge (e_in->src, e_out->dest))
4068    return false;
4069
4070  /* ??? We can get degenerate phis due to cfg cleanups.  I would have
4071     thought this should have been cleaned up by a phicprop pass, but
4072     that doesn't appear to handle virtuals.  Propagate by hand.  */
4073  if (!gimple_seq_empty_p (phi_nodes (bb)))
4074    {
4075      for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4076	{
4077	  gimple use_stmt;
4078	  gphi *phi = gpi.phi ();
4079	  tree lhs = gimple_phi_result (phi);
4080	  tree rhs = gimple_phi_arg_def (phi, 0);
4081	  use_operand_p use_p;
4082	  imm_use_iterator iter;
4083
4084	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4085	    {
4086	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4087		SET_USE (use_p, rhs);
4088	    }
4089
4090	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4091	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4092
4093	  remove_phi_node (&gpi, true);
4094	}
4095    }
4096
4097  if (dump_file && (dump_flags & TDF_DETAILS))
4098    fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4099	     lp->index, e_out->dest->index);
4100
4101  /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
4102     a successor edge, humor it.  But do the real CFG change with the
4103     predecessor of E_OUT in order to preserve the ordering of arguments
4104     to the PHI nodes in E_OUT->DEST.  */
4105  redirect_eh_edge_1 (e_in, e_out->dest, false);
4106  redirect_edge_pred (e_out, e_in->src);
4107  e_out->flags = e_in->flags;
4108  e_out->probability = e_in->probability;
4109  e_out->count = e_in->count;
4110  remove_edge (e_in);
4111
4112  return true;
4113}
4114
4115/* Examine each landing pad block and see if it matches unsplit_eh.  */
4116
4117static bool
4118unsplit_all_eh (void)
4119{
4120  bool changed = false;
4121  eh_landing_pad lp;
4122  int i;
4123
4124  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4125    if (lp)
4126      changed |= unsplit_eh (lp);
4127
4128  return changed;
4129}
4130
4131/* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
4132   to OLD_BB to NEW_BB; return true on success, false on failure.
4133
4134   OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4135   PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4136   Virtual PHIs may be deleted and marked for renaming.  */
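/* For example (an illustrative sketch): if NEW_BB contains

     x_3 = PHI <x_1 (old_bb), x_2 (other)>

   and OLD_BB contains the matching

     x_1 = PHI <x_4 (eh1), x_5 (eh2)>

   then, after the EH edges eh1 and eh2 are redirected to NEW_BB and
   OLD_BB goes unreachable, the former becomes

     x_3 = PHI <x_4 (eh1), x_5 (eh2), x_2 (other)>.  */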
4137
4138static bool
4139cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4140			     edge old_bb_out, bool change_region)
4141{
4142  gphi_iterator ngsi, ogsi;
4143  edge_iterator ei;
4144  edge e;
4145  bitmap ophi_handled;
4146
4147  /* The destination block must not be a regular successor for any
4148     of the preds of the landing pad.  Thus, avoid turning
4149        <..>
4150	 |  \ EH
4151	 |  <..>
4152	 |  /
4153	<..>
4154     into
4155        <..>
4156	|  | EH
4157	<..>
4158     which CFG verification would choke on.  See PR45172 and PR51089.  */
4159  FOR_EACH_EDGE (e, ei, old_bb->preds)
4160    if (find_edge (e->src, new_bb))
4161      return false;
4162
4163  FOR_EACH_EDGE (e, ei, old_bb->preds)
4164    redirect_edge_var_map_clear (e);
4165
4166  ophi_handled = BITMAP_ALLOC (NULL);
4167
4168  /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4169     for the edges we're going to move.  */
4170  for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4171    {
4172      gphi *ophi, *nphi = ngsi.phi ();
4173      tree nresult, nop;
4174
4175      nresult = gimple_phi_result (nphi);
4176      nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4177
4178      /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4179	 the source ssa_name.  */
4180      ophi = NULL;
4181      for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4182	{
4183	  ophi = ogsi.phi ();
4184	  if (gimple_phi_result (ophi) == nop)
4185	    break;
4186	  ophi = NULL;
4187	}
4188
4189      /* If we did find the corresponding PHI, copy those inputs.  */
4190      if (ophi)
4191	{
4192	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
4193	  if (!has_single_use (nop))
4194	    {
4195	      imm_use_iterator imm_iter;
4196	      use_operand_p use_p;
4197
4198	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4199		{
4200		  if (!gimple_debug_bind_p (USE_STMT (use_p))
4201		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4202			  || gimple_bb (USE_STMT (use_p)) != new_bb))
4203		    goto fail;
4204		}
4205	    }
4206	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4207	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4208	    {
4209	      location_t oloc;
4210	      tree oop;
4211
4212	      if ((e->flags & EDGE_EH) == 0)
4213		continue;
4214	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
4215	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4216	      redirect_edge_var_map_add (e, nresult, oop, oloc);
4217	    }
4218	}
      /* If we didn't find the PHI, then, whether NOP is a real
	 variable or a VOP, we know from the fact that OLD_BB is
	 tree_empty_eh_handler_p that the variable is unchanged from
	 input to the block and we can simply re-use the input to
	 NEW_BB from the OLD_BB_OUT edge.  */
4223      else
4224	{
4225	  location_t nloc
4226	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4227	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4228	    redirect_edge_var_map_add (e, nresult, nop, nloc);
4229	}
4230    }
4231
4232  /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
4233     we don't know what values from the other edges into NEW_BB to use.  */
4234  for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4235    {
4236      gphi *ophi = ogsi.phi ();
4237      tree oresult = gimple_phi_result (ophi);
4238      if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4239	goto fail;
4240    }
4241
4242  /* Finally, move the edges and update the PHIs.  */
4243  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4244    if (e->flags & EDGE_EH)
4245      {
	/* ???  CFG manipulation routines do not try to update loop
	   form on edge redirection.  Do so manually here for now.  */
	/* If we redirect a loop entry or latch edge, that will either
	   create a multiple-entry loop or rotate the loop.  If the
	   loops merge we may have created a loop with multiple latches.
	   None of this is easily fixed, so cancel the affected loop
	   and mark the other loop as possibly having multiple latches.  */
4253	if (e->dest == e->dest->loop_father->header)
4254	  {
4255	    mark_loop_for_removal (e->dest->loop_father);
4256	    new_bb->loop_father->latch = NULL;
4257	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4258	  }
4259	redirect_eh_edge_1 (e, new_bb, change_region);
4260	redirect_edge_succ (e, new_bb);
4261	flush_pending_stmts (e);
4262      }
4263    else
4264      ei_next (&ei);
4265
4266  BITMAP_FREE (ophi_handled);
4267  return true;
4268
4269 fail:
4270  FOR_EACH_EDGE (e, ei, old_bb->preds)
4271    redirect_edge_var_map_clear (e);
4272  BITMAP_FREE (ophi_handled);
4273  return false;
4274}
4275
/* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
   old region to NEW_REGION at BB; E_OUT is BB's single outgoing edge.  */
4278
4279static void
4280cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4281			  eh_landing_pad lp, eh_region new_region)
4282{
4283  gimple_stmt_iterator gsi;
4284  eh_landing_pad *pp;
4285
4286  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4287    continue;
4288  *pp = lp->next_lp;
4289
4290  lp->region = new_region;
4291  lp->next_lp = new_region->landing_pads;
4292  new_region->landing_pads = lp;
4293
4294  /* Delete the RESX that was matched within the empty handler block.  */
4295  gsi = gsi_last_bb (bb);
4296  unlink_stmt_vdef (gsi_stmt (gsi));
4297  gsi_remove (&gsi, true);
4298
4299  /* Clean up E_OUT for the fallthru.  */
4300  e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4301  e_out->probability = REG_BR_PROB_BASE;
4302}
4303
4304/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
4305   unsplitting than unsplit_eh was prepared to handle, e.g. when
4306   multiple incoming edges and phis are involved.  */
4307
4308static bool
4309cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4310{
4311  gimple_stmt_iterator gsi;
4312  tree lab;
4313
4314  /* We really ought not have totally lost everything following
4315     a landing pad label.  Given that BB is empty, there had better
4316     be a successor.  */
4317  gcc_assert (e_out != NULL);
4318
4319  /* The destination block must not already have a landing pad
4320     for a different region.  */
4321  lab = NULL;
4322  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4323    {
4324      glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4325      int lp_nr;
4326
4327      if (!stmt)
4328	break;
4329      lab = gimple_label_label (stmt);
4330      lp_nr = EH_LANDING_PAD_NR (lab);
4331      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4332	return false;
4333    }
4334
4335  /* Attempt to move the PHIs into the successor block.  */
4336  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4337    {
4338      if (dump_file && (dump_flags & TDF_DETAILS))
4339	fprintf (dump_file,
4340		 "Unsplit EH landing pad %d to block %i "
4341		 "(via cleanup_empty_eh).\n",
4342		 lp->index, e_out->dest->index);
4343      return true;
4344    }
4345
4346  return false;
4347}
4348
4349/* Return true if edge E_FIRST is part of an empty infinite loop
4350   or leads to such a loop through a series of single successor
4351   empty bbs.  */
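/* E.g. (a sketch) the single-block loop

     <bb 7>:
     goto <bb 7>;

   or a cycle of empty single-successor blocks leading back to
   E_FIRST's destination.  */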
4352
4353static bool
4354infinite_empty_loop_p (edge e_first)
4355{
4356  bool inf_loop = false;
4357  edge e;
4358
4359  if (e_first->dest == e_first->src)
4360    return true;
4361
4362  e_first->src->aux = (void *) 1;
4363  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4364    {
4365      gimple_stmt_iterator gsi;
4366      if (e->dest->aux)
4367	{
4368	  inf_loop = true;
4369	  break;
4370	}
4371      e->dest->aux = (void *) 1;
4372      gsi = gsi_after_labels (e->dest);
4373      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4374	gsi_next_nondebug (&gsi);
4375      if (!gsi_end_p (gsi))
4376	break;
4377    }
4378  e_first->src->aux = NULL;
4379  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4380    e->dest->aux = NULL;
4381
4382  return inf_loop;
4383}
4384
/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.
   This is similar to jump forwarding, just across EH edges.  */
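/* For instance (an illustration, not an exhaustive list): after
   optimization, a cleanup region's handler may consist of nothing but
   variable clobbers and a RESX rethrowing to the outer region, e.g.
   when the destructor it invoked was inlined and optimized away.
   Such a handler can be bypassed entirely.  */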
4389
4390static bool
4391cleanup_empty_eh (eh_landing_pad lp)
4392{
4393  basic_block bb = label_to_block (lp->post_landing_pad);
4394  gimple_stmt_iterator gsi;
4395  gimple resx;
4396  eh_region new_region;
4397  edge_iterator ei;
4398  edge e, e_out;
4399  bool has_non_eh_pred;
4400  bool ret = false;
4401  int new_lp_nr;
4402
  /* There can be zero or one edge out of BB.  This is the quickest test.  */
4404  switch (EDGE_COUNT (bb->succs))
4405    {
4406    case 0:
4407      e_out = NULL;
4408      break;
4409    case 1:
4410      e_out = single_succ_edge (bb);
4411      break;
4412    default:
4413      return false;
4414    }
4415
4416  resx = last_stmt (bb);
4417  if (resx && is_gimple_resx (resx))
4418    {
4419      if (stmt_can_throw_external (resx))
4420	optimize_clobbers (bb);
4421      else if (sink_clobbers (bb))
4422	ret = true;
4423    }
4424
4425  gsi = gsi_after_labels (bb);
4426
4427  /* Make sure to skip debug statements.  */
4428  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4429    gsi_next_nondebug (&gsi);
4430
4431  /* If the block is totally empty, look for more unsplitting cases.  */
4432  if (gsi_end_p (gsi))
4433    {
      /* For the degenerate case of an infinite loop, bail out.
	 If bb has no successors and is totally empty, which can happen
	 e.g. because of an incorrect noreturn attribute, bail out too.  */
4437      if (e_out == NULL
4438	  || infinite_empty_loop_p (e_out))
4439	return ret;
4440
4441      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4442    }
4443
4444  /* The block should consist only of a single RESX statement, modulo a
4445     preceding call to __builtin_stack_restore if there is no outgoing
4446     edge, since the call can be eliminated in this case.  */
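  /* I.e. (a sketch of the permitted shape when there is no outgoing
     edge):

       __builtin_stack_restore (saved_1);
       resx 2;  */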
4447  resx = gsi_stmt (gsi);
4448  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4449    {
4450      gsi_next (&gsi);
4451      resx = gsi_stmt (gsi);
4452    }
4453  if (!is_gimple_resx (resx))
4454    return ret;
4455  gcc_assert (gsi_one_before_end_p (gsi));
4456
4457  /* Determine if there are non-EH edges, or resx edges into the handler.  */
4458  has_non_eh_pred = false;
4459  FOR_EACH_EDGE (e, ei, bb->preds)
4460    if (!(e->flags & EDGE_EH))
4461      has_non_eh_pred = true;
4462
  /* Find the handler that is outer to the empty handler by looking at
     where the RESX instruction was vectored.  */
4465  new_lp_nr = lookup_stmt_eh_lp (resx);
4466  new_region = get_eh_region_from_lp_number (new_lp_nr);
4467
4468  /* If there's no destination region within the current function,
4469     redirection is trivial via removing the throwing statements from
4470     the EH region, removing the EH edges, and allowing the block
4471     to go unreachable.  */
4472  if (new_region == NULL)
4473    {
4474      gcc_assert (e_out == NULL);
4475      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4476	if (e->flags & EDGE_EH)
4477	  {
4478	    gimple stmt = last_stmt (e->src);
4479	    remove_stmt_from_eh_lp (stmt);
4480	    remove_edge (e);
4481	  }
4482	else
4483	  ei_next (&ei);
4484      goto succeed;
4485    }
4486
4487  /* If the destination region is a MUST_NOT_THROW, allow the runtime
4488     to handle the abort and allow the blocks to go unreachable.  */
4489  if (new_region->type == ERT_MUST_NOT_THROW)
4490    {
4491      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4492	if (e->flags & EDGE_EH)
4493	  {
4494	    gimple stmt = last_stmt (e->src);
4495	    remove_stmt_from_eh_lp (stmt);
4496	    add_stmt_to_eh_lp (stmt, new_lp_nr);
4497	    remove_edge (e);
4498	  }
4499	else
4500	  ei_next (&ei);
4501      goto succeed;
4502    }
4503
4504  /* Try to redirect the EH edges and merge the PHIs into the destination
4505     landing pad block.  If the merge succeeds, we'll already have redirected
4506     all the EH edges.  The handler itself will go unreachable if there were
4507     no normal edges.  */
4508  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4509    goto succeed;
4510
4511  /* Finally, if all input edges are EH edges, then we can (potentially)
4512     reduce the number of transfers from the runtime by moving the landing
4513     pad from the original region to the new region.  This is a win when
4514     we remove the last CLEANUP region along a particular exception
4515     propagation path.  Since nothing changes except for the region with
4516     which the landing pad is associated, the PHI nodes do not need to be
4517     adjusted at all.  */
4518  if (!has_non_eh_pred)
4519    {
4520      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4521      if (dump_file && (dump_flags & TDF_DETAILS))
4522	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4523		 lp->index, new_region->index);
4524
4525      /* ??? The CFG didn't change, but we may have rendered the
4526	 old EH region unreachable.  Trigger a cleanup there.  */
4527      return true;
4528    }
4529
4530  return ret;
4531
4532 succeed:
4533  if (dump_file && (dump_flags & TDF_DETAILS))
4534    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4535  remove_eh_landing_pad (lp);
4536  return true;
4537}
4538
4539/* Do a post-order traversal of the EH region tree.  Examine each
4540   post_landing_pad block and see if we can eliminate it as empty.  */
4541
4542static bool
4543cleanup_all_empty_eh (void)
4544{
4545  bool changed = false;
4546  eh_landing_pad lp;
4547  int i;
4548
4549  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4550    if (lp)
4551      changed |= cleanup_empty_eh (lp);
4552
4553  return changed;
4554}
4555
/* Perform cleanups and lowering of exception handling:
    1) cleanup regions whose handlers do nothing are optimized out
    2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
    3) info about regions containing instructions, and regions reachable
       via local EH edges, is collected
    4) the EH tree is pruned for regions no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
	 Unify those that have the same failure decl and locus.  */
4566
4567static unsigned int
4568execute_cleanup_eh_1 (void)
4569{
4570  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4571     looking up unreachable landing pads.  */
4572  remove_unreachable_handlers ();
4573
  /* Watch out for the region tree vanishing because all of its
     regions were unreachable.  */
4575  if (cfun->eh->region_tree)
4576    {
4577      bool changed = false;
4578
4579      if (optimize)
4580	changed |= unsplit_all_eh ();
4581      changed |= cleanup_all_empty_eh ();
4582
4583      if (changed)
4584	{
4585	  free_dominance_info (CDI_DOMINATORS);
4586	  free_dominance_info (CDI_POST_DOMINATORS);
4587
4588          /* We delayed all basic block deletion, as we may have performed
4589	     cleanups on EH edges while non-EH edges were still present.  */
4590	  delete_unreachable_blocks ();
4591
4592	  /* We manipulated the landing pads.  Remove any region that no
4593	     longer has a landing pad.  */
4594	  remove_unreachable_handlers_no_lp ();
4595
4596	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4597	}
4598    }
4599
4600  return 0;
4601}
4602
4603namespace {
4604
4605const pass_data pass_data_cleanup_eh =
4606{
4607  GIMPLE_PASS, /* type */
4608  "ehcleanup", /* name */
4609  OPTGROUP_NONE, /* optinfo_flags */
4610  TV_TREE_EH, /* tv_id */
4611  PROP_gimple_lcf, /* properties_required */
4612  0, /* properties_provided */
4613  0, /* properties_destroyed */
4614  0, /* todo_flags_start */
4615  0, /* todo_flags_finish */
4616};
4617
4618class pass_cleanup_eh : public gimple_opt_pass
4619{
4620public:
4621  pass_cleanup_eh (gcc::context *ctxt)
4622    : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4623  {}
4624
4625  /* opt_pass methods: */
4626  opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4627  virtual bool gate (function *fun)
4628    {
4629      return fun->eh != NULL && fun->eh->region_tree != NULL;
4630    }
4631
4632  virtual unsigned int execute (function *);
4633
4634}; // class pass_cleanup_eh
4635
4636unsigned int
4637pass_cleanup_eh::execute (function *fun)
4638{
  unsigned int ret = execute_cleanup_eh_1 ();
4640
  /* If the function no longer needs an EH personality routine,
     clear it.  This exposes cross-language inlining opportunities
4643     and avoids references to a never defined personality routine.  */
4644  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4645      && function_needs_eh_personality (fun) != eh_personality_lang)
4646    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4647
4648  return ret;
4649}
4650
4651} // anon namespace
4652
4653gimple_opt_pass *
4654make_pass_cleanup_eh (gcc::context *ctxt)
4655{
4656  return new pass_cleanup_eh (ctxt);
4657}
4658
/* Verify that the BB containing STMT as its last statement has
   precisely the edges that make_eh_edges would create.  */
4661
4662DEBUG_FUNCTION bool
4663verify_eh_edges (gimple stmt)
4664{
4665  basic_block bb = gimple_bb (stmt);
4666  eh_landing_pad lp = NULL;
4667  int lp_nr;
4668  edge_iterator ei;
4669  edge e, eh_edge;
4670
4671  lp_nr = lookup_stmt_eh_lp (stmt);
4672  if (lp_nr > 0)
4673    lp = get_eh_landing_pad_from_number (lp_nr);
4674
4675  eh_edge = NULL;
4676  FOR_EACH_EDGE (e, ei, bb->succs)
4677    {
4678      if (e->flags & EDGE_EH)
4679	{
4680	  if (eh_edge)
4681	    {
4682	      error ("BB %i has multiple EH edges", bb->index);
4683	      return true;
4684	    }
4685	  else
4686	    eh_edge = e;
4687	}
4688    }
4689
4690  if (lp == NULL)
4691    {
4692      if (eh_edge)
4693	{
	  error ("BB %i cannot throw but has an EH edge", bb->index);
4695	  return true;
4696	}
4697      return false;
4698    }
4699
4700  if (!stmt_could_throw_p (stmt))
4701    {
4702      error ("BB %i last statement has incorrectly set lp", bb->index);
4703      return true;
4704    }
4705
4706  if (eh_edge == NULL)
4707    {
4708      error ("BB %i is missing an EH edge", bb->index);
4709      return true;
4710    }
4711
4712  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4713    {
4714      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4715      return true;
4716    }
4717
4718  return false;
4719}
4720
4721/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */
4722
4723DEBUG_FUNCTION bool
4724verify_eh_dispatch_edge (geh_dispatch *stmt)
4725{
4726  eh_region r;
4727  eh_catch c;
4728  basic_block src, dst;
4729  bool want_fallthru = true;
4730  edge_iterator ei;
4731  edge e, fall_edge;
4732
4733  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4734  src = gimple_bb (stmt);
4735
4736  FOR_EACH_EDGE (e, ei, src->succs)
4737    gcc_assert (e->aux == NULL);
4738
4739  switch (r->type)
4740    {
4741    case ERT_TRY:
4742      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4743	{
4744	  dst = label_to_block (c->label);
4745	  e = find_edge (src, dst);
4746	  if (e == NULL)
4747	    {
4748	      error ("BB %i is missing an edge", src->index);
4749	      return true;
4750	    }
4751	  e->aux = (void *)e;
4752
4753	  /* A catch-all handler doesn't have a fallthru.  */
4754	  if (c->type_list == NULL)
4755	    {
4756	      want_fallthru = false;
4757	      break;
4758	    }
4759	}
4760      break;
4761
4762    case ERT_ALLOWED_EXCEPTIONS:
4763      dst = label_to_block (r->u.allowed.label);
4764      e = find_edge (src, dst);
4765      if (e == NULL)
4766	{
4767	  error ("BB %i is missing an edge", src->index);
4768	  return true;
4769	}
4770      e->aux = (void *)e;
4771      break;
4772
4773    default:
4774      gcc_unreachable ();
4775    }
4776
4777  fall_edge = NULL;
4778  FOR_EACH_EDGE (e, ei, src->succs)
4779    {
4780      if (e->flags & EDGE_FALLTHRU)
4781	{
4782	  if (fall_edge != NULL)
4783	    {
	      error ("BB %i has too many fallthru edges", src->index);
4785	      return true;
4786	    }
4787	  fall_edge = e;
4788	}
4789      else if (e->aux)
4790	e->aux = NULL;
4791      else
4792	{
	  error ("BB %i has an incorrect edge", src->index);
4794	  return true;
4795	}
4796    }
4797  if ((fall_edge != NULL) ^ want_fallthru)
4798    {
      error ("BB %i has an incorrect fallthru edge", src->index);
4800      return true;
4801    }
4802
4803  return false;
4804}
4805