tree-eh.c revision 1.10
1/* Exception handling semantics and decomposition for trees.
2   Copyright (C) 2003-2018 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify
7it under the terms of the GNU General Public License as published by
8the Free Software Foundation; either version 3, or (at your option)
9any later version.
10
11GCC is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14GNU General Public License for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3.  If not see
18<http://www.gnu.org/licenses/>.  */
19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
23#include "backend.h"
24#include "rtl.h"
25#include "tree.h"
26#include "gimple.h"
27#include "cfghooks.h"
28#include "tree-pass.h"
29#include "ssa.h"
30#include "cgraph.h"
31#include "diagnostic-core.h"
32#include "fold-const.h"
33#include "calls.h"
34#include "except.h"
35#include "cfganal.h"
36#include "cfgcleanup.h"
37#include "tree-eh.h"
38#include "gimple-iterator.h"
39#include "tree-cfg.h"
40#include "tree-into-ssa.h"
41#include "tree-ssa.h"
42#include "tree-inline.h"
43#include "langhooks.h"
44#include "cfgloop.h"
45#include "gimple-low.h"
46#include "stringpool.h"
47#include "attribs.h"
48#include "asan.h"
49#include "gimplify.h"
50
51/* In some instances a tree and a gimple need to be stored in the same
52   table, i.e. in a hash table.  This union makes that possible.  */
53typedef union {tree *tp; tree t; gimple *g;} treemple;
54
55/* Misc functions used in this file.  */
56
57/* Remember and lookup EH landing pad data for arbitrary statements.
58   Really this means any statement that could_throw_p.  We could
59   stuff this information into the stmt_ann data structure, but:
60
61   (1) We absolutely rely on this information being kept until
62   we get to rtl.  Once we're done with lowering here, if we lose
63   the information there's no way to recover it!
64
65   (2) There are many more statements that *cannot* throw as
66   compared to those that can.  We should be saving some amount
67   of space by only allocating memory for those that can throw.  */
68
69/* Add statement T in function IFUN to landing pad NUM.  */
70
71static void
72add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
73{
74  gcc_assert (num != 0);
75
76  if (!get_eh_throw_stmt_table (ifun))
77    set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));
78
79  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
80}
81
82/* Add statement T in the current function (cfun) to EH landing pad NUM.  */
83
84void
85add_stmt_to_eh_lp (gimple *t, int num)
86{
87  add_stmt_to_eh_lp_fn (cfun, t, num);
88}
89
90/* Add statement T to the single EH landing pad in REGION.  */
91
92static void
93record_stmt_eh_region (eh_region region, gimple *t)
94{
95  if (region == NULL)
96    return;
97  if (region->type == ERT_MUST_NOT_THROW)
98    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
99  else
100    {
101      eh_landing_pad lp = region->landing_pads;
102      if (lp == NULL)
103	lp = gen_eh_landing_pad (region);
104      else
105	gcc_assert (lp->next_lp == NULL);
106      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
107    }
108}
109
110
111/* Remove statement T in function IFUN from its EH landing pad.  */
112
113bool
114remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
115{
116  if (!get_eh_throw_stmt_table (ifun))
117    return false;
118
119  if (!get_eh_throw_stmt_table (ifun)->get (t))
120    return false;
121
122  get_eh_throw_stmt_table (ifun)->remove (t);
123  return true;
124}
125
126
127/* Remove statement T in the current function (cfun) from its
128   EH landing pad.  */
129
130bool
131remove_stmt_from_eh_lp (gimple *t)
132{
133  return remove_stmt_from_eh_lp_fn (cfun, t);
134}
135
136/* Determine if statement T is inside an EH region in function IFUN.
137   Positive numbers indicate a landing pad index; negative numbers
138   indicate a MUST_NOT_THROW region index; zero indicates that the
139   statement is not recorded in the region table.  */
140
141int
142lookup_stmt_eh_lp_fn (struct function *ifun, gimple *t)
143{
144  if (ifun->eh->throw_stmt_table == NULL)
145    return 0;
146
147  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
148  return lp_nr ? *lp_nr : 0;
149}
150
151/* Likewise, but always use the current function.  */
152
153int
154lookup_stmt_eh_lp (gimple *t)
155{
156  /* We can get called from initialized data when -fnon-call-exceptions
157     is on; prevent crash.  */
158  if (!cfun)
159    return 0;
160  return lookup_stmt_eh_lp_fn (cfun, t);
161}
162
163/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
164   nodes and LABEL_DECL nodes.  We will use this during the second phase to
165   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */
166
167struct finally_tree_node
168{
169  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
170     when deciding whether a GOTO to a certain LABEL_DECL (which is a
171     tree) leaves the TRY block, its necessary to record a tree in
172     tree) leaves the TRY block, it's necessary to record a tree in
173  treemple child;
174  gtry *parent;
175};
176
177/* Hashtable helpers.  */
178
179struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
180{
181  static inline hashval_t hash (const finally_tree_node *);
182  static inline bool equal (const finally_tree_node *,
183			    const finally_tree_node *);
184};
185
186inline hashval_t
187finally_tree_hasher::hash (const finally_tree_node *v)
188{
189  return (intptr_t)v->child.t >> 4;
190}
191
192inline bool
193finally_tree_hasher::equal (const finally_tree_node *v,
194			    const finally_tree_node *c)
195{
196  return v->child.t == c->child.t;
197}
198
199/* Note that this table is *not* marked GTY.  It is short-lived.  */
200static hash_table<finally_tree_hasher> *finally_tree;
201
202static void
203record_in_finally_tree (treemple child, gtry *parent)
204{
205  struct finally_tree_node *n;
206  finally_tree_node **slot;
207
208  n = XNEW (struct finally_tree_node);
209  n->child = child;
210  n->parent = parent;
211
212  slot = finally_tree->find_slot (n, INSERT);
213  gcc_assert (!*slot);
214  *slot = n;
215}
216
217static void
218collect_finally_tree (gimple *stmt, gtry *region);
219
220/* Go through the gimple sequence.  Works with collect_finally_tree to
221   record all GIMPLE_LABEL and GIMPLE_TRY statements. */
222
223static void
224collect_finally_tree_1 (gimple_seq seq, gtry *region)
225{
226  gimple_stmt_iterator gsi;
227
228  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
229    collect_finally_tree (gsi_stmt (gsi), region);
230}
231
232static void
233collect_finally_tree (gimple *stmt, gtry *region)
234{
235  treemple temp;
236
237  switch (gimple_code (stmt))
238    {
239    case GIMPLE_LABEL:
240      temp.t = gimple_label_label (as_a <glabel *> (stmt));
241      record_in_finally_tree (temp, region);
242      break;
243
244    case GIMPLE_TRY:
245      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
246        {
247          temp.g = stmt;
248          record_in_finally_tree (temp, region);
249          collect_finally_tree_1 (gimple_try_eval (stmt),
250				  as_a <gtry *> (stmt));
251	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
252        }
253      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
254        {
255          collect_finally_tree_1 (gimple_try_eval (stmt), region);
256          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
257        }
258      break;
259
260    case GIMPLE_CATCH:
261      collect_finally_tree_1 (gimple_catch_handler (
262				 as_a <gcatch *> (stmt)),
263			      region);
264      break;
265
266    case GIMPLE_EH_FILTER:
267      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
268      break;
269
270    case GIMPLE_EH_ELSE:
271      {
272	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
273	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
274	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
275      }
276      break;
277
278    default:
279      /* A type, a decl, or some kind of statement that we're not
280	 interested in.  Don't walk them.  */
281      break;
282    }
283}
284
285
286/* Use the finally tree to determine if a jump from START to TARGET
287   would leave the try_finally node that START lives in.  */
288
289static bool
290outside_finally_tree (treemple start, gimple *target)
291{
292  struct finally_tree_node n, *p;
293
294  do
295    {
296      n.child = start;
297      p = finally_tree->find (&n);
298      if (!p)
299	return true;
300      start.g = p->parent;
301    }
302  while (start.g != target);
303
304  return false;
305}
306
307/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
308   nodes into a set of gotos, magic labels, and eh regions.
309   The eh region creation is straight-forward, but frobbing all the gotos
310   and such into shape isn't.  */
311
312/* The sequence into which we record all EH stuff.  This will be
313   placed at the end of the function when we're all done.  */
314static gimple_seq eh_seq;
315
316/* Record whether an EH region contains something that can throw,
317   indexed by EH region number.  */
318static bitmap eh_region_may_contain_throw_map;
319
320/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
321   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
322   The idea is to record a gimple statement for everything except for
323   the conditionals, which get their labels recorded. Since labels are
324   of type 'tree', we need this node to store both gimple and tree
325   objects.  REPL_STMT is the sequence used to replace the goto/return
326   statement.  CONT_STMT is used to store the statement that allows
327   the return/goto to jump to the original destination. */
328
329struct goto_queue_node
330{
331  treemple stmt;
332  location_t location;
333  gimple_seq repl_stmt;
334  gimple *cont_stmt;
335  int index;
336  /* This is used when index >= 0 to indicate that stmt is a label (as
337     opposed to a goto stmt).  */
338  int is_label;
339};
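
/* Illustration only (not compiler code; names are invented): in source like

	void f (int n)
	{
	  try {
	    if (n)
	      goto out;
	    return;
	  } finally {
	    cleanup ();
	  }
	out:
	  other ();
	}

   both the goto and the return escape the TRY_FINALLY, so each gets a
   goto_queue_node: the return with index -1, the goto with the index of
   "out" in dest_array.  REPL_STMT is later filled with the statements
   that route control through the lowered FINALLY.  */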
340
341/* State of the world while lowering.  */
342
343struct leh_state
344{
345  /* What's "current" while constructing the eh region tree.  These
346     correspond to variables of the same name in cfun->eh, which we
347     don't have easy access to.  */
348  eh_region cur_region;
349
350  /* What's "current" for the purposes of __builtin_eh_pointer.  For
351     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
352     the associated ALLOWED_EXCEPTIONS, etc.  */
353  eh_region ehp_region;
354
355  /* Processing of TRY_FINALLY requires a bit more state.  This is
356     split out into a separate structure so that we don't have to
357     copy so much when processing other nodes.  */
358  struct leh_tf_state *tf;
359};
360
361struct leh_tf_state
362{
363  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
364     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
365     this so that outside_finally_tree can reliably reference the tree used
366     in the collect_finally_tree data structures.  */
367  gtry *try_finally_expr;
368  gtry *top_p;
369
370   /* While top_p is being lowered it is usually expanded into multiple
371      statements, so we need the following field to store them.  */
372  gimple_seq top_p_seq;
373
374  /* The state outside this try_finally node.  */
375  struct leh_state *outer;
376
377  /* The exception region created for it.  */
378  eh_region region;
379
380  /* The goto queue.  */
381  struct goto_queue_node *goto_queue;
382  size_t goto_queue_size;
383  size_t goto_queue_active;
384
385  /* Pointer map to help in searching goto_queue when it is large.  */
386  hash_map<gimple *, goto_queue_node *> *goto_queue_map;
387
388  /* The set of unique labels seen as entries in the goto queue.  */
389  vec<tree> dest_array;
390
391  /* A label to be added at the end of the completed transformed
392     sequence.  It will be set if may_fallthru was true *at one time*,
393     though subsequent transformations may have cleared that flag.  */
394  tree fallthru_label;
395
396  /* True if it is possible to fall out the bottom of the try block.
397     Cleared if the fallthru is converted to a goto.  */
398  bool may_fallthru;
399
400  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
401  bool may_return;
402
403  /* True if the finally block can receive an exception edge.
404     Cleared if the exception case is handled by code duplication.  */
405  bool may_throw;
406};
407
408static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);
409
410/* Search for STMT in the goto queue.  Return the replacement,
411   or null if the statement isn't in the queue.  */
412
413#define LARGE_GOTO_QUEUE 20
414
415static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
416
417static gimple_seq
418find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
419{
420  unsigned int i;
421
422  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
423    {
424      for (i = 0; i < tf->goto_queue_active; i++)
425	if (tf->goto_queue[i].stmt.g == stmt.g)
426	  return tf->goto_queue[i].repl_stmt;
427      return NULL;
428    }
429
430  /* If we have a large number of entries in the goto_queue, create a
431     pointer map and use that for searching.  */
432
433  if (!tf->goto_queue_map)
434    {
435      tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
436      for (i = 0; i < tf->goto_queue_active; i++)
437	{
438	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
439						  &tf->goto_queue[i]);
440	  gcc_assert (!existed);
441	}
442    }
443
444  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
445  if (slot != NULL)
446    return ((*slot)->repl_stmt);
447
448  return NULL;
449}
450
451/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
452   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
453   then we can just splat it in, otherwise we add the new stmts immediately
454   after the GIMPLE_COND and redirect.  */
455
456static void
457replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
458				gimple_stmt_iterator *gsi)
459{
460  tree label;
461  gimple_seq new_seq;
462  treemple temp;
463  location_t loc = gimple_location (gsi_stmt (*gsi));
464
465  temp.tp = tp;
466  new_seq = find_goto_replacement (tf, temp);
467  if (!new_seq)
468    return;
469
470  if (gimple_seq_singleton_p (new_seq)
471      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
472    {
473      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
474      return;
475    }
476
477  label = create_artificial_label (loc);
478  /* Set the new label for the GIMPLE_COND.  */
479  *tp = label;
480
481  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
482  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
483}
484
485/* The real work of replace_goto_queue.  Returns with GSI updated to
486   point to the next statement.  */
487
488static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
489
490static void
491replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
492		      gimple_stmt_iterator *gsi)
493{
494  gimple_seq seq;
495  treemple temp;
496  temp.g = NULL;
497
498  switch (gimple_code (stmt))
499    {
500    case GIMPLE_GOTO:
501    case GIMPLE_RETURN:
502      temp.g = stmt;
503      seq = find_goto_replacement (tf, temp);
504      if (seq)
505	{
506	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
507	  gsi_remove (gsi, false);
508	  return;
509	}
510      break;
511
512    case GIMPLE_COND:
513      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
514      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
515      break;
516
517    case GIMPLE_TRY:
518      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
519      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
520      break;
521    case GIMPLE_CATCH:
522      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
523				      as_a <gcatch *> (stmt)),
524				    tf);
525      break;
526    case GIMPLE_EH_FILTER:
527      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
528      break;
529    case GIMPLE_EH_ELSE:
530      {
531	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
532	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
533				      tf);
534	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
535				      tf);
536      }
537      break;
538
539    default:
540      /* These won't have gotos in them.  */
541      break;
542    }
543
544  gsi_next (gsi);
545}
546
547/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */
548
549static void
550replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
551{
552  gimple_stmt_iterator gsi = gsi_start (*seq);
553
554  while (!gsi_end_p (gsi))
555    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
556}
557
558/* Replace all goto queue members.  */
559
560static void
561replace_goto_queue (struct leh_tf_state *tf)
562{
563  if (tf->goto_queue_active == 0)
564    return;
565  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
566  replace_goto_queue_stmt_list (&eh_seq, tf);
567}
568
569/* Add a new record to the goto queue contained in TF. NEW_STMT is the
570   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
571   a gimple return. */
572
573static void
574record_in_goto_queue (struct leh_tf_state *tf,
575                      treemple new_stmt,
576                      int index,
577                      bool is_label,
578		      location_t location)
579{
580  size_t active, size;
581  struct goto_queue_node *q;
582
583  gcc_assert (!tf->goto_queue_map);
584
585  active = tf->goto_queue_active;
586  size = tf->goto_queue_size;
587  if (active >= size)
588    {
589      size = (size ? size * 2 : 32);
590      tf->goto_queue_size = size;
591      tf->goto_queue
592         = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
593    }
594
595  q = &tf->goto_queue[active];
596  tf->goto_queue_active = active + 1;
597
598  memset (q, 0, sizeof (*q));
599  q->stmt = new_stmt;
600  q->index = index;
601  q->location = location;
602  q->is_label = is_label;
603}
604
605/* Record the LABEL label in the goto queue contained in TF.
606   TF is not null.  */
607
608static void
609record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
610			    location_t location)
611{
612  int index;
613  treemple temp, new_stmt;
614
615  if (!label)
616    return;
617
618  /* Computed and non-local gotos do not get processed.  Given
619     their nature we can neither tell whether we've escaped the
620     finally block nor redirect them if we knew.  */
621  if (TREE_CODE (label) != LABEL_DECL)
622    return;
623
624  /* No need to record gotos that don't leave the try block.  */
625  temp.t = label;
626  if (!outside_finally_tree (temp, tf->try_finally_expr))
627    return;
628
629  if (! tf->dest_array.exists ())
630    {
631      tf->dest_array.create (10);
632      tf->dest_array.quick_push (label);
633      index = 0;
634    }
635  else
636    {
637      int n = tf->dest_array.length ();
638      for (index = 0; index < n; ++index)
639        if (tf->dest_array[index] == label)
640          break;
641      if (index == n)
642        tf->dest_array.safe_push (label);
643    }
644
645  /* In the case of a GOTO we want to record the destination label,
646     since with a GIMPLE_COND we have easy access to the then/else
647     labels. */
648  new_stmt = stmt;
649  record_in_goto_queue (tf, new_stmt, index, true, location);
650}
651
652/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
653   node, and if so record that fact in the goto queue associated with that
654   try_finally node.  */
655
656static void
657maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
658{
659  struct leh_tf_state *tf = state->tf;
660  treemple new_stmt;
661
662  if (!tf)
663    return;
664
665  switch (gimple_code (stmt))
666    {
667    case GIMPLE_COND:
668      {
669	gcond *cond_stmt = as_a <gcond *> (stmt);
670	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
671	record_in_goto_queue_label (tf, new_stmt,
672				    gimple_cond_true_label (cond_stmt),
673				    EXPR_LOCATION (*new_stmt.tp));
674	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
675	record_in_goto_queue_label (tf, new_stmt,
676				    gimple_cond_false_label (cond_stmt),
677				    EXPR_LOCATION (*new_stmt.tp));
678      }
679      break;
680    case GIMPLE_GOTO:
681      new_stmt.g = stmt;
682      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
683				  gimple_location (stmt));
684      break;
685
686    case GIMPLE_RETURN:
687      tf->may_return = true;
688      new_stmt.g = stmt;
689      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
690      break;
691
692    default:
693      gcc_unreachable ();
694    }
695}
696
697
698#if CHECKING_P
699/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
700   was in fact structured and we've not yet done jump threading, none
701   of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this.  */
702
703static void
704verify_norecord_switch_expr (struct leh_state *state,
705			     gswitch *switch_expr)
706{
707  struct leh_tf_state *tf = state->tf;
708  size_t i, n;
709
710  if (!tf)
711    return;
712
713  n = gimple_switch_num_labels (switch_expr);
714
715  for (i = 0; i < n; ++i)
716    {
717      treemple temp;
718      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
719      temp.t = lab;
720      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
721    }
722}
723#else
724#define verify_norecord_switch_expr(state, switch_expr)
725#endif
726
727/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
728   non-null, insert it before the new branch.  */
729
730static void
731do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
732{
733  gimple *x;
734
735  /* In the case of a return, the queue node must be a gimple statement.  */
736  gcc_assert (!q->is_label);
737
738  /* Note that the return value may have already been computed, e.g.,
739
740	int x;
741	int foo (void)
742	{
743	  x = 0;
744	  try {
745	    return x;
746	  } finally {
747	    x++;
748	  }
749	}
750
751     should return 0, not 1.  We don't have to do anything to make
752     this happen because the return value has been placed in the
753     RESULT_DECL already.  */
754
755  q->cont_stmt = q->stmt.g;
756
757  if (mod)
758    gimple_seq_add_seq (&q->repl_stmt, mod);
759
760  x = gimple_build_goto (finlab);
761  gimple_set_location (x, q->location);
762  gimple_seq_add_stmt (&q->repl_stmt, x);
763}
764
765/* Similar, but easier, for GIMPLE_GOTO.  */
766
767static void
768do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
769		     struct leh_tf_state *tf)
770{
771  ggoto *x;
772
773  gcc_assert (q->is_label);
774
775  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
776
777  if (mod)
778    gimple_seq_add_seq (&q->repl_stmt, mod);
779
780  x = gimple_build_goto (finlab);
781  gimple_set_location (x, q->location);
782  gimple_seq_add_stmt (&q->repl_stmt, x);
783}
784
785/* Emit a standard landing pad sequence into SEQ for REGION.  */
786
787static void
788emit_post_landing_pad (gimple_seq *seq, eh_region region)
789{
790  eh_landing_pad lp = region->landing_pads;
791  glabel *x;
792
793  if (lp == NULL)
794    lp = gen_eh_landing_pad (region);
795
796  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
797  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
798
799  x = gimple_build_label (lp->post_landing_pad);
800  gimple_seq_add_stmt (seq, x);
801}
802
803/* Emit a RESX statement into SEQ for REGION.  */
804
805static void
806emit_resx (gimple_seq *seq, eh_region region)
807{
808  gresx *x = gimple_build_resx (region->index);
809  gimple_seq_add_stmt (seq, x);
810  if (region->outer)
811    record_stmt_eh_region (region->outer, x);
812}
813
814/* Emit an EH_DISPATCH statement into SEQ for REGION.  */
815
816static void
817emit_eh_dispatch (gimple_seq *seq, eh_region region)
818{
819  geh_dispatch *x = gimple_build_eh_dispatch (region->index);
820  gimple_seq_add_stmt (seq, x);
821}
822
823/* Note that the current EH region may contain a throw, or a
824   call to a function which itself may contain a throw.  */
825
826static void
827note_eh_region_may_contain_throw (eh_region region)
828{
829  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
830    {
831      if (region->type == ERT_MUST_NOT_THROW)
832	break;
833      region = region->outer;
834      if (region == NULL)
835	break;
836    }
837}
838
839/* Check if REGION has been marked as containing a throw.  If REGION is
840   NULL, this predicate is false.  */
841
842static inline bool
843eh_region_may_contain_throw (eh_region r)
844{
845  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
846}
847
848/* We want to transform
849	try { body; } catch { stuff; }
850   to
851	normal_sequence:
852	  body;
853	  over:
854	eh_sequence:
855	  landing_pad:
856	  stuff;
857	  goto over;
858
859   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
860   should be placed before the second operand, or NULL.  OVER is
861   an existing label that should be put at the exit, or NULL.  */
862
863static gimple_seq
864frob_into_branch_around (gtry *tp, eh_region region, tree over)
865{
866  gimple *x;
867  gimple_seq cleanup, result;
868  location_t loc = gimple_location (tp);
869
870  cleanup = gimple_try_cleanup (tp);
871  result = gimple_try_eval (tp);
872
873  if (region)
874    emit_post_landing_pad (&eh_seq, region);
875
876  if (gimple_seq_may_fallthru (cleanup))
877    {
878      if (!over)
879	over = create_artificial_label (loc);
880      x = gimple_build_goto (over);
881      gimple_set_location (x, loc);
882      gimple_seq_add_stmt (&cleanup, x);
883    }
884  gimple_seq_add_seq (&eh_seq, cleanup);
885
886  if (over)
887    {
888      x = gimple_build_label (over);
889      gimple_seq_add_stmt (&result, x);
890    }
891  return result;
892}
893
894/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
895   Make sure to record all new labels found.  */
896
897static gimple_seq
898lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
899			     location_t loc)
900{
901  gtry *region = NULL;
902  gimple_seq new_seq;
903  gimple_stmt_iterator gsi;
904
905  new_seq = copy_gimple_seq_and_replace_locals (seq);
906
907  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
908    {
909      gimple *stmt = gsi_stmt (gsi);
910      /* We duplicate __builtin_stack_restore at -O0 in the hope of eliminating
911	 it on the EH paths.  When it is not eliminated, make it transparent in
912	 the debug info.  */
913      if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
914	gimple_set_location (stmt, UNKNOWN_LOCATION);
915      else if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
916	{
917	  tree block = gimple_block (stmt);
918	  gimple_set_location (stmt, loc);
919	  gimple_set_block (stmt, block);
920	}
921    }
922
923  if (outer_state->tf)
924    region = outer_state->tf->try_finally_expr;
925  collect_finally_tree_1 (new_seq, region);
926
927  return new_seq;
928}
929
930/* A subroutine of lower_try_finally.  Create a fallthru label for
931   the given try_finally state.  The only tricky bit here is that
932   we have to make sure to record the label in our outer context.  */
933
934static tree
935lower_try_finally_fallthru_label (struct leh_tf_state *tf)
936{
937  tree label = tf->fallthru_label;
938  treemple temp;
939
940  if (!label)
941    {
942      label = create_artificial_label (gimple_location (tf->try_finally_expr));
943      tf->fallthru_label = label;
944      if (tf->outer->tf)
945        {
946          temp.t = label;
947          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
948        }
949    }
950  return label;
951}
952
953/* A subroutine of lower_try_finally.  If FINALLY consists of a
954   GIMPLE_EH_ELSE node, return it.  */
955
956static inline geh_else *
957get_eh_else (gimple_seq finally)
958{
959  gimple *x = gimple_seq_first_stmt (finally);
960  if (gimple_code (x) == GIMPLE_EH_ELSE)
961    {
962      gcc_assert (gimple_seq_singleton_p (finally));
963      return as_a <geh_else *> (x);
964    }
965  return NULL;
966}
967
968/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
969   langhook returns non-null, then the language requires that the exception
970   path out of a try_finally be treated specially.  To wit: the code within
971   the finally block may not itself throw an exception.  We have two choices
972   here. First we can duplicate the finally block and wrap it in a
973   must_not_throw region.  Second, we can generate code like
974
975	try {
976	  finally_block;
977	} catch {
978	  if (fintmp == eh_edge)
979	    protect_cleanup_actions;
980	}
981
982   where "fintmp" is the temporary used in the switch statement generation
983   alternative considered below.  For the nonce, we always choose the first
984   option.
985
986   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */
987
988static void
989honor_protect_cleanup_actions (struct leh_state *outer_state,
990			       struct leh_state *this_state,
991			       struct leh_tf_state *tf)
992{
993  gimple_seq finally = gimple_try_cleanup (tf->top_p);
994
995  /* EH_ELSE doesn't come from user code; only compiler generated stuff.
996     It does need to be handled here, so as to separate the (different)
997     EH path from the normal path.  But we should not attempt to wrap
998     it with a must-not-throw node (which indeed gets in the way).  */
999  if (geh_else *eh_else = get_eh_else (finally))
1000    {
1001      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
1002      finally = gimple_eh_else_e_body (eh_else);
1003
1004      /* Let the ELSE see the exception that's being processed.  */
1005      eh_region save_ehp = this_state->ehp_region;
1006      this_state->ehp_region = this_state->cur_region;
1007      lower_eh_constructs_1 (this_state, &finally);
1008      this_state->ehp_region = save_ehp;
1009    }
1010  else
1011    {
1012      /* First check for nothing to do.  */
1013      if (lang_hooks.eh_protect_cleanup_actions == NULL)
1014	return;
1015      tree actions = lang_hooks.eh_protect_cleanup_actions ();
1016      if (actions == NULL)
1017	return;
1018
1019      if (this_state)
1020	finally = lower_try_finally_dup_block (finally, outer_state,
1021	  gimple_location (tf->try_finally_expr));
1022
1023      /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1024	 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1025	 to be in an enclosing scope, but needs to be implemented at this level
1026	 to avoid a nesting violation (see wrap_temporary_cleanups in
1027	 cp/decl.c).  Since it's logically at an outer level, we should call
1028	 terminate before we get to it, so strip it away before adding the
1029	 MUST_NOT_THROW filter.  */
1030      gimple_stmt_iterator gsi = gsi_start (finally);
1031      gimple *x = gsi_stmt (gsi);
1032      if (gimple_code (x) == GIMPLE_TRY
1033	  && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1034	  && gimple_try_catch_is_cleanup (x))
1035	{
1036	  gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1037	  gsi_remove (&gsi, false);
1038	}
1039
1040      /* Wrap the block with protect_cleanup_actions as the action.  */
1041      geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
1042      gtry *try_stmt = gimple_build_try (finally,
1043					 gimple_seq_alloc_with_stmt (eh_mnt),
1044					 GIMPLE_TRY_CATCH);
1045      finally = lower_eh_must_not_throw (outer_state, try_stmt);
1046    }
1047
1048  /* Drop all of this into the exception sequence.  */
1049  emit_post_landing_pad (&eh_seq, tf->region);
1050  gimple_seq_add_seq (&eh_seq, finally);
1051  if (gimple_seq_may_fallthru (finally))
1052    emit_resx (&eh_seq, tf->region);
1053
1054  /* Having now been handled, EH isn't to be considered with
1055     the rest of the outgoing edges.  */
1056  tf->may_throw = false;
1057}
1058
1059/* A subroutine of lower_try_finally.  We have determined that there is
1060   no fallthru edge out of the finally block.  This means that there is
1061   no outgoing edge corresponding to any incoming edge.  Restructure the
1062   try_finally node for this special case.  */
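
/* Illustration only (not compiler code; names are invented): a cleanup
   that cannot fall off its end, e.g.

	try {
	  body ();
	} finally {
	  for (;;)
	    wait_for_shutdown ();
	}

   leaves no outgoing edge from the FINALLY block, so every goto or
   return escaping the TRY is simply redirected to a label placed at
   the head of the lowered FINALLY sequence.  */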
1063
1064static void
1065lower_try_finally_nofallthru (struct leh_state *state,
1066			      struct leh_tf_state *tf)
1067{
1068  tree lab;
1069  gimple *x;
1070  geh_else *eh_else;
1071  gimple_seq finally;
1072  struct goto_queue_node *q, *qe;
1073
1074  lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1075
1076  /* We expect that tf->top_p is a GIMPLE_TRY. */
1077  finally = gimple_try_cleanup (tf->top_p);
1078  tf->top_p_seq = gimple_try_eval (tf->top_p);
1079
1080  x = gimple_build_label (lab);
1081  gimple_seq_add_stmt (&tf->top_p_seq, x);
1082
1083  q = tf->goto_queue;
1084  qe = q + tf->goto_queue_active;
1085  for (; q < qe; ++q)
1086    if (q->index < 0)
1087      do_return_redirection (q, lab, NULL);
1088    else
1089      do_goto_redirection (q, lab, NULL, tf);
1090
1091  replace_goto_queue (tf);
1092
1093  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
1094  eh_else = get_eh_else (finally);
1095  if (eh_else)
1096    {
1097      finally = gimple_eh_else_n_body (eh_else);
1098      lower_eh_constructs_1 (state, &finally);
1099      gimple_seq_add_seq (&tf->top_p_seq, finally);
1100
1101      if (tf->may_throw)
1102	{
1103	  finally = gimple_eh_else_e_body (eh_else);
1104	  lower_eh_constructs_1 (state, &finally);
1105
1106	  emit_post_landing_pad (&eh_seq, tf->region);
1107	  gimple_seq_add_seq (&eh_seq, finally);
1108	}
1109    }
1110  else
1111    {
1112      lower_eh_constructs_1 (state, &finally);
1113      gimple_seq_add_seq (&tf->top_p_seq, finally);
1114
1115      if (tf->may_throw)
1116	{
1117	  emit_post_landing_pad (&eh_seq, tf->region);
1118
1119	  x = gimple_build_goto (lab);
1120	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
1121	  gimple_seq_add_stmt (&eh_seq, x);
1122	}
1123    }
1124}
1125
1126/* A subroutine of lower_try_finally.  We have determined that there is
1127   exactly one destination of the finally block.  Restructure the
1128   try_finally node for this special case.  */
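
/* Illustration only (not compiler code; names are invented): if the only
   way out of the TRY is a plain return, e.g.

	try {
	  x = 0;
	  return;
	} finally {
	  release ();
	}

   then the FINALLY is emitted exactly once, the queued return is
   redirected to a label at its head, and the original return is placed
   immediately after it.  */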
1129
1130static void
1131lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1132{
1133  struct goto_queue_node *q, *qe;
1134  geh_else *eh_else;
1135  glabel *label_stmt;
1136  gimple *x;
1137  gimple_seq finally;
1138  gimple_stmt_iterator gsi;
1139  tree finally_label;
1140  location_t loc = gimple_location (tf->try_finally_expr);
1141
1142  finally = gimple_try_cleanup (tf->top_p);
1143  tf->top_p_seq = gimple_try_eval (tf->top_p);
1144
1145  /* Since there's only one destination, and the destination edge can only
1146     either be EH or non-EH, that implies that all of our incoming edges
1147     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
1148  eh_else = get_eh_else (finally);
1149  if (eh_else)
1150    {
1151      if (tf->may_throw)
1152	finally = gimple_eh_else_e_body (eh_else);
1153      else
1154	finally = gimple_eh_else_n_body (eh_else);
1155    }
1156
1157  lower_eh_constructs_1 (state, &finally);
1158
1159  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1160    {
1161      gimple *stmt = gsi_stmt (gsi);
1162      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1163	{
1164	  tree block = gimple_block (stmt);
1165	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1166	  gimple_set_block (stmt, block);
1167	}
1168    }
1169
1170  if (tf->may_throw)
1171    {
1172      /* Only reachable via the exception edge.  Add the given label to
1173         the head of the FINALLY block.  Append a RESX at the end.  */
1174      emit_post_landing_pad (&eh_seq, tf->region);
1175      gimple_seq_add_seq (&eh_seq, finally);
1176      emit_resx (&eh_seq, tf->region);
1177      return;
1178    }
1179
1180  if (tf->may_fallthru)
1181    {
1182      /* Only reachable via the fallthru edge.  Do nothing but let
1183	 the two blocks run together; we'll fall out the bottom.  */
1184      gimple_seq_add_seq (&tf->top_p_seq, finally);
1185      return;
1186    }
1187
1188  finally_label = create_artificial_label (loc);
1189  label_stmt = gimple_build_label (finally_label);
1190  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);
1191
1192  gimple_seq_add_seq (&tf->top_p_seq, finally);
1193
1194  q = tf->goto_queue;
1195  qe = q + tf->goto_queue_active;
1196
1197  if (tf->may_return)
1198    {
1199      /* Reachable by return expressions only.  Redirect them.  */
1200      for (; q < qe; ++q)
1201	do_return_redirection (q, finally_label, NULL);
1202      replace_goto_queue (tf);
1203    }
1204  else
1205    {
1206      /* Reachable by goto expressions only.  Redirect them.  */
1207      for (; q < qe; ++q)
1208	do_goto_redirection (q, finally_label, NULL, tf);
1209      replace_goto_queue (tf);
1210
1211      if (tf->dest_array[0] == tf->fallthru_label)
1212	{
1213	  /* Reachable by goto to fallthru label only.  Redirect it
1214	     to the new label (already created, sadly), and do not
1215	     emit the final branch out, or the fallthru label.  */
1216	  tf->fallthru_label = NULL;
1217	  return;
1218	}
1219    }
1220
1221  /* Place the original return/goto to the original destination
1222     immediately after the finally block. */
1223  x = tf->goto_queue[0].cont_stmt;
1224  gimple_seq_add_stmt (&tf->top_p_seq, x);
1225  maybe_record_in_goto_queue (state, x);
1226}
1227
1228/* A subroutine of lower_try_finally.  There are multiple edges incoming
1229   and outgoing from the finally block.  Implement this by duplicating the
1230   finally block for every destination.  */
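
/* Illustration only (not compiler code; names are invented): with both a
   fallthru exit and a return escaping the TRY, e.g.

	try {
	  if (cond)
	    return;
	  work ();
	} finally {
	  release ();
	}

   this strategy emits one copy of release () on the fallthru path and
   another at the label to which the escaping return is redirected,
   followed by the original return; a further copy goes on the EH path
   if the region may throw.  */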
1231
1232static void
1233lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1234{
1235  gimple_seq finally;
1236  gimple_seq new_stmt;
1237  gimple_seq seq;
1238  gimple *x;
1239  geh_else *eh_else;
1240  tree tmp;
1241  location_t tf_loc = gimple_location (tf->try_finally_expr);
1242
1243  finally = gimple_try_cleanup (tf->top_p);
1244
1245  /* Notice EH_ELSE, and simplify some of the remaining code
1246     by considering FINALLY to be the normal return path only.  */
1247  eh_else = get_eh_else (finally);
1248  if (eh_else)
1249    finally = gimple_eh_else_n_body (eh_else);
1250
1251  tf->top_p_seq = gimple_try_eval (tf->top_p);
1252  new_stmt = NULL;
1253
1254  if (tf->may_fallthru)
1255    {
1256      seq = lower_try_finally_dup_block (finally, state, tf_loc);
1257      lower_eh_constructs_1 (state, &seq);
1258      gimple_seq_add_seq (&new_stmt, seq);
1259
1260      tmp = lower_try_finally_fallthru_label (tf);
1261      x = gimple_build_goto (tmp);
1262      gimple_set_location (x, tf_loc);
1263      gimple_seq_add_stmt (&new_stmt, x);
1264    }
1265
1266  if (tf->may_throw)
1267    {
1268      /* We don't need to copy the EH path of EH_ELSE,
1269	 since it is only emitted once.  */
1270      if (eh_else)
1271	seq = gimple_eh_else_e_body (eh_else);
1272      else
1273	seq = lower_try_finally_dup_block (finally, state, tf_loc);
1274      lower_eh_constructs_1 (state, &seq);
1275
1276      emit_post_landing_pad (&eh_seq, tf->region);
1277      gimple_seq_add_seq (&eh_seq, seq);
1278      emit_resx (&eh_seq, tf->region);
1279    }
1280
1281  if (tf->goto_queue)
1282    {
1283      struct goto_queue_node *q, *qe;
1284      int return_index, index;
1285      struct labels_s
1286      {
1287	struct goto_queue_node *q;
1288	tree label;
1289      } *labels;
1290
1291      return_index = tf->dest_array.length ();
1292      labels = XCNEWVEC (struct labels_s, return_index + 1);
1293
1294      q = tf->goto_queue;
1295      qe = q + tf->goto_queue_active;
1296      for (; q < qe; q++)
1297	{
1298	  index = q->index < 0 ? return_index : q->index;
1299
1300	  if (!labels[index].q)
1301	    labels[index].q = q;
1302	}
1303
1304      for (index = 0; index < return_index + 1; index++)
1305	{
1306	  tree lab;
1307
1308	  q = labels[index].q;
1309	  if (! q)
1310	    continue;
1311
1312	  lab = labels[index].label
1313	    = create_artificial_label (tf_loc);
1314
1315	  if (index == return_index)
1316	    do_return_redirection (q, lab, NULL);
1317	  else
1318	    do_goto_redirection (q, lab, NULL, tf);
1319
1320	  x = gimple_build_label (lab);
1321          gimple_seq_add_stmt (&new_stmt, x);
1322
1323	  seq = lower_try_finally_dup_block (finally, state, q->location);
1324	  lower_eh_constructs_1 (state, &seq);
1325          gimple_seq_add_seq (&new_stmt, seq);
1326
1327          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1328	  maybe_record_in_goto_queue (state, q->cont_stmt);
1329	}
1330
1331      for (q = tf->goto_queue; q < qe; q++)
1332	{
1333	  tree lab;
1334
1335	  index = q->index < 0 ? return_index : q->index;
1336
1337	  if (labels[index].q == q)
1338	    continue;
1339
1340	  lab = labels[index].label;
1341
1342	  if (index == return_index)
1343	    do_return_redirection (q, lab, NULL);
1344	  else
1345	    do_goto_redirection (q, lab, NULL, tf);
1346	}
1347
1348      replace_goto_queue (tf);
1349      free (labels);
1350    }
1351
1352  /* Need to link new stmts after running replace_goto_queue due
1353     to not wanting to process the same goto stmts twice.  */
1354  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1355}
1356
1357/* A subroutine of lower_try_finally.  There are multiple edges incoming
1358   and outgoing from the finally block.  Implement this by instrumenting
1359   each incoming edge and creating a switch statement at the end of the
1360   finally block that branches to the appropriate destination.  */
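
/* Illustration only (not compiler code; label names and index values are
   invented -- the real ones come from return_index, eh_index and
   fallthru_index computed below): for the example used above, this
   strategy keeps a single copy of the FINALLY and selects the
   destination afterwards, roughly

	<fallthru path>:  finally_tmp = 1;  goto finally_label;
	<return path>:    finally_tmp = 0;  goto finally_label;
	finally_label:
	  release ();
	  switch (finally_tmp)
	    {
	    case 0:  return;
	    case 1:  goto after_try;
	    }

   Compare this with the per-destination copies made by
   lower_try_finally_copy above.  */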
1361
1362static void
1363lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1364{
1365  struct goto_queue_node *q, *qe;
1366  tree finally_tmp, finally_label;
1367  int return_index, eh_index, fallthru_index;
1368  int nlabels, ndests, j, last_case_index;
1369  tree last_case;
1370  auto_vec<tree> case_label_vec;
1371  gimple_seq switch_body = NULL;
1372  gimple *x;
1373  geh_else *eh_else;
1374  tree tmp;
1375  gimple *switch_stmt;
1376  gimple_seq finally;
1377  hash_map<tree, gimple *> *cont_map = NULL;
1378  /* The location of the TRY_FINALLY stmt.  */
1379  location_t tf_loc = gimple_location (tf->try_finally_expr);
1380  /* The location of the finally block.  */
1381  location_t finally_loc;
1382
1383  finally = gimple_try_cleanup (tf->top_p);
1384  eh_else = get_eh_else (finally);
1385
1386  /* Mash the TRY block to the head of the chain.  */
1387  tf->top_p_seq = gimple_try_eval (tf->top_p);
1388
1389  /* The location of the finally is either the last stmt in the finally
1390     block or the location of the TRY_FINALLY itself.  */
1391  x = gimple_seq_last_stmt (finally);
1392  finally_loc = x ? gimple_location (x) : tf_loc;
1393
1394  /* Prepare for switch statement generation.  */
1395  nlabels = tf->dest_array.length ();
1396  return_index = nlabels;
1397  eh_index = return_index + tf->may_return;
1398  fallthru_index = eh_index + (tf->may_throw && !eh_else);
1399  ndests = fallthru_index + tf->may_fallthru;
1400
1401  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1402  finally_label = create_artificial_label (finally_loc);
1403
1404  /* We use vec::quick_push on case_label_vec throughout this function,
1405     since we know the size in advance and allocate precisely as much
1406     space as needed.  */
1407  case_label_vec.create (ndests);
1408  last_case = NULL;
1409  last_case_index = 0;
1410
1411  /* Begin inserting code for getting to the finally block.  Things
1412     are done in this order to correspond to the sequence in which the
1413     code is laid out.  */
1414
1415  if (tf->may_fallthru)
1416    {
1417      x = gimple_build_assign (finally_tmp,
1418			       build_int_cst (integer_type_node,
1419					      fallthru_index));
1420      gimple_set_location (x, finally_loc);
1421      gimple_seq_add_stmt (&tf->top_p_seq, x);
1422
1423      tmp = build_int_cst (integer_type_node, fallthru_index);
1424      last_case = build_case_label (tmp, NULL,
1425				    create_artificial_label (finally_loc));
1426      case_label_vec.quick_push (last_case);
1427      last_case_index++;
1428
1429      x = gimple_build_label (CASE_LABEL (last_case));
1430      gimple_seq_add_stmt (&switch_body, x);
1431
1432      tmp = lower_try_finally_fallthru_label (tf);
1433      x = gimple_build_goto (tmp);
1434      gimple_set_location (x, finally_loc);
1435      gimple_seq_add_stmt (&switch_body, x);
1436    }
1437
1438  /* For EH_ELSE, emit the exception path (plus resx) now, then
1439     subsequently we only need consider the normal path.  */
1440  if (eh_else)
1441    {
1442      if (tf->may_throw)
1443	{
1444	  finally = gimple_eh_else_e_body (eh_else);
1445	  lower_eh_constructs_1 (state, &finally);
1446
1447	  emit_post_landing_pad (&eh_seq, tf->region);
1448	  gimple_seq_add_seq (&eh_seq, finally);
1449	  emit_resx (&eh_seq, tf->region);
1450	}
1451
1452      finally = gimple_eh_else_n_body (eh_else);
1453    }
1454  else if (tf->may_throw)
1455    {
1456      emit_post_landing_pad (&eh_seq, tf->region);
1457
1458      x = gimple_build_assign (finally_tmp,
1459			       build_int_cst (integer_type_node, eh_index));
1460      gimple_seq_add_stmt (&eh_seq, x);
1461
1462      x = gimple_build_goto (finally_label);
1463      gimple_set_location (x, tf_loc);
1464      gimple_seq_add_stmt (&eh_seq, x);
1465
1466      tmp = build_int_cst (integer_type_node, eh_index);
1467      last_case = build_case_label (tmp, NULL,
1468				    create_artificial_label (tf_loc));
1469      case_label_vec.quick_push (last_case);
1470      last_case_index++;
1471
1472      x = gimple_build_label (CASE_LABEL (last_case));
1473      gimple_seq_add_stmt (&eh_seq, x);
1474      emit_resx (&eh_seq, tf->region);
1475    }
1476
1477  x = gimple_build_label (finally_label);
1478  gimple_seq_add_stmt (&tf->top_p_seq, x);
1479
1480  lower_eh_constructs_1 (state, &finally);
1481  gimple_seq_add_seq (&tf->top_p_seq, finally);
1482
1483  /* Redirect each incoming goto edge.  */
1484  q = tf->goto_queue;
1485  qe = q + tf->goto_queue_active;
1486  j = last_case_index + tf->may_return;
1487  /* Prepare the assignments to finally_tmp that are executed upon the
1488     entrance through a particular edge. */
1489  for (; q < qe; ++q)
1490    {
1491      gimple_seq mod = NULL;
1492      int switch_id;
1493      unsigned int case_index;
1494
1495      if (q->index < 0)
1496	{
1497	  x = gimple_build_assign (finally_tmp,
1498				   build_int_cst (integer_type_node,
1499						  return_index));
1500	  gimple_seq_add_stmt (&mod, x);
1501	  do_return_redirection (q, finally_label, mod);
1502	  switch_id = return_index;
1503	}
1504      else
1505	{
1506	  x = gimple_build_assign (finally_tmp,
1507				   build_int_cst (integer_type_node, q->index));
1508	  gimple_seq_add_stmt (&mod, x);
1509	  do_goto_redirection (q, finally_label, mod, tf);
1510	  switch_id = q->index;
1511	}
1512
1513      case_index = j + q->index;
1514      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1515        {
1516          tree case_lab;
1517	  tmp = build_int_cst (integer_type_node, switch_id);
1518          case_lab = build_case_label (tmp, NULL,
1519				       create_artificial_label (tf_loc));
1520          /* We store the cont_stmt in the pointer map, so that we can recover
1521             it in the loop below.  */
1522          if (!cont_map)
1523	    cont_map = new hash_map<tree, gimple *>;
1524          cont_map->put (case_lab, q->cont_stmt);
1525          case_label_vec.quick_push (case_lab);
1526        }
1527    }
1528  for (j = last_case_index; j < last_case_index + nlabels; j++)
1529    {
1530      gimple *cont_stmt;
1531
1532      last_case = case_label_vec[j];
1533
1534      gcc_assert (last_case);
1535      gcc_assert (cont_map);
1536
1537      cont_stmt = *cont_map->get (last_case);
1538
1539      x = gimple_build_label (CASE_LABEL (last_case));
1540      gimple_seq_add_stmt (&switch_body, x);
1541      gimple_seq_add_stmt (&switch_body, cont_stmt);
1542      maybe_record_in_goto_queue (state, cont_stmt);
1543    }
1544  if (cont_map)
1545    delete cont_map;
1546
1547  replace_goto_queue (tf);
1548
1549  /* Make sure that the last case is the default label, as one is required.
1550     Then sort the labels, which is also required in GIMPLE.  */
1551  CASE_LOW (last_case) = NULL;
1552  tree tem = case_label_vec.pop ();
1553  gcc_assert (tem == last_case);
1554  sort_case_labels (case_label_vec);
1555
1556  /* Build the switch statement, setting last_case to be the default
1557     label.  */
1558  switch_stmt = gimple_build_switch (finally_tmp, last_case,
1559				     case_label_vec);
1560  gimple_set_location (switch_stmt, finally_loc);
1561
1562  /* Need to link SWITCH_STMT after running replace_goto_queue
1563     due to not wanting to process the same goto stmts twice.  */
1564  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1565  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1566}
1567
1568/* Decide whether or not we are going to duplicate the finally block.
1569   There are several considerations.
1570
1571   In particular, we'd like to prevent egregious code growth.  One way to
1572   do this is to estimate the size of the finally block, multiply
1573   that by the number of copies we'd need to make, and compare against
1574   the estimate of the size of the switch machinery we'd have to add.  */
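
/* A worked example of the estimate described above (numbers are
   invented): a FINALLY of about 10 insns with 3 destinations gives

	f_estimate  = (10 + 1) * 3 = 33
	sw_estimate = 10 + 2 * 3   = 16

   so when optimizing for size the switch form wins, while at -O2 the
   copy form is still chosen because f_estimate < 100.  */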
1575
1576static bool
1577decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1578{
1579  int f_estimate, sw_estimate;
1580  geh_else *eh_else;
1581
1582  /* If there's an EH_ELSE involved, the exception path is separate
1583     and really doesn't come into play for this computation.  */
1584  eh_else = get_eh_else (finally);
1585  if (eh_else)
1586    {
1587      ndests -= may_throw;
1588      finally = gimple_eh_else_n_body (eh_else);
1589    }
1590
1591  if (!optimize)
1592    {
1593      gimple_stmt_iterator gsi;
1594
1595      if (ndests == 1)
1596        return true;
1597
1598      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1599	{
1600	  /* Duplicate __builtin_stack_restore in the hope of eliminating it
1601	     on the EH paths and, consequently, useless cleanups.  */
1602	  gimple *stmt = gsi_stmt (gsi);
1603	  if (!is_gimple_debug (stmt)
1604	      && !gimple_clobber_p (stmt)
1605	      && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1606	    return false;
1607	}
1608      return true;
1609    }
1610
1611  /* Finally estimate N times, plus N gotos.  */
1612  f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
1613  f_estimate = (f_estimate + 1) * ndests;
1614
1615  /* Switch statement (cost 10), N variable assignments, N gotos.  */
1616  sw_estimate = 10 + 2 * ndests;
1617
1618  /* Optimize for size clearly wants our best guess.  */
1619  if (optimize_function_for_size_p (cfun))
1620    return f_estimate < sw_estimate;
1621
1622  /* ??? These numbers are completely made up so far.  */
1623  if (optimize > 1)
1624    return f_estimate < 100 || f_estimate < sw_estimate * 2;
1625  else
1626    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1627}
1628
1629/* REG is the enclosing region for a possible cleanup region, or the region
1630   itself.  Returns TRUE if such a region would be unreachable.
1631
1632   Cleanup regions within a must-not-throw region aren't actually reachable
1633   even if there are throwing stmts within them, because the personality
1634   routine will call terminate before unwinding.  */
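
/* Illustration only (not compiler code; names are invented): in C++
   source such as

	void f () noexcept
	{
	  Obj o;		/* destructor is a cleanup */
	  may_throw ();
	}

   the cleanup region for "o" sits inside the MUST_NOT_THROW region
   created for the noexcept function, so unwinding would call terminate
   before ever reaching it; the cleanup region is effectively dead.  */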
1635
1636static bool
1637cleanup_is_dead_in (eh_region reg)
1638{
1639  while (reg && reg->type == ERT_CLEANUP)
1640    reg = reg->outer;
1641  return (reg && reg->type == ERT_MUST_NOT_THROW);
1642}
1643
1644/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY nodes
1645   to a sequence of labels and blocks, plus the exception region trees
1646   that record all the magic.  This is complicated by the need to
1647   arrange for the FINALLY block to be executed on all exits.  */
1648
1649static gimple_seq
1650lower_try_finally (struct leh_state *state, gtry *tp)
1651{
1652  struct leh_tf_state this_tf;
1653  struct leh_state this_state;
1654  int ndests;
1655  gimple_seq old_eh_seq;
1656
1657  /* Process the try block.  */
1658
1659  memset (&this_tf, 0, sizeof (this_tf));
1660  this_tf.try_finally_expr = tp;
1661  this_tf.top_p = tp;
1662  this_tf.outer = state;
1663  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1664    {
1665      this_tf.region = gen_eh_region_cleanup (state->cur_region);
1666      this_state.cur_region = this_tf.region;
1667    }
1668  else
1669    {
1670      this_tf.region = NULL;
1671      this_state.cur_region = state->cur_region;
1672    }
1673
1674  this_state.ehp_region = state->ehp_region;
1675  this_state.tf = &this_tf;
1676
1677  old_eh_seq = eh_seq;
1678  eh_seq = NULL;
1679
1680  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1681
1682  /* Determine if the try block is escaped through the bottom.  */
1683  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1684
1685  /* Determine if any exceptions are possible within the try block.  */
1686  if (this_tf.region)
1687    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1688  if (this_tf.may_throw)
1689    honor_protect_cleanup_actions (state, &this_state, &this_tf);
1690
1691  /* Determine how many edges (still) reach the finally block.  Or rather,
1692     how many destinations are reached by the finally block.  Use this to
1693     determine how we process the finally block itself.  */
1694
1695  ndests = this_tf.dest_array.length ();
1696  ndests += this_tf.may_fallthru;
1697  ndests += this_tf.may_return;
1698  ndests += this_tf.may_throw;
1699
1700  /* If the FINALLY block is not reachable, dike it out.  */
1701  if (ndests == 0)
1702    {
1703      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1704      gimple_try_set_cleanup (tp, NULL);
1705    }
1706  /* If the finally block doesn't fall through, then any destination
1707     we might try to impose there isn't reached either.  There may be
1708     some minor amount of cleanup and redirection still needed.  */
1709  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1710    lower_try_finally_nofallthru (state, &this_tf);
1711
1712  /* We can easily special-case redirection to a single destination.  */
1713  else if (ndests == 1)
1714    lower_try_finally_onedest (state, &this_tf);
1715  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1716				    gimple_try_cleanup (tp)))
1717    lower_try_finally_copy (state, &this_tf);
1718  else
1719    lower_try_finally_switch (state, &this_tf);
1720
1721  /* If someone requested we add a label at the end of the transformed
1722     block, do so.  */
1723  if (this_tf.fallthru_label)
1724    {
1725      /* This must be reached only if ndests == 0. */
1726      gimple *x = gimple_build_label (this_tf.fallthru_label);
1727      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1728    }
1729
1730  this_tf.dest_array.release ();
1731  free (this_tf.goto_queue);
1732  if (this_tf.goto_queue_map)
1733    delete this_tf.goto_queue_map;
1734
1735  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1736     If there was no old eh_seq, then the append is trivially already done.  */
1737  if (old_eh_seq)
1738    {
1739      if (eh_seq == NULL)
1740	eh_seq = old_eh_seq;
1741      else
1742	{
1743	  gimple_seq new_eh_seq = eh_seq;
1744	  eh_seq = old_eh_seq;
1745	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1746	}
1747    }
1748
1749  return this_tf.top_p_seq;
1750}
1751
1752/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
1753   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1754   exception region trees that record all the magic.  */
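/* An illustrative sketch only (the generated GIMPLE differs in detail):
   for

	try { BODY } catch (T) { HANDLER }

   BODY is lowered inside a new try region, HANDLER is placed behind an
   artificial label recorded in that region's catch list, and the cleanup
   sequence becomes an EH_DISPATCH for the region followed by a RESX that
   re-raises exceptions matched by none of the catch clauses.  */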
1755
1756static gimple_seq
1757lower_catch (struct leh_state *state, gtry *tp)
1758{
1759  eh_region try_region = NULL;
1760  struct leh_state this_state = *state;
1761  gimple_stmt_iterator gsi;
1762  tree out_label;
1763  gimple_seq new_seq, cleanup;
1764  gimple *x;
1765  location_t try_catch_loc = gimple_location (tp);
1766
1767  if (flag_exceptions)
1768    {
1769      try_region = gen_eh_region_try (state->cur_region);
1770      this_state.cur_region = try_region;
1771    }
1772
1773  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1774
1775  if (!eh_region_may_contain_throw (try_region))
1776    return gimple_try_eval (tp);
1777
1778  new_seq = NULL;
1779  emit_eh_dispatch (&new_seq, try_region);
1780  emit_resx (&new_seq, try_region);
1781
1782  this_state.cur_region = state->cur_region;
1783  this_state.ehp_region = try_region;
1784
1785  /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1786     itself, so that e.g. for coverage purposes the nested cleanups don't
1787     appear before the cleanup body.  See PR64634 for details.  */
1788  gimple_seq old_eh_seq = eh_seq;
1789  eh_seq = NULL;
1790
1791  out_label = NULL;
1792  cleanup = gimple_try_cleanup (tp);
1793  for (gsi = gsi_start (cleanup);
1794       !gsi_end_p (gsi);
1795       gsi_next (&gsi))
1796    {
1797      eh_catch c;
1798      gcatch *catch_stmt;
1799      gimple_seq handler;
1800
1801      catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1802      c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1803
1804      handler = gimple_catch_handler (catch_stmt);
1805      lower_eh_constructs_1 (&this_state, &handler);
1806
1807      c->label = create_artificial_label (UNKNOWN_LOCATION);
1808      x = gimple_build_label (c->label);
1809      gimple_seq_add_stmt (&new_seq, x);
1810
1811      gimple_seq_add_seq (&new_seq, handler);
1812
1813      if (gimple_seq_may_fallthru (new_seq))
1814	{
1815	  if (!out_label)
1816	    out_label = create_artificial_label (try_catch_loc);
1817
1818	  x = gimple_build_goto (out_label);
1819	  gimple_seq_add_stmt (&new_seq, x);
1820	}
1821      if (!c->type_list)
1822	break;
1823    }
1824
1825  gimple_try_set_cleanup (tp, new_seq);
1826
1827  gimple_seq new_eh_seq = eh_seq;
1828  eh_seq = old_eh_seq;
1829  gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1830  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1831  return ret_seq;
1832}
1833
1834/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
1835   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1836   region trees that record all the magic.  */
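/* An illustrative sketch only: for a filter that allows the exception
   types T1, ..., Tn (the construct produced for an old-style C++ throw
   specification, for example), the cleanup becomes an EH_DISPATCH/RESX
   pair plus a labelled failure sequence that is run when an exception of
   a disallowed type reaches the region.  */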
1837
1838static gimple_seq
1839lower_eh_filter (struct leh_state *state, gtry *tp)
1840{
1841  struct leh_state this_state = *state;
1842  eh_region this_region = NULL;
1843  gimple *inner, *x;
1844  gimple_seq new_seq;
1845
1846  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1847
1848  if (flag_exceptions)
1849    {
1850      this_region = gen_eh_region_allowed (state->cur_region,
1851				           gimple_eh_filter_types (inner));
1852      this_state.cur_region = this_region;
1853    }
1854
1855  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1856
1857  if (!eh_region_may_contain_throw (this_region))
1858    return gimple_try_eval (tp);
1859
1860  new_seq = NULL;
1861  this_state.cur_region = state->cur_region;
1862  this_state.ehp_region = this_region;
1863
1864  emit_eh_dispatch (&new_seq, this_region);
1865  emit_resx (&new_seq, this_region);
1866
1867  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1868  x = gimple_build_label (this_region->u.allowed.label);
1869  gimple_seq_add_stmt (&new_seq, x);
1870
1871  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1872  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1873
1874  gimple_try_set_cleanup (tp, new_seq);
1875
1876  return frob_into_branch_around (tp, this_region, NULL);
1877}
1878
1879/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
1880   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1881   plus the exception region trees that record all the magic.  */
1882
1883static gimple_seq
1884lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1885{
1886  struct leh_state this_state = *state;
1887
1888  if (flag_exceptions)
1889    {
1890      gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1891      eh_region this_region;
1892
1893      this_region = gen_eh_region_must_not_throw (state->cur_region);
1894      this_region->u.must_not_throw.failure_decl
1895	= gimple_eh_must_not_throw_fndecl (
1896	    as_a <geh_mnt *> (inner));
1897      this_region->u.must_not_throw.failure_loc
1898	= LOCATION_LOCUS (gimple_location (tp));
1899
1900      /* In order to get mangling applied to this decl, we must mark it
1901	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
1902	 needs to happen.  */
1903      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1904
1905      this_state.cur_region = this_region;
1906    }
1907
1908  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1909
1910  return gimple_try_eval (tp);
1911}
1912
1913/* Implement a cleanup expression.  This is similar to try-finally,
1914   except that we only execute the cleanup block for exception edges.  */
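/* An illustrative sketch only: this handles a GIMPLE_TRY_CATCH whose
   handler is a plain cleanup sequence, e.g. a destructor protecting a
   partially constructed C++ object.  The normal path falls straight
   through the body; the EH path runs the cleanup and then re-raises into
   the enclosing region.  */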
1915
1916static gimple_seq
1917lower_cleanup (struct leh_state *state, gtry *tp)
1918{
1919  struct leh_state this_state = *state;
1920  eh_region this_region = NULL;
1921  struct leh_tf_state fake_tf;
1922  gimple_seq result;
1923  bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1924
1925  if (flag_exceptions && !cleanup_dead)
1926    {
1927      this_region = gen_eh_region_cleanup (state->cur_region);
1928      this_state.cur_region = this_region;
1929    }
1930
1931  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1932
1933  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1934    return gimple_try_eval (tp);
1935
1936  /* Build enough of a try-finally state so that we can reuse
1937     honor_protect_cleanup_actions.  */
1938  memset (&fake_tf, 0, sizeof (fake_tf));
1939  fake_tf.top_p = fake_tf.try_finally_expr = tp;
1940  fake_tf.outer = state;
1941  fake_tf.region = this_region;
1942  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1943  fake_tf.may_throw = true;
1944
1945  honor_protect_cleanup_actions (state, NULL, &fake_tf);
1946
1947  if (fake_tf.may_throw)
1948    {
1949      /* In this case honor_protect_cleanup_actions had nothing to do,
1950	 and we should process this normally.  */
1951      lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1952      result = frob_into_branch_around (tp, this_region,
1953                                        fake_tf.fallthru_label);
1954    }
1955  else
1956    {
1957      /* In this case honor_protect_cleanup_actions did nearly all of
1958	 the work.  All we have left is to append the fallthru_label.  */
1959
1960      result = gimple_try_eval (tp);
1961      if (fake_tf.fallthru_label)
1962	{
1963	  gimple *x = gimple_build_label (fake_tf.fallthru_label);
1964	  gimple_seq_add_stmt (&result, x);
1965	}
1966    }
1967  return result;
1968}
1969
1970/* Main loop for lowering eh constructs. Also moves gsi to the next
1971   statement. */
1972
1973static void
1974lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1975{
1976  gimple_seq replace;
1977  gimple *x;
1978  gimple *stmt = gsi_stmt (*gsi);
1979
1980  switch (gimple_code (stmt))
1981    {
1982    case GIMPLE_CALL:
1983      {
1984	tree fndecl = gimple_call_fndecl (stmt);
1985	tree rhs, lhs;
1986
1987	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1988	  switch (DECL_FUNCTION_CODE (fndecl))
1989	    {
1990	    case BUILT_IN_EH_POINTER:
1991	      /* The front end may have generated a call to
1992		 __builtin_eh_pointer (0) within a catch region.  Replace
1993		 this zero argument with the current catch region number.  */
1994	      if (state->ehp_region)
1995		{
1996		  tree nr = build_int_cst (integer_type_node,
1997					   state->ehp_region->index);
1998		  gimple_call_set_arg (stmt, 0, nr);
1999		}
2000	      else
2001		{
2002		  /* The user has done something silly.  Remove it.  */
2003		  rhs = null_pointer_node;
2004		  goto do_replace;
2005		}
2006	      break;
2007
2008	    case BUILT_IN_EH_FILTER:
2009	      /* ??? This should never appear, but since it's a builtin it
2010		 is accessible to abuse by users.  Just remove it and
2011		 replace the use with the arbitrary value zero.  */
2012	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2013	    do_replace:
2014	      lhs = gimple_call_lhs (stmt);
2015	      x = gimple_build_assign (lhs, rhs);
2016	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
2017	      /* FALLTHRU */
2018
2019	    case BUILT_IN_EH_COPY_VALUES:
2020	      /* Likewise this should not appear.  Remove it.  */
2021	      gsi_remove (gsi, true);
2022	      return;
2023
2024	    default:
2025	      break;
2026	    }
2027      }
2028      /* FALLTHRU */
2029
2030    case GIMPLE_ASSIGN:
2031      /* If the stmt can throw, use a new temporary for the assignment
2032         to a LHS.  This makes sure the old value of the LHS is
2033	 available on the EH edge.  Only do so for statements that
2034	 potentially fall through (e.g. no noreturn calls), otherwise
2035	 this new assignment might create fake fallthru regions.  */
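      /* Illustratively (the temporary's name is made up here):

	   lhs = possibly_throwing_op (a, b);

	 becomes

	   tmp = possibly_throwing_op (a, b);
	   lhs = tmp;

	 so LHS still holds its previous value if the operation throws.  */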
2036      if (stmt_could_throw_p (stmt)
2037	  && gimple_has_lhs (stmt)
2038	  && gimple_stmt_may_fallthru (stmt)
2039	  && !tree_could_throw_p (gimple_get_lhs (stmt))
2040	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2041	{
2042	  tree lhs = gimple_get_lhs (stmt);
2043	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
2044	  gimple *s = gimple_build_assign (lhs, tmp);
2045	  gimple_set_location (s, gimple_location (stmt));
2046	  gimple_set_block (s, gimple_block (stmt));
2047	  gimple_set_lhs (stmt, tmp);
2048	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2049	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2050	    DECL_GIMPLE_REG_P (tmp) = 1;
2051	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
2052	}
2053      /* Look for things that can throw exceptions, and record them.  */
2054      if (state->cur_region && stmt_could_throw_p (stmt))
2055	{
2056	  record_stmt_eh_region (state->cur_region, stmt);
2057	  note_eh_region_may_contain_throw (state->cur_region);
2058	}
2059      break;
2060
2061    case GIMPLE_COND:
2062    case GIMPLE_GOTO:
2063    case GIMPLE_RETURN:
2064      maybe_record_in_goto_queue (state, stmt);
2065      break;
2066
2067    case GIMPLE_SWITCH:
2068      verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2069      break;
2070
2071    case GIMPLE_TRY:
2072      {
2073	gtry *try_stmt = as_a <gtry *> (stmt);
2074	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2075	  replace = lower_try_finally (state, try_stmt);
2076	else
2077	  {
2078	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2079	    if (!x)
2080	      {
2081		replace = gimple_try_eval (try_stmt);
2082		lower_eh_constructs_1 (state, &replace);
2083	      }
2084	    else
2085	      switch (gimple_code (x))
2086		{
2087		case GIMPLE_CATCH:
2088		  replace = lower_catch (state, try_stmt);
2089		  break;
2090		case GIMPLE_EH_FILTER:
2091		  replace = lower_eh_filter (state, try_stmt);
2092		  break;
2093		case GIMPLE_EH_MUST_NOT_THROW:
2094		  replace = lower_eh_must_not_throw (state, try_stmt);
2095		  break;
2096		case GIMPLE_EH_ELSE:
2097		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2098		  gcc_unreachable ();
2099		default:
2100		  replace = lower_cleanup (state, try_stmt);
2101		  break;
2102		}
2103	  }
2104      }
2105
2106      /* Remove the old stmt and insert the transformed sequence
2107	 instead. */
2108      gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2109      gsi_remove (gsi, true);
2110
2111      /* Return since we don't want gsi_next ().  */
2112      return;
2113
2114    case GIMPLE_EH_ELSE:
2115      /* We should be eliminating this in lower_try_finally et al.  */
2116      gcc_unreachable ();
2117
2118    default:
2119      /* A type, a decl, or some kind of statement that we're not
2120	 interested in.  Don't walk them.  */
2121      break;
2122    }
2123
2124  gsi_next (gsi);
2125}
2126
2127/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2128
2129static void
2130lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2131{
2132  gimple_stmt_iterator gsi;
2133  for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2134    lower_eh_constructs_2 (state, &gsi);
2135}
2136
2137namespace {
2138
2139const pass_data pass_data_lower_eh =
2140{
2141  GIMPLE_PASS, /* type */
2142  "eh", /* name */
2143  OPTGROUP_NONE, /* optinfo_flags */
2144  TV_TREE_EH, /* tv_id */
2145  PROP_gimple_lcf, /* properties_required */
2146  PROP_gimple_leh, /* properties_provided */
2147  0, /* properties_destroyed */
2148  0, /* todo_flags_start */
2149  0, /* todo_flags_finish */
2150};
2151
2152class pass_lower_eh : public gimple_opt_pass
2153{
2154public:
2155  pass_lower_eh (gcc::context *ctxt)
2156    : gimple_opt_pass (pass_data_lower_eh, ctxt)
2157  {}
2158
2159  /* opt_pass methods: */
2160  virtual unsigned int execute (function *);
2161
2162}; // class pass_lower_eh
2163
2164unsigned int
2165pass_lower_eh::execute (function *fun)
2166{
2167  struct leh_state null_state;
2168  gimple_seq bodyp;
2169
2170  bodyp = gimple_body (current_function_decl);
2171  if (bodyp == NULL)
2172    return 0;
2173
2174  finally_tree = new hash_table<finally_tree_hasher> (31);
2175  eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2176  memset (&null_state, 0, sizeof (null_state));
2177
2178  collect_finally_tree_1 (bodyp, NULL);
2179  lower_eh_constructs_1 (&null_state, &bodyp);
2180  gimple_set_body (current_function_decl, bodyp);
2181
2182  /* We assume there's a return statement, or something, at the end of
2183     the function, and thus plopping the EH sequence afterward won't
2184     change anything.  */
2185  gcc_assert (!gimple_seq_may_fallthru (bodyp));
2186  gimple_seq_add_seq (&bodyp, eh_seq);
2187
2188  /* We assume that since BODYP already existed, adding EH_SEQ to it
2189     didn't change its value, and we don't have to re-set the function.  */
2190  gcc_assert (bodyp == gimple_body (current_function_decl));
2191
2192  delete finally_tree;
2193  finally_tree = NULL;
2194  BITMAP_FREE (eh_region_may_contain_throw_map);
2195  eh_seq = NULL;
2196
2197  /* If this function needs a language-specific EH personality routine
2198     and the frontend didn't already set one, do so now.  */
2199  if (function_needs_eh_personality (fun) == eh_personality_lang
2200      && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2201    DECL_FUNCTION_PERSONALITY (current_function_decl)
2202      = lang_hooks.eh_personality ();
2203
2204  return 0;
2205}
2206
2207} // anon namespace
2208
2209gimple_opt_pass *
2210make_pass_lower_eh (gcc::context *ctxt)
2211{
2212  return new pass_lower_eh (ctxt);
2213}
2214
2215/* Create the multiple edges from an EH_DISPATCH statement to all of
2216   the possible handlers for its EH region.  Return true if there's
2217   no fallthru edge; false if there is.  */
2218
2219bool
2220make_eh_dispatch_edges (geh_dispatch *stmt)
2221{
2222  eh_region r;
2223  eh_catch c;
2224  basic_block src, dst;
2225
2226  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2227  src = gimple_bb (stmt);
2228
2229  switch (r->type)
2230    {
2231    case ERT_TRY:
2232      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2233	{
2234	  dst = label_to_block (c->label);
2235	  make_edge (src, dst, 0);
2236
2237	  /* A catch-all handler doesn't have a fallthru.  */
2238	  if (c->type_list == NULL)
2239	    return false;
2240	}
2241      break;
2242
2243    case ERT_ALLOWED_EXCEPTIONS:
2244      dst = label_to_block (r->u.allowed.label);
2245      make_edge (src, dst, 0);
2246      break;
2247
2248    default:
2249      gcc_unreachable ();
2250    }
2251
2252  return true;
2253}
2254
2255/* Create the single EH edge from STMT to its nearest landing pad,
2256   if there is such a landing pad within the current function.  */
2257
2258void
2259make_eh_edges (gimple *stmt)
2260{
2261  basic_block src, dst;
2262  eh_landing_pad lp;
2263  int lp_nr;
2264
2265  lp_nr = lookup_stmt_eh_lp (stmt);
2266  if (lp_nr <= 0)
2267    return;
2268
2269  lp = get_eh_landing_pad_from_number (lp_nr);
2270  gcc_assert (lp != NULL);
2271
2272  src = gimple_bb (stmt);
2273  dst = label_to_block (lp->post_landing_pad);
2274  make_edge (src, dst, EDGE_EH);
2275}
2276
2277/* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2278   do not actually perform the final edge redirection.
2279
2280   CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2281   we intend to change the destination EH region as well; this means
2282   EH_LANDING_PAD_NR must already be set on the destination block label.
2283   If false, we're being called from generic cfg manipulation code and we
2284   should preserve our place within the region tree.  */
2285
2286static void
2287redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2288{
2289  eh_landing_pad old_lp, new_lp;
2290  basic_block old_bb;
2291  gimple *throw_stmt;
2292  int old_lp_nr, new_lp_nr;
2293  tree old_label, new_label;
2294  edge_iterator ei;
2295  edge e;
2296
2297  old_bb = edge_in->dest;
2298  old_label = gimple_block_label (old_bb);
2299  old_lp_nr = EH_LANDING_PAD_NR (old_label);
2300  gcc_assert (old_lp_nr > 0);
2301  old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2302
2303  throw_stmt = last_stmt (edge_in->src);
2304  gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2305
2306  new_label = gimple_block_label (new_bb);
2307
2308  /* Look for an existing region that might be using NEW_BB already.  */
2309  new_lp_nr = EH_LANDING_PAD_NR (new_label);
2310  if (new_lp_nr)
2311    {
2312      new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2313      gcc_assert (new_lp);
2314
2315      /* Unless CHANGE_REGION is true, the new and old landing pad
2316	 had better be associated with the same EH region.  */
2317      gcc_assert (change_region || new_lp->region == old_lp->region);
2318    }
2319  else
2320    {
2321      new_lp = NULL;
2322      gcc_assert (!change_region);
2323    }
2324
2325  /* Notice when we redirect the last EH edge away from OLD_BB.  */
2326  FOR_EACH_EDGE (e, ei, old_bb->preds)
2327    if (e != edge_in && (e->flags & EDGE_EH))
2328      break;
2329
2330  if (new_lp)
2331    {
2332      /* NEW_LP already exists.  If there are still edges into OLD_LP,
2333	 there's nothing to do with the EH tree.  If there are no more
2334	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2335	 If CHANGE_REGION is true, then our caller is expecting to remove
2336	 the landing pad.  */
2337      if (e == NULL && !change_region)
2338	remove_eh_landing_pad (old_lp);
2339    }
2340  else
2341    {
2342      /* No correct landing pad exists.  If there are no more edges
2343	 into OLD_LP, then we can simply re-use the existing landing pad.
2344	 Otherwise, we have to create a new landing pad.  */
2345      if (e == NULL)
2346	{
2347	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2348	  new_lp = old_lp;
2349	}
2350      else
2351	new_lp = gen_eh_landing_pad (old_lp->region);
2352      new_lp->post_landing_pad = new_label;
2353      EH_LANDING_PAD_NR (new_label) = new_lp->index;
2354    }
2355
2356  /* Maybe move the throwing statement to the new region.  */
2357  if (old_lp != new_lp)
2358    {
2359      remove_stmt_from_eh_lp (throw_stmt);
2360      add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2361    }
2362}
2363
2364/* Redirect EH edge EDGE_IN to NEW_BB.  */
2365
2366edge
2367redirect_eh_edge (edge edge_in, basic_block new_bb)
2368{
2369  redirect_eh_edge_1 (edge_in, new_bb, false);
2370  return ssa_redirect_edge (edge_in, new_bb);
2371}
2372
2373/* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
2374   labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2375   The actual edge update will happen in the caller.  */
2376
2377void
2378redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2379{
2380  tree new_lab = gimple_block_label (new_bb);
2381  bool any_changed = false;
2382  basic_block old_bb;
2383  eh_region r;
2384  eh_catch c;
2385
2386  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2387  switch (r->type)
2388    {
2389    case ERT_TRY:
2390      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2391	{
2392	  old_bb = label_to_block (c->label);
2393	  if (old_bb == e->dest)
2394	    {
2395	      c->label = new_lab;
2396	      any_changed = true;
2397	    }
2398	}
2399      break;
2400
2401    case ERT_ALLOWED_EXCEPTIONS:
2402      old_bb = label_to_block (r->u.allowed.label);
2403      gcc_assert (old_bb == e->dest);
2404      r->u.allowed.label = new_lab;
2405      any_changed = true;
2406      break;
2407
2408    default:
2409      gcc_unreachable ();
2410    }
2411
2412  gcc_assert (any_changed);
2413}
2414
2415/* Helper function for operation_could_trap_p and stmt_could_throw_p.  */
2416
2417bool
2418operation_could_trap_helper_p (enum tree_code op,
2419			       bool fp_operation,
2420			       bool honor_trapv,
2421			       bool honor_nans,
2422			       bool honor_snans,
2423			       tree divisor,
2424			       bool *handled)
2425{
2426  *handled = true;
2427  switch (op)
2428    {
2429    case TRUNC_DIV_EXPR:
2430    case CEIL_DIV_EXPR:
2431    case FLOOR_DIV_EXPR:
2432    case ROUND_DIV_EXPR:
2433    case EXACT_DIV_EXPR:
2434    case CEIL_MOD_EXPR:
2435    case FLOOR_MOD_EXPR:
2436    case ROUND_MOD_EXPR:
2437    case TRUNC_MOD_EXPR:
2438    case RDIV_EXPR:
2439      if (honor_snans)
2440	return true;
2441      if (fp_operation)
2442	return flag_trapping_math;
2443      if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2444        return true;
2445      return false;
2446
2447    case LT_EXPR:
2448    case LE_EXPR:
2449    case GT_EXPR:
2450    case GE_EXPR:
2451    case LTGT_EXPR:
2452      /* Some floating point comparisons may trap.  */
2453      return honor_nans;
2454
2455    case EQ_EXPR:
2456    case NE_EXPR:
2457    case UNORDERED_EXPR:
2458    case ORDERED_EXPR:
2459    case UNLT_EXPR:
2460    case UNLE_EXPR:
2461    case UNGT_EXPR:
2462    case UNGE_EXPR:
2463    case UNEQ_EXPR:
2464      return honor_snans;
2465
2466    case NEGATE_EXPR:
2467    case ABS_EXPR:
2468    case CONJ_EXPR:
2469      /* These operations don't trap with floating point.  */
2470      if (honor_trapv)
2471	return true;
2472      return false;
2473
2474    case PLUS_EXPR:
2475    case MINUS_EXPR:
2476    case MULT_EXPR:
2477      /* Any floating arithmetic may trap.  */
2478      if (fp_operation && flag_trapping_math)
2479	return true;
2480      if (honor_trapv)
2481	return true;
2482      return false;
2483
2484    case COMPLEX_EXPR:
2485    case CONSTRUCTOR:
2486      /* Constructing an object cannot trap.  */
2487      return false;
2488
2489    default:
2490      /* Any floating arithmetic may trap.  */
2491      if (fp_operation && flag_trapping_math)
2492	return true;
2493
2494      *handled = false;
2495      return false;
2496    }
2497}
2498
2499/* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
2500   to floating-point values.  HONOR_TRAPV is true if OP is applied to integer
2501   type operands that may trap.  If OP is a division operator, DIVISOR contains
2502   the value of the divisor.  */
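/* For example, a TRUNC_DIV_EXPR whose DIVISOR is not a constant, or is
   the constant zero, is reported as possibly trapping; the same division
   by a nonzero constant is not, unless it is a floating-point operation
   and trapping math (or signaling NaN support) is enabled.  */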
2503
2504bool
2505operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2506			tree divisor)
2507{
2508  bool honor_nans = (fp_operation && flag_trapping_math
2509		     && !flag_finite_math_only);
2510  bool honor_snans = fp_operation && flag_signaling_nans != 0;
2511  bool handled;
2512
2513  if (TREE_CODE_CLASS (op) != tcc_comparison
2514      && TREE_CODE_CLASS (op) != tcc_unary
2515      && TREE_CODE_CLASS (op) != tcc_binary
2516      && op != FMA_EXPR)
2517    return false;
2518
2519  return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2520					honor_nans, honor_snans, divisor,
2521					&handled);
2522}
2523
2524
2525/* Returns true if it is possible to prove that the index of
2526   an array access REF (an ARRAY_REF expression) falls into the
2527   array bounds.  */
2528
2529static bool
2530in_array_bounds_p (tree ref)
2531{
2532  tree idx = TREE_OPERAND (ref, 1);
2533  tree min, max;
2534
2535  if (TREE_CODE (idx) != INTEGER_CST)
2536    return false;
2537
2538  min = array_ref_low_bound (ref);
2539  max = array_ref_up_bound (ref);
2540  if (!min
2541      || !max
2542      || TREE_CODE (min) != INTEGER_CST
2543      || TREE_CODE (max) != INTEGER_CST)
2544    return false;
2545
2546  if (tree_int_cst_lt (idx, min)
2547      || tree_int_cst_lt (max, idx))
2548    return false;
2549
2550  return true;
2551}
2552
2553/* Returns true if it is possible to prove that the range of
2554   an array access REF (an ARRAY_RANGE_REF expression) falls
2555   into the array bounds.  */
2556
2557static bool
2558range_in_array_bounds_p (tree ref)
2559{
2560  tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2561  tree range_min, range_max, min, max;
2562
2563  range_min = TYPE_MIN_VALUE (domain_type);
2564  range_max = TYPE_MAX_VALUE (domain_type);
2565  if (!range_min
2566      || !range_max
2567      || TREE_CODE (range_min) != INTEGER_CST
2568      || TREE_CODE (range_max) != INTEGER_CST)
2569    return false;
2570
2571  min = array_ref_low_bound (ref);
2572  max = array_ref_up_bound (ref);
2573  if (!min
2574      || !max
2575      || TREE_CODE (min) != INTEGER_CST
2576      || TREE_CODE (max) != INTEGER_CST)
2577    return false;
2578
2579  if (tree_int_cst_lt (range_min, min)
2580      || tree_int_cst_lt (max, range_max))
2581    return false;
2582
2583  return true;
2584}
2585
2586/* Return true if EXPR can trap, as in dereferencing an invalid pointer
2587   location or floating point arithmetic.  Cf. the RTL version, may_trap_p.
2588   This routine expects only GIMPLE lhs or rhs input.  */
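/* For example, a MEM_REF through an arbitrary pointer is considered
   trapping unless it is marked TREE_THIS_NOTRAP or its base object is
   provably large enough, while an ARRAY_REF with a constant index that
   provably lies within the array bounds (and whose base cannot itself
   trap) is not.  */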
2589
2590bool
2591tree_could_trap_p (tree expr)
2592{
2593  enum tree_code code;
2594  bool fp_operation = false;
2595  bool honor_trapv = false;
2596  tree t, base, div = NULL_TREE;
2597
2598  if (!expr)
2599    return false;
2600
2601  code = TREE_CODE (expr);
2602  t = TREE_TYPE (expr);
2603
2604  if (t)
2605    {
2606      if (COMPARISON_CLASS_P (expr))
2607	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2608      else
2609	fp_operation = FLOAT_TYPE_P (t);
2610      honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2611    }
2612
2613  if (TREE_CODE_CLASS (code) == tcc_binary)
2614    div = TREE_OPERAND (expr, 1);
2615  if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2616    return true;
2617
2618 restart:
2619  switch (code)
2620    {
2621    case COMPONENT_REF:
2622    case REALPART_EXPR:
2623    case IMAGPART_EXPR:
2624    case BIT_FIELD_REF:
2625    case VIEW_CONVERT_EXPR:
2626    case WITH_SIZE_EXPR:
2627      expr = TREE_OPERAND (expr, 0);
2628      code = TREE_CODE (expr);
2629      goto restart;
2630
2631    case ARRAY_RANGE_REF:
2632      base = TREE_OPERAND (expr, 0);
2633      if (tree_could_trap_p (base))
2634	return true;
2635      if (TREE_THIS_NOTRAP (expr))
2636	return false;
2637      return !range_in_array_bounds_p (expr);
2638
2639    case ARRAY_REF:
2640      base = TREE_OPERAND (expr, 0);
2641      if (tree_could_trap_p (base))
2642	return true;
2643      if (TREE_THIS_NOTRAP (expr))
2644	return false;
2645      return !in_array_bounds_p (expr);
2646
2647    case TARGET_MEM_REF:
2648    case MEM_REF:
2649      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2650	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2651	return true;
2652      if (TREE_THIS_NOTRAP (expr))
2653	return false;
2654      /* We cannot prove that the access is in-bounds when we have
2655         variable-index TARGET_MEM_REFs.  */
2656      if (code == TARGET_MEM_REF
2657	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2658	return true;
2659      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2660	{
2661	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2662	  poly_offset_int off = mem_ref_offset (expr);
2663	  if (maybe_lt (off, 0))
2664	    return true;
2665	  if (TREE_CODE (base) == STRING_CST)
2666	    return maybe_le (TREE_STRING_LENGTH (base), off);
2667	  tree size = DECL_SIZE_UNIT (base);
2668	  if (size == NULL_TREE
2669	      || !poly_int_tree_p (size)
2670	      || maybe_le (wi::to_poly_offset (size), off))
2671	    return true;
2672	  /* Now we are sure the first byte of the access is inside
2673	     the object.  */
2674	  return false;
2675	}
2676      return true;
2677
2678    case INDIRECT_REF:
2679      return !TREE_THIS_NOTRAP (expr);
2680
2681    case ASM_EXPR:
2682      return TREE_THIS_VOLATILE (expr);
2683
2684    case CALL_EXPR:
2685      t = get_callee_fndecl (expr);
2686      /* Assume that calls to weak functions may trap.  */
2687      if (!t || !DECL_P (t))
2688	return true;
2689      if (DECL_WEAK (t))
2690	return tree_could_trap_p (t);
2691      return false;
2692
2693    case FUNCTION_DECL:
2694      /* Assume that accesses to weak functions may trap, unless we know
2695	 they are certainly defined in current TU or in some other
2696	 LTO partition.  */
2697      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2698	{
2699	  cgraph_node *node = cgraph_node::get (expr);
2700	  if (node)
2701	    node = node->function_symbol ();
2702	  return !(node && node->in_other_partition);
2703	}
2704      return false;
2705
2706    case VAR_DECL:
2707      /* Assume that accesses to weak vars may trap, unless we know
2708	 they are certainly defined in current TU or in some other
2709	 LTO partition.  */
2710      if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2711	{
2712	  varpool_node *node = varpool_node::get (expr);
2713	  if (node)
2714	    node = node->ultimate_alias_target ();
2715	  return !(node && node->in_other_partition);
2716	}
2717      return false;
2718
2719    default:
2720      return false;
2721    }
2722}
2723
2724/* Return non-NULL if there is an integer operation with trapping overflow
2725   we can rewrite into non-trapping.  Called via walk_tree from
2726   rewrite_to_non_trapping_overflow.  */
2727
2728static tree
2729find_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2730{
2731  if (EXPR_P (*tp)
2732      && ANY_INTEGRAL_TYPE_P (TREE_TYPE (*tp))
2733      && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp)))
2734    return *tp;
2735  if (IS_TYPE_OR_DECL_P (*tp)
2736      || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL))
2737    *walk_subtrees = 0;
2738  return NULL_TREE;
2739}
2740
2741/* Rewrite selected operations into unsigned arithmetic, so that they
2742   don't trap on overflow.  */
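/* An illustrative sketch only: with -ftrapv, a signed addition

	a + b

   is rewritten as

	(int) ((unsigned int) a + (unsigned int) b)

   whose wrap-around behavior is well defined and therefore cannot trap;
   ABS_EXPR gets the COND_EXPR treatment described below instead.  */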
2743
2744static tree
2745replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2746{
2747  if (find_trapping_overflow (tp, walk_subtrees, data))
2748    {
2749      tree type = TREE_TYPE (*tp);
2750      tree utype = unsigned_type_for (type);
2751      *walk_subtrees = 0;
2752      int len = TREE_OPERAND_LENGTH (*tp);
2753      for (int i = 0; i < len; ++i)
2754	walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow,
2755		   data, (hash_set<tree> *) data);
2756
2757      if (TREE_CODE (*tp) == ABS_EXPR)
2758	{
2759	  tree op = TREE_OPERAND (*tp, 0);
2760	  op = save_expr (op);
2761	  /* save_expr skips simple arithmetic, which is undesirable
2762	     here, if it might trap due to flag_trapv.  We need to
2763	     force a SAVE_EXPR in the COND_EXPR condition, to evaluate
2764	     it before the comparison.  */
2765	  if (EXPR_P (op)
2766	      && TREE_CODE (op) != SAVE_EXPR
2767	      && walk_tree (&op, find_trapping_overflow, NULL, NULL))
2768	    {
2769	      op = build1_loc (EXPR_LOCATION (op), SAVE_EXPR, type, op);
2770	      TREE_SIDE_EFFECTS (op) = 1;
2771	    }
2772	  /* Change abs (op) to op < 0 ? -op : op and handle the NEGATE_EXPR
2773	     like other signed integer trapping operations.  */
2774	  tree cond = fold_build2 (LT_EXPR, boolean_type_node,
2775				   op, build_int_cst (type, 0));
2776	  tree neg = fold_build1 (NEGATE_EXPR, utype,
2777				  fold_convert (utype, op));
2778	  *tp = fold_build3 (COND_EXPR, type, cond,
2779			     fold_convert (type, neg), op);
2780	}
2781      else
2782	{
2783	  TREE_TYPE (*tp) = utype;
2784	  len = TREE_OPERAND_LENGTH (*tp);
2785	  for (int i = 0; i < len; ++i)
2786	    TREE_OPERAND (*tp, i)
2787	      = fold_convert (utype, TREE_OPERAND (*tp, i));
2788	  *tp = fold_convert (type, *tp);
2789	}
2790    }
2791  return NULL_TREE;
2792}
2793
2794/* If any subexpression of EXPR can trap due to -ftrapv, rewrite it
2795   using unsigned arithmetic to avoid traps in it.  */
2796
2797tree
2798rewrite_to_non_trapping_overflow (tree expr)
2799{
2800  if (!flag_trapv)
2801    return expr;
2802  hash_set<tree> pset;
2803  if (!walk_tree (&expr, find_trapping_overflow, &pset, &pset))
2804    return expr;
2805  expr = unshare_expr (expr);
2806  pset.empty ();
2807  walk_tree (&expr, replace_trapping_overflow, &pset, &pset);
2808  return expr;
2809}
2810
2811/* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
2812   an assignment or a conditional) may throw.  */
2813
2814static bool
2815stmt_could_throw_1_p (gassign *stmt)
2816{
2817  enum tree_code code = gimple_assign_rhs_code (stmt);
2818  bool honor_nans = false;
2819  bool honor_snans = false;
2820  bool fp_operation = false;
2821  bool honor_trapv = false;
2822  tree t;
2823  size_t i;
2824  bool handled, ret;
2825
2826  if (TREE_CODE_CLASS (code) == tcc_comparison
2827      || TREE_CODE_CLASS (code) == tcc_unary
2828      || TREE_CODE_CLASS (code) == tcc_binary
2829      || code == FMA_EXPR)
2830    {
2831      if (TREE_CODE_CLASS (code) == tcc_comparison)
2832	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2833      else
2834	t = gimple_expr_type (stmt);
2835      fp_operation = FLOAT_TYPE_P (t);
2836      if (fp_operation)
2837	{
2838	  honor_nans = flag_trapping_math && !flag_finite_math_only;
2839	  honor_snans = flag_signaling_nans != 0;
2840	}
2841      else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2842	honor_trapv = true;
2843    }
2844
2845  /* First check the LHS.  */
2846  if (tree_could_trap_p (gimple_assign_lhs (stmt)))
2847    return true;
2848
2849  /* Check if the main expression may trap.  */
2850  ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2851				       honor_nans, honor_snans,
2852				       gimple_assign_rhs2 (stmt),
2853				       &handled);
2854  if (handled)
2855    return ret;
2856
2857  /* If the expression does not trap, see if any of the individual operands may
2858     trap.  */
2859  for (i = 1; i < gimple_num_ops (stmt); i++)
2860    if (tree_could_trap_p (gimple_op (stmt, i)))
2861      return true;
2862
2863  return false;
2864}
2865
2866
2867/* Return true if statement STMT could throw an exception.  */
2868
2869bool
2870stmt_could_throw_p (gimple *stmt)
2871{
2872  if (!flag_exceptions)
2873    return false;
2874
2875  /* The only statements that can throw an exception are assignments,
2876     conditionals, calls, resx, and asms.  */
2877  switch (gimple_code (stmt))
2878    {
2879    case GIMPLE_RESX:
2880      return true;
2881
2882    case GIMPLE_CALL:
2883      return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2884
2885    case GIMPLE_COND:
2886      {
2887	if (!cfun->can_throw_non_call_exceptions)
2888	  return false;
2889	gcond *cond = as_a <gcond *> (stmt);
2890	tree lhs = gimple_cond_lhs (cond);
2891	return operation_could_trap_p (gimple_cond_code (cond),
2892				       FLOAT_TYPE_P (TREE_TYPE (lhs)),
2893				       false, NULL_TREE);
2894      }
2895
2896    case GIMPLE_ASSIGN:
2897      if (!cfun->can_throw_non_call_exceptions
2898	  || gimple_clobber_p (stmt))
2899        return false;
2900      return stmt_could_throw_1_p (as_a <gassign *> (stmt));
2901
2902    case GIMPLE_ASM:
2903      if (!cfun->can_throw_non_call_exceptions)
2904        return false;
2905      return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2906
2907    default:
2908      return false;
2909    }
2910}
2911
2912
2913/* Return true if expression T could throw an exception.  */
2914
2915bool
2916tree_could_throw_p (tree t)
2917{
2918  if (!flag_exceptions)
2919    return false;
2920  if (TREE_CODE (t) == MODIFY_EXPR)
2921    {
2922      if (cfun->can_throw_non_call_exceptions
2923          && tree_could_trap_p (TREE_OPERAND (t, 0)))
2924        return true;
2925      t = TREE_OPERAND (t, 1);
2926    }
2927
2928  if (TREE_CODE (t) == WITH_SIZE_EXPR)
2929    t = TREE_OPERAND (t, 0);
2930  if (TREE_CODE (t) == CALL_EXPR)
2931    return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2932  if (cfun->can_throw_non_call_exceptions)
2933    return tree_could_trap_p (t);
2934  return false;
2935}
2936
2937/* Return true if STMT can throw an exception that is not caught within
2938   the current function (CFUN).  */
2939
2940bool
2941stmt_can_throw_external (gimple *stmt)
2942{
2943  int lp_nr;
2944
2945  if (!stmt_could_throw_p (stmt))
2946    return false;
2947
2948  lp_nr = lookup_stmt_eh_lp (stmt);
2949  return lp_nr == 0;
2950}
2951
2952/* Return true if STMT can throw an exception that is caught within
2953   the current function (CFUN).  */
2954
2955bool
2956stmt_can_throw_internal (gimple *stmt)
2957{
2958  int lp_nr;
2959
2960  if (!stmt_could_throw_p (stmt))
2961    return false;
2962
2963  lp_nr = lookup_stmt_eh_lp (stmt);
2964  return lp_nr > 0;
2965}
2966
2967/* Given a statement STMT in IFUN, if STMT can no longer throw, then
2968   remove any entry it might have from the EH table.  Return true if
2969   any change was made.  */
2970
2971bool
2972maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
2973{
2974  if (stmt_could_throw_p (stmt))
2975    return false;
2976  return remove_stmt_from_eh_lp_fn (ifun, stmt);
2977}
2978
2979/* Likewise, but always use the current function.  */
2980
2981bool
2982maybe_clean_eh_stmt (gimple *stmt)
2983{
2984  return maybe_clean_eh_stmt_fn (cfun, stmt);
2985}
2986
2987/* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2988   OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2989   in the table if it should be in there.  Return TRUE if a replacement was
2990   done that may require an EH edge purge.  */
2991
2992bool
2993maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
2994{
2995  int lp_nr = lookup_stmt_eh_lp (old_stmt);
2996
2997  if (lp_nr != 0)
2998    {
2999      bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
3000
3001      if (new_stmt == old_stmt && new_stmt_could_throw)
3002	return false;
3003
3004      remove_stmt_from_eh_lp (old_stmt);
3005      if (new_stmt_could_throw)
3006	{
3007	  add_stmt_to_eh_lp (new_stmt, lp_nr);
3008	  return false;
3009	}
3010      else
3011	return true;
3012    }
3013
3014  return false;
3015}
3016
3017/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
3018   in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
3019   operand is the return value of duplicate_eh_regions.  */
3020
3021bool
3022maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
3023			    struct function *old_fun, gimple *old_stmt,
3024			    hash_map<void *, void *> *map,
3025			    int default_lp_nr)
3026{
3027  int old_lp_nr, new_lp_nr;
3028
3029  if (!stmt_could_throw_p (new_stmt))
3030    return false;
3031
3032  old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
3033  if (old_lp_nr == 0)
3034    {
3035      if (default_lp_nr == 0)
3036	return false;
3037      new_lp_nr = default_lp_nr;
3038    }
3039  else if (old_lp_nr > 0)
3040    {
3041      eh_landing_pad old_lp, new_lp;
3042
3043      old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
3044      new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
3045      new_lp_nr = new_lp->index;
3046    }
3047  else
3048    {
3049      eh_region old_r, new_r;
3050
3051      old_r = (*old_fun->eh->region_array)[-old_lp_nr];
3052      new_r = static_cast<eh_region> (*map->get (old_r));
3053      new_lp_nr = -new_r->index;
3054    }
3055
3056  add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
3057  return true;
3058}
3059
3060/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
3061   and thus no remapping is required.  */
3062
3063bool
3064maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
3065{
3066  int lp_nr;
3067
3068  if (!stmt_could_throw_p (new_stmt))
3069    return false;
3070
3071  lp_nr = lookup_stmt_eh_lp (old_stmt);
3072  if (lp_nr == 0)
3073    return false;
3074
3075  add_stmt_to_eh_lp (new_stmt, lp_nr);
3076  return true;
3077}
3078
3079/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
3080   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
3081   this only handles handlers consisting of a single call, as that's the
3082   important case for C++: a destructor call for a particular object showing
3083   up in multiple handlers.  */
3084
3085static bool
3086same_handler_p (gimple_seq oneh, gimple_seq twoh)
3087{
3088  gimple_stmt_iterator gsi;
3089  gimple *ones, *twos;
3090  unsigned int ai;
3091
3092  gsi = gsi_start (oneh);
3093  if (!gsi_one_before_end_p (gsi))
3094    return false;
3095  ones = gsi_stmt (gsi);
3096
3097  gsi = gsi_start (twoh);
3098  if (!gsi_one_before_end_p (gsi))
3099    return false;
3100  twos = gsi_stmt (gsi);
3101
3102  if (!is_gimple_call (ones)
3103      || !is_gimple_call (twos)
3104      || gimple_call_lhs (ones)
3105      || gimple_call_lhs (twos)
3106      || gimple_call_chain (ones)
3107      || gimple_call_chain (twos)
3108      || !gimple_call_same_target_p (ones, twos)
3109      || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3110    return false;
3111
3112  for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3113    if (!operand_equal_p (gimple_call_arg (ones, ai),
3114                          gimple_call_arg (twos, ai), 0))
3115      return false;
3116
3117  return true;
3118}
3119
3120/* Optimize
3121    try { A() } finally { try { ~B() } catch { ~A() } }
3122    try { ... } finally { ~A() }
3123   into
3124    try { A() } catch { ~B() }
3125    try { ~B() ... } finally { ~A() }
3126
3127   This occurs frequently in C++, where A is a local variable and B is a
3128   temporary used in the initializer for A.  */
3129
3130static void
3131optimize_double_finally (gtry *one, gtry *two)
3132{
3133  gimple *oneh;
3134  gimple_stmt_iterator gsi;
3135  gimple_seq cleanup;
3136
3137  cleanup = gimple_try_cleanup (one);
3138  gsi = gsi_start (cleanup);
3139  if (!gsi_one_before_end_p (gsi))
3140    return;
3141
3142  oneh = gsi_stmt (gsi);
3143  if (gimple_code (oneh) != GIMPLE_TRY
3144      || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3145    return;
3146
3147  if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3148    {
3149      gimple_seq seq = gimple_try_eval (oneh);
3150
3151      gimple_try_set_cleanup (one, seq);
3152      gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3153      seq = copy_gimple_seq_and_replace_locals (seq);
3154      gimple_seq_add_seq (&seq, gimple_try_eval (two));
3155      gimple_try_set_eval (two, seq);
3156    }
3157}
3158
3159/* Perform EH refactoring optimizations that are simpler to do when code
3160   flow has been lowered but EH structures haven't.  */
3161
3162static void
3163refactor_eh_r (gimple_seq seq)
3164{
3165  gimple_stmt_iterator gsi;
3166  gimple *one, *two;
3167
3168  one = NULL;
3169  two = NULL;
3170  gsi = gsi_start (seq);
3171  while (1)
3172    {
3173      one = two;
3174      if (gsi_end_p (gsi))
3175	two = NULL;
3176      else
3177	two = gsi_stmt (gsi);
3178      if (one && two)
3179	if (gtry *try_one = dyn_cast <gtry *> (one))
3180	  if (gtry *try_two = dyn_cast <gtry *> (two))
3181	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3182		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3183	      optimize_double_finally (try_one, try_two);
3184      if (one)
3185	switch (gimple_code (one))
3186	  {
3187	  case GIMPLE_TRY:
3188	    refactor_eh_r (gimple_try_eval (one));
3189	    refactor_eh_r (gimple_try_cleanup (one));
3190	    break;
3191	  case GIMPLE_CATCH:
3192	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3193	    break;
3194	  case GIMPLE_EH_FILTER:
3195	    refactor_eh_r (gimple_eh_filter_failure (one));
3196	    break;
3197	  case GIMPLE_EH_ELSE:
3198	    {
3199	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
3200	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3201	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3202	    }
3203	    break;
3204	  default:
3205	    break;
3206	  }
3207      if (two)
3208	gsi_next (&gsi);
3209      else
3210	break;
3211    }
3212}
3213
3214namespace {
3215
3216const pass_data pass_data_refactor_eh =
3217{
3218  GIMPLE_PASS, /* type */
3219  "ehopt", /* name */
3220  OPTGROUP_NONE, /* optinfo_flags */
3221  TV_TREE_EH, /* tv_id */
3222  PROP_gimple_lcf, /* properties_required */
3223  0, /* properties_provided */
3224  0, /* properties_destroyed */
3225  0, /* todo_flags_start */
3226  0, /* todo_flags_finish */
3227};
3228
3229class pass_refactor_eh : public gimple_opt_pass
3230{
3231public:
3232  pass_refactor_eh (gcc::context *ctxt)
3233    : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3234  {}
3235
3236  /* opt_pass methods: */
3237  virtual bool gate (function *) { return flag_exceptions != 0; }
3238  virtual unsigned int execute (function *)
3239    {
3240      refactor_eh_r (gimple_body (current_function_decl));
3241      return 0;
3242    }
3243
3244}; // class pass_refactor_eh
3245
3246} // anon namespace
3247
3248gimple_opt_pass *
3249make_pass_refactor_eh (gcc::context *ctxt)
3250{
3251  return new pass_refactor_eh (ctxt);
3252}
3253
3254/* At the end of gimple optimization, we can lower RESX.  */
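/* Three situations are handled below: a RESX with no source region is
   effectively unreachable and is turned into a __builtin_trap call; a
   RESX that resumes into another region of this function copies the
   exception values with __builtin_eh_copy_values and falls through to
   just past the destination landing pad (or into the MUST_NOT_THROW
   failure call); otherwise the exception escapes the function and a call
   to _Unwind_Resume (or __cxa_end_cleanup on ARM EABI) is emitted.  */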
3255
3256static bool
3257lower_resx (basic_block bb, gresx *stmt,
3258	    hash_map<eh_region, tree> *mnt_map)
3259{
3260  int lp_nr;
3261  eh_region src_r, dst_r;
3262  gimple_stmt_iterator gsi;
3263  gimple *x;
3264  tree fn, src_nr;
3265  bool ret = false;
3266
3267  lp_nr = lookup_stmt_eh_lp (stmt);
3268  if (lp_nr != 0)
3269    dst_r = get_eh_region_from_lp_number (lp_nr);
3270  else
3271    dst_r = NULL;
3272
3273  src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3274  gsi = gsi_last_bb (bb);
3275
3276  if (src_r == NULL)
3277    {
3278      /* We can wind up with no source region when pass_cleanup_eh shows
3279	 that there are no entries into an eh region and deletes it, but
3280	 then the block that contains the resx isn't removed.  This can
3281	 happen without optimization when the switch statement created by
3282	 lower_try_finally_switch isn't simplified to remove the eh case.
3283
3284	 Resolve this by expanding the resx node to an abort.  */
3285
3286      fn = builtin_decl_implicit (BUILT_IN_TRAP);
3287      x = gimple_build_call (fn, 0);
3288      gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3289
3290      while (EDGE_COUNT (bb->succs) > 0)
3291	remove_edge (EDGE_SUCC (bb, 0));
3292    }
3293  else if (dst_r)
3294    {
3295      /* When we have a destination region, we resolve this by copying
3296	 the excptr and filter values into place, and changing the edge
3297	 to immediately after the landing pad.  */
3298      edge e;
3299
3300      if (lp_nr < 0)
3301	{
3302	  basic_block new_bb;
3303	  tree lab;
3304
3305	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3306	     the failure decl into a new block, if needed.  */
3307	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3308
3309	  tree *slot = mnt_map->get (dst_r);
3310	  if (slot == NULL)
3311	    {
3312	      gimple_stmt_iterator gsi2;
3313
3314	      new_bb = create_empty_bb (bb);
3315	      new_bb->count = bb->count;
3316	      add_bb_to_loop (new_bb, bb->loop_father);
3317	      lab = gimple_block_label (new_bb);
3318	      gsi2 = gsi_start_bb (new_bb);
3319
3320	      fn = dst_r->u.must_not_throw.failure_decl;
3321	      x = gimple_build_call (fn, 0);
3322	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3323	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3324
3325	      mnt_map->put (dst_r, lab);
3326	    }
3327	  else
3328	    {
3329	      lab = *slot;
3330	      new_bb = label_to_block (lab);
3331	    }
3332
3333	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
3334	  e = make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU);
3335	}
3336      else
3337	{
3338	  edge_iterator ei;
3339	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3340
3341	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3342	  src_nr = build_int_cst (integer_type_node, src_r->index);
3343	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
3344	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3345
3346	  /* Update the flags for the outgoing edge.  */
3347	  e = single_succ_edge (bb);
3348	  gcc_assert (e->flags & EDGE_EH);
3349	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3350	  e->probability = profile_probability::always ();
3351
3352	  /* If there are no more EH users of the landing pad, delete it.  */
3353	  FOR_EACH_EDGE (e, ei, e->dest->preds)
3354	    if (e->flags & EDGE_EH)
3355	      break;
3356	  if (e == NULL)
3357	    {
3358	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3359	      remove_eh_landing_pad (lp);
3360	    }
3361	}
3362
3363      ret = true;
3364    }
3365  else
3366    {
3367      tree var;
3368
3369      /* When we don't have a destination region, this exception escapes
3370	 up the call chain.  We resolve this by generating a call to the
3371	 _Unwind_Resume library function.  */
3372
3373      /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3374	 with no arguments for C++.  Check for that.  */
3375      if (src_r->use_cxa_end_cleanup)
3376	{
3377	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3378	  x = gimple_build_call (fn, 0);
3379	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3380	}
3381      else
3382	{
3383	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3384	  src_nr = build_int_cst (integer_type_node, src_r->index);
3385	  x = gimple_build_call (fn, 1, src_nr);
3386	  var = create_tmp_var (ptr_type_node);
3387	  var = make_ssa_name (var, x);
3388	  gimple_call_set_lhs (x, var);
3389	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3390
3391	  /* When exception handling is delegated to a caller function, we
3392	     have to guarantee that shadow memory variables living on the stack
3393	     will be cleaned before control is given to the parent function.  */
3394	  if (sanitize_flags_p (SANITIZE_ADDRESS))
3395	    {
3396	      tree decl
3397		= builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN);
3398	      gimple *g = gimple_build_call (decl, 0);
3399	      gimple_set_location (g, gimple_location (stmt));
3400	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3401	    }
3402
3403	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3404	  x = gimple_build_call (fn, 1, var);
3405	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3406	}
3407
3408      gcc_assert (EDGE_COUNT (bb->succs) == 0);
3409    }
3410
3411  gsi_remove (&gsi, true);
3412
3413  return ret;
3414}
3415
3416namespace {
3417
3418const pass_data pass_data_lower_resx =
3419{
3420  GIMPLE_PASS, /* type */
3421  "resx", /* name */
3422  OPTGROUP_NONE, /* optinfo_flags */
3423  TV_TREE_EH, /* tv_id */
3424  PROP_gimple_lcf, /* properties_required */
3425  0, /* properties_provided */
3426  0, /* properties_destroyed */
3427  0, /* todo_flags_start */
3428  0, /* todo_flags_finish */
3429};
3430
3431class pass_lower_resx : public gimple_opt_pass
3432{
3433public:
3434  pass_lower_resx (gcc::context *ctxt)
3435    : gimple_opt_pass (pass_data_lower_resx, ctxt)
3436  {}
3437
3438  /* opt_pass methods: */
3439  virtual bool gate (function *) { return flag_exceptions != 0; }
3440  virtual unsigned int execute (function *);
3441
3442}; // class pass_lower_resx
3443
3444unsigned
3445pass_lower_resx::execute (function *fun)
3446{
3447  basic_block bb;
3448  bool dominance_invalidated = false;
3449  bool any_rewritten = false;
3450
3451  hash_map<eh_region, tree> mnt_map;
3452
3453  FOR_EACH_BB_FN (bb, fun)
3454    {
3455      gimple *last = last_stmt (bb);
3456      if (last && is_gimple_resx (last))
3457	{
3458	  dominance_invalidated |=
3459	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3460	  any_rewritten = true;
3461	}
3462    }
3463
3464  if (dominance_invalidated)
3465    {
3466      free_dominance_info (CDI_DOMINATORS);
3467      free_dominance_info (CDI_POST_DOMINATORS);
3468    }
3469
3470  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3471}
3472
3473} // anon namespace
3474
3475gimple_opt_pass *
3476make_pass_lower_resx (gcc::context *ctxt)
3477{
3478  return new pass_lower_resx (ctxt);
3479}
3480
3481/* Try to optimize var = {v} {CLOBBER} stmts followed just by
3482   external throw.  */
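/* An illustrative shape of a block this cleans up (details vary):

	<bb N>:			<- reached via an EH edge
	obj ={v} {CLOBBER};
	tmp ={v} {CLOBBER};
	resx 1;			<- the exception leaves the function

   The clobbers are useless on a path that throws out of the function,
   so they are simply removed.  */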
3483
3484static void
3485optimize_clobbers (basic_block bb)
3486{
3487  gimple_stmt_iterator gsi = gsi_last_bb (bb);
3488  bool any_clobbers = false;
3489  bool seen_stack_restore = false;
3490  edge_iterator ei;
3491  edge e;
3492
3493  /* Only optimize anything if the bb contains at least one clobber,
3494     ends with resx (checked by caller), optionally contains some
3495     debug stmts or labels and at most one __builtin_stack_restore
3496     call, and has an incoming EH edge.  */
3497  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3498    {
3499      gimple *stmt = gsi_stmt (gsi);
3500      if (is_gimple_debug (stmt))
3501	continue;
3502      if (gimple_clobber_p (stmt))
3503	{
3504	  any_clobbers = true;
3505	  continue;
3506	}
3507      if (!seen_stack_restore
3508	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3509	{
3510	  seen_stack_restore = true;
3511	  continue;
3512	}
3513      if (gimple_code (stmt) == GIMPLE_LABEL)
3514	break;
3515      return;
3516    }
3517  if (!any_clobbers)
3518    return;
3519  FOR_EACH_EDGE (e, ei, bb->preds)
3520    if (e->flags & EDGE_EH)
3521      break;
3522  if (e == NULL)
3523    return;
3524  gsi = gsi_last_bb (bb);
3525  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3526    {
3527      gimple *stmt = gsi_stmt (gsi);
3528      if (!gimple_clobber_p (stmt))
3529	continue;
3530      unlink_stmt_vdef (stmt);
3531      gsi_remove (&gsi, true);
3532      release_defs (stmt);
3533    }
3534}
3535
3536/* Try to sink var = {v} {CLOBBER} stmts followed just by
3537   internal throw to successor BB.  */
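/* An illustrative sketch only: when a block consists of clobbers followed
   by an internal rethrow, and both its single successor edge and all of
   its predecessor edges are EH edges, the clobbers are moved to the start
   of the landing-pad successor, with virtual operands adjusted as
   needed, leaving only labels, debug stmts and the throw behind.  */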
3538
3539static int
3540sink_clobbers (basic_block bb)
3541{
3542  edge e;
3543  edge_iterator ei;
3544  gimple_stmt_iterator gsi, dgsi;
3545  basic_block succbb;
3546  bool any_clobbers = false;
3547  unsigned todo = 0;
3548
3549  /* Only optimize if BB has a single EH successor and
3550     all predecessor edges are EH too.  */
3551  if (!single_succ_p (bb)
3552      || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3553    return 0;
3554
3555  FOR_EACH_EDGE (e, ei, bb->preds)
3556    {
3557      if ((e->flags & EDGE_EH) == 0)
3558	return 0;
3559    }
3560
3561  /* And BB contains only CLOBBER stmts before the final
3562     RESX.  */
3563  gsi = gsi_last_bb (bb);
3564  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3565    {
3566      gimple *stmt = gsi_stmt (gsi);
3567      if (is_gimple_debug (stmt))
3568	continue;
3569      if (gimple_code (stmt) == GIMPLE_LABEL)
3570	break;
3571      if (!gimple_clobber_p (stmt))
3572	return 0;
3573      any_clobbers = true;
3574    }
3575  if (!any_clobbers)
3576    return 0;
3577
3578  edge succe = single_succ_edge (bb);
3579  succbb = succe->dest;
3580
3581  /* See if there is a virtual PHI node to take an updated virtual
3582     operand from.  */
3583  gphi *vphi = NULL;
3584  tree vuse = NULL_TREE;
3585  for (gphi_iterator gpi = gsi_start_phis (succbb);
3586       !gsi_end_p (gpi); gsi_next (&gpi))
3587    {
3588      tree res = gimple_phi_result (gpi.phi ());
3589      if (virtual_operand_p (res))
3590	{
3591	  vphi = gpi.phi ();
3592	  vuse = res;
3593	  break;
3594	}
3595    }
3596
3597  dgsi = gsi_after_labels (succbb);
3598  gsi = gsi_last_bb (bb);
3599  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3600    {
3601      gimple *stmt = gsi_stmt (gsi);
3602      tree lhs;
3603      if (is_gimple_debug (stmt))
3604	continue;
3605      if (gimple_code (stmt) == GIMPLE_LABEL)
3606	break;
3607      lhs = gimple_assign_lhs (stmt);
3608      /* Unfortunately we don't have dominance info updated at this
3609	 point, so checking if
3610	 dominated_by_p (CDI_DOMINATORS, succbb,
3611			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3612	 would be too costly.  Thus, avoid sinking any clobbers that
3613	 refer to non-(D) SSA_NAMEs.  */
3614      if (TREE_CODE (lhs) == MEM_REF
3615	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3616	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3617	{
3618	  unlink_stmt_vdef (stmt);
3619	  gsi_remove (&gsi, true);
3620	  release_defs (stmt);
3621	  continue;
3622	}
3623
3624      /* As we do not change stmt order when sinking across a
3625         forwarder edge we can keep virtual operands in place.  */
3626      gsi_remove (&gsi, false);
3627      gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3628
3629      /* But adjust virtual operands if we sunk across a PHI node.  */
3630      if (vuse)
3631	{
3632	  gimple *use_stmt;
3633	  imm_use_iterator iter;
3634	  use_operand_p use_p;
3635	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3636	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3637	      SET_USE (use_p, gimple_vdef (stmt));
3638	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3639	    {
3640	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3641	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3642	    }
3643	  /* Adjust the incoming virtual operand.  */
3644	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3645	  SET_USE (gimple_vuse_op (stmt), vuse);
3646	}
3647	      /* If there is no virtual PHI node but the successor does not have
3648	         a single predecessor, arrange for virtual operands to be renamed.  */
3649      else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3650	       && !single_pred_p (succbb))
3651	{
3652	  /* In this case there will be no use of the VDEF of this stmt.
3653	     ???  Unless this is a secondary opportunity and we have not
3654	     removed unreachable blocks yet, so we cannot assert this.
3655	     Which also means we will end up renaming too many times.  */
3656	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3657	  mark_virtual_operands_for_renaming (cfun);
3658	  todo |= TODO_update_ssa_only_virtuals;
3659	}
3660    }
3661
3662  return todo;
3663}
3664
3665/* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3666   we have found some duplicate labels and removed some edges.  */
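/* Sketch of the lowering (illustrative GIMPLE, hypothetical labels and
   region number):

       eh_dispatch 1;

   for an ERT_TRY region with two typed handlers becomes roughly

       _f = __builtin_eh_filter (1);
       switch (_f) <default: CATCH_ALL, case 1: H1, case 2: H2>

   while for an ERT_ALLOWED_EXCEPTIONS region it becomes a conditional
   comparing the __builtin_eh_filter result against the allowed filter
   value.  */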
3667
3668static bool
3669lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3670{
3671  gimple_stmt_iterator gsi;
3672  int region_nr;
3673  eh_region r;
3674  tree filter, fn;
3675  gimple *x;
3676  bool redirected = false;
3677
3678  region_nr = gimple_eh_dispatch_region (stmt);
3679  r = get_eh_region_from_number (region_nr);
3680
3681  gsi = gsi_last_bb (src);
3682
3683  switch (r->type)
3684    {
3685    case ERT_TRY:
3686      {
3687	auto_vec<tree> labels;
3688	tree default_label = NULL;
3689	eh_catch c;
3690	edge_iterator ei;
3691	edge e;
3692	hash_set<tree> seen_values;
3693
3694	/* Collect the labels for a switch.  Zero each handler's label
3695	   field because we'll no longer have anything keeping these labels
3696	   in existence and the optimizer will be free to merge these
3697	   blocks at will.  */
3698	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3699	  {
3700	    tree tp_node, flt_node, lab = c->label;
3701	    bool have_label = false;
3702
3703	    c->label = NULL;
3704	    tp_node = c->type_list;
3705	    flt_node = c->filter_list;
3706
3707	    if (tp_node == NULL)
3708	      {
3709	        default_label = lab;
3710		break;
3711	      }
3712	    do
3713	      {
3714		/* Filter out duplicate labels that arise when this handler
3715		   is shadowed by an earlier one.  When no labels are
3716		   attached to the handler anymore, we remove
3717		   the corresponding edge and then we delete unreachable
3718		   blocks at the end of this pass.  */
3719		if (! seen_values.contains (TREE_VALUE (flt_node)))
3720		  {
3721		    tree t = build_case_label (TREE_VALUE (flt_node),
3722					       NULL, lab);
3723		    labels.safe_push (t);
3724		    seen_values.add (TREE_VALUE (flt_node));
3725		    have_label = true;
3726		  }
3727
3728		tp_node = TREE_CHAIN (tp_node);
3729		flt_node = TREE_CHAIN (flt_node);
3730	      }
3731	    while (tp_node);
3732	    if (! have_label)
3733	      {
3734	        remove_edge (find_edge (src, label_to_block (lab)));
3735	        redirected = true;
3736	      }
3737	  }
3738
3739	/* Clean up the edge flags.  */
3740	FOR_EACH_EDGE (e, ei, src->succs)
3741	  {
3742	    if (e->flags & EDGE_FALLTHRU)
3743	      {
3744		/* If there was no catch-all, use the fallthru edge.  */
3745		if (default_label == NULL)
3746		  default_label = gimple_block_label (e->dest);
3747		e->flags &= ~EDGE_FALLTHRU;
3748	      }
3749	  }
3750	gcc_assert (default_label != NULL);
3751
3752	/* Don't generate a switch if there's only a default case.
3753	   This is common in the form of try { A; } catch (...) { B; }.  */
3754	if (!labels.exists ())
3755	  {
3756	    e = single_succ_edge (src);
3757	    e->flags |= EDGE_FALLTHRU;
3758	  }
3759	else
3760	  {
3761	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3762	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3763							 region_nr));
3764	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3765	    filter = make_ssa_name (filter, x);
3766	    gimple_call_set_lhs (x, filter);
3767	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3768
3769	    /* Turn the default label into a default case.  */
3770	    default_label = build_case_label (NULL, NULL, default_label);
3771	    sort_case_labels (labels);
3772
3773	    x = gimple_build_switch (filter, default_label, labels);
3774	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3775	  }
3776      }
3777      break;
3778
3779    case ERT_ALLOWED_EXCEPTIONS:
3780      {
3781	edge b_e = BRANCH_EDGE (src);
3782	edge f_e = FALLTHRU_EDGE (src);
3783
3784	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3785	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3786						     region_nr));
3787	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3788	filter = make_ssa_name (filter, x);
3789	gimple_call_set_lhs (x, filter);
3790	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3791
3792	r->u.allowed.label = NULL;
3793	x = gimple_build_cond (EQ_EXPR, filter,
3794			       build_int_cst (TREE_TYPE (filter),
3795					      r->u.allowed.filter),
3796			       NULL_TREE, NULL_TREE);
3797	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3798
3799	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3800        f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3801      }
3802      break;
3803
3804    default:
3805      gcc_unreachable ();
3806    }
3807
3808  /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
3809  gsi_remove (&gsi, true);
3810  return redirected;
3811}
3812
3813namespace {
3814
3815const pass_data pass_data_lower_eh_dispatch =
3816{
3817  GIMPLE_PASS, /* type */
3818  "ehdisp", /* name */
3819  OPTGROUP_NONE, /* optinfo_flags */
3820  TV_TREE_EH, /* tv_id */
3821  PROP_gimple_lcf, /* properties_required */
3822  0, /* properties_provided */
3823  0, /* properties_destroyed */
3824  0, /* todo_flags_start */
3825  0, /* todo_flags_finish */
3826};
3827
3828class pass_lower_eh_dispatch : public gimple_opt_pass
3829{
3830public:
3831  pass_lower_eh_dispatch (gcc::context *ctxt)
3832    : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3833  {}
3834
3835  /* opt_pass methods: */
3836  virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3837  virtual unsigned int execute (function *);
3838
3839}; // class pass_lower_eh_dispatch
3840
3841unsigned
3842pass_lower_eh_dispatch::execute (function *fun)
3843{
3844  basic_block bb;
3845  int flags = 0;
3846  bool redirected = false;
3847
3848  assign_filter_values ();
3849
3850  FOR_EACH_BB_FN (bb, fun)
3851    {
3852      gimple *last = last_stmt (bb);
3853      if (last == NULL)
3854	continue;
3855      if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3856	{
3857	  redirected |= lower_eh_dispatch (bb,
3858					   as_a <geh_dispatch *> (last));
3859	  flags |= TODO_update_ssa_only_virtuals;
3860	}
3861      else if (gimple_code (last) == GIMPLE_RESX)
3862	{
3863	  if (stmt_can_throw_external (last))
3864	    optimize_clobbers (bb);
3865	  else
3866	    flags |= sink_clobbers (bb);
3867	}
3868    }
3869
3870  if (redirected)
3871    {
3872      free_dominance_info (CDI_DOMINATORS);
3873      delete_unreachable_blocks ();
3874    }
3875  return flags;
3876}
3877
3878} // anon namespace
3879
3880gimple_opt_pass *
3881make_pass_lower_eh_dispatch (gcc::context *ctxt)
3882{
3883  return new pass_lower_eh_dispatch (ctxt);
3884}
3885
3886/* Walk statements, see what regions and, optionally, landing pads
3887   are really referenced.
3888
3889   Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3890	   and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3891
3892	   Passing NULL for LP_REACHABLEP is valid; in that case only reachable
3893   regions are marked.
3894
3895   The caller is responsible for freeing the returned sbitmaps.  */
3896
3897static void
3898mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3899{
3900  sbitmap r_reachable, lp_reachable;
3901  basic_block bb;
3902  bool mark_landing_pads = (lp_reachablep != NULL);
3903  gcc_checking_assert (r_reachablep != NULL);
3904
3905  r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3906  bitmap_clear (r_reachable);
3907  *r_reachablep = r_reachable;
3908
3909  if (mark_landing_pads)
3910    {
3911      lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3912      bitmap_clear (lp_reachable);
3913      *lp_reachablep = lp_reachable;
3914    }
3915  else
3916    lp_reachable = NULL;
3917
3918  FOR_EACH_BB_FN (bb, cfun)
3919    {
3920      gimple_stmt_iterator gsi;
3921
3922      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3923	{
3924	  gimple *stmt = gsi_stmt (gsi);
3925
3926	  if (mark_landing_pads)
3927	    {
3928	      int lp_nr = lookup_stmt_eh_lp (stmt);
3929
3930	      /* Negative LP numbers are MUST_NOT_THROW regions which
3931		 are not considered BB enders.  */
3932	      if (lp_nr < 0)
3933		bitmap_set_bit (r_reachable, -lp_nr);
3934
3935	      /* Positive LP numbers are real landing pads, and BB enders.  */
3936	      else if (lp_nr > 0)
3937		{
3938		  gcc_assert (gsi_one_before_end_p (gsi));
3939		  eh_region region = get_eh_region_from_lp_number (lp_nr);
3940		  bitmap_set_bit (r_reachable, region->index);
3941		  bitmap_set_bit (lp_reachable, lp_nr);
3942		}
3943	    }
3944
3945	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
3946	  switch (gimple_code (stmt))
3947	    {
3948	    case GIMPLE_RESX:
3949	      bitmap_set_bit (r_reachable,
3950			      gimple_resx_region (as_a <gresx *> (stmt)));
3951	      break;
3952	    case GIMPLE_EH_DISPATCH:
3953	      bitmap_set_bit (r_reachable,
3954			      gimple_eh_dispatch_region (
3955                                as_a <geh_dispatch *> (stmt)));
3956	      break;
3957	    case GIMPLE_CALL:
3958	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
3959		for (int i = 0; i < 2; ++i)
3960		  {
3961		    tree rt = gimple_call_arg (stmt, i);
3962		    HOST_WIDE_INT ri = tree_to_shwi (rt);
3963
3964		    gcc_assert (ri == (int)ri);
3965		    bitmap_set_bit (r_reachable, ri);
3966		  }
3967	      break;
3968	    default:
3969	      break;
3970	    }
3971	}
3972    }
3973}
3974
3975/* Remove unreachable handlers and unreachable landing pads.  */
3976
3977static void
3978remove_unreachable_handlers (void)
3979{
3980  sbitmap r_reachable, lp_reachable;
3981  eh_region region;
3982  eh_landing_pad lp;
3983  unsigned i;
3984
3985  mark_reachable_handlers (&r_reachable, &lp_reachable);
3986
3987  if (dump_file)
3988    {
3989      fprintf (dump_file, "Before removal of unreachable regions:\n");
3990      dump_eh_tree (dump_file, cfun);
3991      fprintf (dump_file, "Reachable regions: ");
3992      dump_bitmap_file (dump_file, r_reachable);
3993      fprintf (dump_file, "Reachable landing pads: ");
3994      dump_bitmap_file (dump_file, lp_reachable);
3995    }
3996
3997  if (dump_file)
3998    {
3999      FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4000	if (region && !bitmap_bit_p (r_reachable, region->index))
4001	  fprintf (dump_file,
4002		   "Removing unreachable region %d\n",
4003		   region->index);
4004    }
4005
4006  remove_unreachable_eh_regions (r_reachable);
4007
4008  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4009    if (lp && !bitmap_bit_p (lp_reachable, lp->index))
4010      {
4011	if (dump_file)
4012	  fprintf (dump_file,
4013		   "Removing unreachable landing pad %d\n",
4014		   lp->index);
4015	remove_eh_landing_pad (lp);
4016      }
4017
4018  if (dump_file)
4019    {
4020      fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
4021      dump_eh_tree (dump_file, cfun);
4022      fprintf (dump_file, "\n\n");
4023    }
4024
4025  sbitmap_free (r_reachable);
4026  sbitmap_free (lp_reachable);
4027
4028  if (flag_checking)
4029    verify_eh_tree (cfun);
4030}
4031
4032	/* Remove unreachable handlers if any landing pads have been removed since
4033	   the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
4034
4035void
4036maybe_remove_unreachable_handlers (void)
4037{
4038  eh_landing_pad lp;
4039  unsigned i;
4040
4041  if (cfun->eh == NULL)
4042    return;
4043
4044  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4045    if (lp && lp->post_landing_pad)
4046      {
4047	if (label_to_block (lp->post_landing_pad) == NULL)
4048	  {
4049	    remove_unreachable_handlers ();
4050	    return;
4051	  }
4052      }
4053}
4054
4055/* Remove regions that do not have landing pads.  This assumes
4056   that remove_unreachable_handlers has already been run, and
4057   that we've just manipulated the landing pads since then.
4058
4059   Preserve regions with landing pads and regions that prevent
4060   exceptions from propagating further, even if these regions
4061   are not reachable.  */
4062
4063static void
4064remove_unreachable_handlers_no_lp (void)
4065{
4066  eh_region region;
4067  sbitmap r_reachable;
4068  unsigned i;
4069
4070  mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
4071
4072  FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4073    {
4074      if (! region)
4075	continue;
4076
4077      if (region->landing_pads != NULL
4078	  || region->type == ERT_MUST_NOT_THROW)
4079	bitmap_set_bit (r_reachable, region->index);
4080
4081      if (dump_file
4082	  && !bitmap_bit_p (r_reachable, region->index))
4083	fprintf (dump_file,
4084		 "Removing unreachable region %d\n",
4085		 region->index);
4086    }
4087
4088  remove_unreachable_eh_regions (r_reachable);
4089
4090  sbitmap_free (r_reachable);
4091}
4092
4093/* Undo critical edge splitting on an EH landing pad.  Earlier, we
4094	   optimistically split all sorts of edges, including EH edges.  The
4095   optimization passes in between may not have needed them; if not,
4096   we should undo the split.
4097
4098   Recognize this case by having one EH edge incoming to the BB and
4099   one normal edge outgoing; BB should be empty apart from the
4100   post_landing_pad label.
4101
4102   Note that this is slightly different from the empty handler case
4103   handled by cleanup_empty_eh, in that the actual handler may yet
4104   have actual code but the landing pad has been separated from the
4105   handler.  As such, cleanup_empty_eh relies on this transformation
4106   having been done first.  */
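/* Pictorially (illustrative, hypothetical block numbers):

       throwing bb --EH--> <bb 5> (empty post landing pad) --> <bb 6>

   becomes

       throwing bb --EH--> <bb 6>

   after which the empty pad block goes unreachable and is deleted.  */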
4107
4108static bool
4109unsplit_eh (eh_landing_pad lp)
4110{
4111  basic_block bb = label_to_block (lp->post_landing_pad);
4112  gimple_stmt_iterator gsi;
4113  edge e_in, e_out;
4114
4115  /* Quickly check the edge counts on BB for singularity.  */
4116  if (!single_pred_p (bb) || !single_succ_p (bb))
4117    return false;
4118  e_in = single_pred_edge (bb);
4119  e_out = single_succ_edge (bb);
4120
4121  /* Input edge must be EH and output edge must be normal.  */
4122  if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4123    return false;
4124
4125  /* The block must be empty except for the labels and debug insns.  */
4126  gsi = gsi_after_labels (bb);
4127  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4128    gsi_next_nondebug (&gsi);
4129  if (!gsi_end_p (gsi))
4130    return false;
4131
4132  /* The destination block must not already have a landing pad
4133     for a different region.  */
4134  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4135    {
4136      glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4137      tree lab;
4138      int lp_nr;
4139
4140      if (!label_stmt)
4141	break;
4142      lab = gimple_label_label (label_stmt);
4143      lp_nr = EH_LANDING_PAD_NR (lab);
4144      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4145	return false;
4146    }
4147
4148  /* The new destination block must not already be a destination of
4149     the source block, lest we merge fallthru and eh edges and get
4150     all sorts of confused.  */
4151  if (find_edge (e_in->src, e_out->dest))
4152    return false;
4153
4154  /* ??? We can get degenerate phis due to cfg cleanups.  I would have
4155     thought this should have been cleaned up by a phicprop pass, but
4156     that doesn't appear to handle virtuals.  Propagate by hand.  */
4157  if (!gimple_seq_empty_p (phi_nodes (bb)))
4158    {
4159      for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4160	{
4161	  gimple *use_stmt;
4162	  gphi *phi = gpi.phi ();
4163	  tree lhs = gimple_phi_result (phi);
4164	  tree rhs = gimple_phi_arg_def (phi, 0);
4165	  use_operand_p use_p;
4166	  imm_use_iterator iter;
4167
4168	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4169	    {
4170	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4171		SET_USE (use_p, rhs);
4172	    }
4173
4174	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4175	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4176
4177	  remove_phi_node (&gpi, true);
4178	}
4179    }
4180
4181  if (dump_file && (dump_flags & TDF_DETAILS))
4182    fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4183	     lp->index, e_out->dest->index);
4184
4185  /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
4186     a successor edge, humor it.  But do the real CFG change with the
4187     predecessor of E_OUT in order to preserve the ordering of arguments
4188     to the PHI nodes in E_OUT->DEST.  */
4189  redirect_eh_edge_1 (e_in, e_out->dest, false);
4190  redirect_edge_pred (e_out, e_in->src);
4191  e_out->flags = e_in->flags;
4192  e_out->probability = e_in->probability;
4193  remove_edge (e_in);
4194
4195  return true;
4196}
4197
4198/* Examine each landing pad block and see if it matches unsplit_eh.  */
4199
4200static bool
4201unsplit_all_eh (void)
4202{
4203  bool changed = false;
4204  eh_landing_pad lp;
4205  int i;
4206
4207  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4208    if (lp)
4209      changed |= unsplit_eh (lp);
4210
4211  return changed;
4212}
4213
4214/* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
4215   to OLD_BB to NEW_BB; return true on success, false on failure.
4216
4217   OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4218   PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4219   Virtual PHIs may be deleted and marked for renaming.  */
4220
4221static bool
4222cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4223			     edge old_bb_out, bool change_region)
4224{
4225  gphi_iterator ngsi, ogsi;
4226  edge_iterator ei;
4227  edge e;
4228  bitmap ophi_handled;
4229
4230  /* The destination block must not be a regular successor for any
4231     of the preds of the landing pad.  Thus, avoid turning
4232        <..>
4233	 |  \ EH
4234	 |  <..>
4235	 |  /
4236	<..>
4237     into
4238        <..>
4239	|  | EH
4240	<..>
4241     which CFG verification would choke on.  See PR45172 and PR51089.  */
4242  FOR_EACH_EDGE (e, ei, old_bb->preds)
4243    if (find_edge (e->src, new_bb))
4244      return false;
4245
4246  FOR_EACH_EDGE (e, ei, old_bb->preds)
4247    redirect_edge_var_map_clear (e);
4248
4249  ophi_handled = BITMAP_ALLOC (NULL);
4250
4251  /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4252     for the edges we're going to move.  */
4253  for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4254    {
4255      gphi *ophi, *nphi = ngsi.phi ();
4256      tree nresult, nop;
4257
4258      nresult = gimple_phi_result (nphi);
4259      nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4260
4261      /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4262	 the source ssa_name.  */
4263      ophi = NULL;
4264      for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4265	{
4266	  ophi = ogsi.phi ();
4267	  if (gimple_phi_result (ophi) == nop)
4268	    break;
4269	  ophi = NULL;
4270	}
4271
4272      /* If we did find the corresponding PHI, copy those inputs.  */
4273      if (ophi)
4274	{
4275	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
4276	  if (!has_single_use (nop))
4277	    {
4278	      imm_use_iterator imm_iter;
4279	      use_operand_p use_p;
4280
4281	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4282		{
4283		  if (!gimple_debug_bind_p (USE_STMT (use_p))
4284		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4285			  || gimple_bb (USE_STMT (use_p)) != new_bb))
4286		    goto fail;
4287		}
4288	    }
4289	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4290	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4291	    {
4292	      location_t oloc;
4293	      tree oop;
4294
4295	      if ((e->flags & EDGE_EH) == 0)
4296		continue;
4297	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
4298	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4299	      redirect_edge_var_map_add (e, nresult, oop, oloc);
4300	    }
4301	}
4302	      /* If we didn't find the PHI, then whether it's a real variable or a
4303	 VOP, we know from the fact that OLD_BB is tree_empty_eh_handler_p that
4304	 the variable is unchanged from input to the block and we can simply
4305	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
4306      else
4307	{
4308	  location_t nloc
4309	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4310	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4311	    redirect_edge_var_map_add (e, nresult, nop, nloc);
4312	}
4313    }
4314
4315  /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
4316     we don't know what values from the other edges into NEW_BB to use.  */
4317  for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4318    {
4319      gphi *ophi = ogsi.phi ();
4320      tree oresult = gimple_phi_result (ophi);
4321      if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4322	goto fail;
4323    }
4324
4325  /* Finally, move the edges and update the PHIs.  */
4326  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4327    if (e->flags & EDGE_EH)
4328      {
4329	/* ???  CFG manipulation routines do not try to update loop
4330	   form on edge redirection.  Do so manually here for now.  */
4331	/* If we redirect a loop entry or latch edge, that will either create
4332	   a multiple entry loop or rotate the loop.  If the loops merge
4333	   we may have created a loop with multiple latches.
4334	   All of this isn't easily fixed, thus cancel the affected loop
4335	   and mark the other loop as possibly having multiple latches.  */
4336	if (e->dest == e->dest->loop_father->header)
4337	  {
4338	    mark_loop_for_removal (e->dest->loop_father);
4339	    new_bb->loop_father->latch = NULL;
4340	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4341	  }
4342	redirect_eh_edge_1 (e, new_bb, change_region);
4343	redirect_edge_succ (e, new_bb);
4344	flush_pending_stmts (e);
4345      }
4346    else
4347      ei_next (&ei);
4348
4349  BITMAP_FREE (ophi_handled);
4350  return true;
4351
4352 fail:
4353  FOR_EACH_EDGE (e, ei, old_bb->preds)
4354    redirect_edge_var_map_clear (e);
4355  BITMAP_FREE (ophi_handled);
4356  return false;
4357}
4358
4359/* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
4360   old region to NEW_REGION at BB.  */
4361
4362static void
4363cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4364			  eh_landing_pad lp, eh_region new_region)
4365{
4366  gimple_stmt_iterator gsi;
4367  eh_landing_pad *pp;
4368
4369  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4370    continue;
4371  *pp = lp->next_lp;
4372
4373  lp->region = new_region;
4374  lp->next_lp = new_region->landing_pads;
4375  new_region->landing_pads = lp;
4376
4377  /* Delete the RESX that was matched within the empty handler block.  */
4378  gsi = gsi_last_bb (bb);
4379  unlink_stmt_vdef (gsi_stmt (gsi));
4380  gsi_remove (&gsi, true);
4381
4382  /* Clean up E_OUT for the fallthru.  */
4383  e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4384  e_out->probability = profile_probability::always ();
4385}
4386
4387/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
4388   unsplitting than unsplit_eh was prepared to handle, e.g. when
4389   multiple incoming edges and phis are involved.  */
4390
4391static bool
4392cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4393{
4394  gimple_stmt_iterator gsi;
4395  tree lab;
4396
4397  /* We really ought not have totally lost everything following
4398     a landing pad label.  Given that BB is empty, there had better
4399     be a successor.  */
4400  gcc_assert (e_out != NULL);
4401
4402  /* The destination block must not already have a landing pad
4403     for a different region.  */
4404  lab = NULL;
4405  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4406    {
4407      glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4408      int lp_nr;
4409
4410      if (!stmt)
4411	break;
4412      lab = gimple_label_label (stmt);
4413      lp_nr = EH_LANDING_PAD_NR (lab);
4414      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4415	return false;
4416    }
4417
4418  /* Attempt to move the PHIs into the successor block.  */
4419  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4420    {
4421      if (dump_file && (dump_flags & TDF_DETAILS))
4422	fprintf (dump_file,
4423		 "Unsplit EH landing pad %d to block %i "
4424		 "(via cleanup_empty_eh).\n",
4425		 lp->index, e_out->dest->index);
4426      return true;
4427    }
4428
4429  return false;
4430}
4431
4432/* Return true if edge E_FIRST is part of an empty infinite loop
4433   or leads to such a loop through a series of single successor
4434   empty bbs.  */
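/* E.g. (illustrative block numbers) an edge whose destination leads back
   to its own source through nothing but empty single-successor blocks:

       <bb 8> -> <bb 9> -> <bb 10> -> <bb 8>

   where <bb 9> and <bb 10> contain only labels and debug stmts.  */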
4435
4436static bool
4437infinite_empty_loop_p (edge e_first)
4438{
4439  bool inf_loop = false;
4440  edge e;
4441
4442  if (e_first->dest == e_first->src)
4443    return true;
4444
4445  e_first->src->aux = (void *) 1;
4446  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4447    {
4448      gimple_stmt_iterator gsi;
4449      if (e->dest->aux)
4450	{
4451	  inf_loop = true;
4452	  break;
4453	}
4454      e->dest->aux = (void *) 1;
4455      gsi = gsi_after_labels (e->dest);
4456      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4457	gsi_next_nondebug (&gsi);
4458      if (!gsi_end_p (gsi))
4459	break;
4460    }
4461  e_first->src->aux = NULL;
4462  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4463    e->dest->aux = NULL;
4464
4465  return inf_loop;
4466}
4467
4468/* Examine the block associated with LP to determine if it's an empty
4469   handler for its EH region.  If so, attempt to redirect EH edges to
4470	   an outer region.  Return true if the CFG was updated in any way.  This
4471   is similar to jump forwarding, just across EH edges.  */
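/* The canonical shape handled here (illustrative, hypothetical numbers):

       <bb 11> (landing pad 3):
       resx <to the enclosing region>;

   i.e. the handler does nothing but rethrow, so either the EH edges
   feeding it are redirected outward, or, when every predecessor edge is
   an EH edge, the landing pad itself is moved to the outer region.  */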
4472
4473static bool
4474cleanup_empty_eh (eh_landing_pad lp)
4475{
4476  basic_block bb = label_to_block (lp->post_landing_pad);
4477  gimple_stmt_iterator gsi;
4478  gimple *resx;
4479  eh_region new_region;
4480  edge_iterator ei;
4481  edge e, e_out;
4482  bool has_non_eh_pred;
4483  bool ret = false;
4484  int new_lp_nr;
4485
4486  /* There can be zero or one edges out of BB.  This is the quickest test.  */
4487  switch (EDGE_COUNT (bb->succs))
4488    {
4489    case 0:
4490      e_out = NULL;
4491      break;
4492    case 1:
4493      e_out = single_succ_edge (bb);
4494      break;
4495    default:
4496      return false;
4497    }
4498
4499  gsi = gsi_last_nondebug_bb (bb);
4500  resx = gsi_stmt (gsi);
4501  if (resx && is_gimple_resx (resx))
4502    {
4503      if (stmt_can_throw_external (resx))
4504	optimize_clobbers (bb);
4505      else if (sink_clobbers (bb))
4506	ret = true;
4507    }
4508
4509  gsi = gsi_after_labels (bb);
4510
4511  /* Make sure to skip debug statements.  */
4512  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4513    gsi_next_nondebug (&gsi);
4514
4515  /* If the block is totally empty, look for more unsplitting cases.  */
4516  if (gsi_end_p (gsi))
4517    {
4518      /* For the degenerate case of an infinite loop bail out.
4519	 If bb has no successors and is totally empty, which can happen e.g.
4520	 because of incorrect noreturn attribute, bail out too.  */
4521      if (e_out == NULL
4522	  || infinite_empty_loop_p (e_out))
4523	return ret;
4524
4525      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4526    }
4527
4528  /* The block should consist only of a single RESX statement, modulo a
4529     preceding call to __builtin_stack_restore if there is no outgoing
4530     edge, since the call can be eliminated in this case.  */
4531  resx = gsi_stmt (gsi);
4532  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4533    {
4534      gsi_next_nondebug (&gsi);
4535      resx = gsi_stmt (gsi);
4536    }
4537  if (!is_gimple_resx (resx))
4538    return ret;
4539  gcc_assert (gsi_one_nondebug_before_end_p (gsi));
4540
4541  /* Determine if there are non-EH edges, or resx edges into the handler.  */
4542  has_non_eh_pred = false;
4543  FOR_EACH_EDGE (e, ei, bb->preds)
4544    if (!(e->flags & EDGE_EH))
4545      has_non_eh_pred = true;
4546
4547  /* Find the handler that's outer of the empty handler by looking at
4548     where the RESX instruction was vectored.  */
4549  new_lp_nr = lookup_stmt_eh_lp (resx);
4550  new_region = get_eh_region_from_lp_number (new_lp_nr);
4551
4552  /* If there's no destination region within the current function,
4553     redirection is trivial via removing the throwing statements from
4554     the EH region, removing the EH edges, and allowing the block
4555     to go unreachable.  */
4556  if (new_region == NULL)
4557    {
4558      gcc_assert (e_out == NULL);
4559      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4560	if (e->flags & EDGE_EH)
4561	  {
4562	    gimple *stmt = last_stmt (e->src);
4563	    remove_stmt_from_eh_lp (stmt);
4564	    remove_edge (e);
4565	  }
4566	else
4567	  ei_next (&ei);
4568      goto succeed;
4569    }
4570
4571  /* If the destination region is a MUST_NOT_THROW, allow the runtime
4572     to handle the abort and allow the blocks to go unreachable.  */
4573  if (new_region->type == ERT_MUST_NOT_THROW)
4574    {
4575      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4576	if (e->flags & EDGE_EH)
4577	  {
4578	    gimple *stmt = last_stmt (e->src);
4579	    remove_stmt_from_eh_lp (stmt);
4580	    add_stmt_to_eh_lp (stmt, new_lp_nr);
4581	    remove_edge (e);
4582	  }
4583	else
4584	  ei_next (&ei);
4585      goto succeed;
4586    }
4587
4588  /* Try to redirect the EH edges and merge the PHIs into the destination
4589     landing pad block.  If the merge succeeds, we'll already have redirected
4590     all the EH edges.  The handler itself will go unreachable if there were
4591     no normal edges.  */
4592  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4593    goto succeed;
4594
4595  /* Finally, if all input edges are EH edges, then we can (potentially)
4596     reduce the number of transfers from the runtime by moving the landing
4597     pad from the original region to the new region.  This is a win when
4598     we remove the last CLEANUP region along a particular exception
4599     propagation path.  Since nothing changes except for the region with
4600     which the landing pad is associated, the PHI nodes do not need to be
4601     adjusted at all.  */
4602  if (!has_non_eh_pred)
4603    {
4604      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4605      if (dump_file && (dump_flags & TDF_DETAILS))
4606	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4607		 lp->index, new_region->index);
4608
4609      /* ??? The CFG didn't change, but we may have rendered the
4610	 old EH region unreachable.  Trigger a cleanup there.  */
4611      return true;
4612    }
4613
4614  return ret;
4615
4616 succeed:
4617  if (dump_file && (dump_flags & TDF_DETAILS))
4618    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4619  remove_eh_landing_pad (lp);
4620  return true;
4621}
4622
4623	/* Walk over the landing pad array.  Examine each
4624	   post_landing_pad block and see if we can eliminate it as empty.  */
4625
4626static bool
4627cleanup_all_empty_eh (void)
4628{
4629  bool changed = false;
4630  eh_landing_pad lp;
4631  int i;
4632
4633  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4634    if (lp)
4635      changed |= cleanup_empty_eh (lp);
4636
4637  return changed;
4638}
4639
4640	/* Perform cleanups and lowering of exception handling:
4641	    1) Cleanup regions with handlers doing nothing are optimized out.
4642	    2) MUST_NOT_THROW regions that became dead because of 1) are optimized out.
4643	    3) Info about regions containing instructions, and regions
4644	       reachable via local EH edges, is collected.
4645	    4) The EH tree is pruned of regions that are no longer necessary.
4646	
4647	   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4648		 Unify those that have the same failure decl and locus.
4649	*/
4650
4651static unsigned int
4652execute_cleanup_eh_1 (void)
4653{
4654  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4655     looking up unreachable landing pads.  */
4656  remove_unreachable_handlers ();
4657
4658	  /* Watch out for the region tree vanishing due to everything being unreachable.  */
4659  if (cfun->eh->region_tree)
4660    {
4661      bool changed = false;
4662
4663      if (optimize)
4664	changed |= unsplit_all_eh ();
4665      changed |= cleanup_all_empty_eh ();
4666
4667      if (changed)
4668	{
4669	  free_dominance_info (CDI_DOMINATORS);
4670	  free_dominance_info (CDI_POST_DOMINATORS);
4671
4672          /* We delayed all basic block deletion, as we may have performed
4673	     cleanups on EH edges while non-EH edges were still present.  */
4674	  delete_unreachable_blocks ();
4675
4676	  /* We manipulated the landing pads.  Remove any region that no
4677	     longer has a landing pad.  */
4678	  remove_unreachable_handlers_no_lp ();
4679
4680	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4681	}
4682    }
4683
4684  return 0;
4685}
4686
4687namespace {
4688
4689const pass_data pass_data_cleanup_eh =
4690{
4691  GIMPLE_PASS, /* type */
4692  "ehcleanup", /* name */
4693  OPTGROUP_NONE, /* optinfo_flags */
4694  TV_TREE_EH, /* tv_id */
4695  PROP_gimple_lcf, /* properties_required */
4696  0, /* properties_provided */
4697  0, /* properties_destroyed */
4698  0, /* todo_flags_start */
4699  0, /* todo_flags_finish */
4700};
4701
4702class pass_cleanup_eh : public gimple_opt_pass
4703{
4704public:
4705  pass_cleanup_eh (gcc::context *ctxt)
4706    : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4707  {}
4708
4709  /* opt_pass methods: */
4710  opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4711  virtual bool gate (function *fun)
4712    {
4713      return fun->eh != NULL && fun->eh->region_tree != NULL;
4714    }
4715
4716  virtual unsigned int execute (function *);
4717
4718}; // class pass_cleanup_eh
4719
4720unsigned int
4721pass_cleanup_eh::execute (function *fun)
4722{
4723  int ret = execute_cleanup_eh_1 ();
4724
4725	  /* If the function no longer needs an EH personality routine,
4726     clear it.  This exposes cross-language inlining opportunities
4727     and avoids references to a never defined personality routine.  */
4728  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4729      && function_needs_eh_personality (fun) != eh_personality_lang)
4730    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4731
4732  return ret;
4733}
4734
4735} // anon namespace
4736
4737gimple_opt_pass *
4738make_pass_cleanup_eh (gcc::context *ctxt)
4739{
4740  return new pass_cleanup_eh (ctxt);
4741}
4742
4743	/* Verify that the BB containing STMT as its last statement has precisely
4744	   the edges that make_eh_edges would create.  */
4745
4746DEBUG_FUNCTION bool
4747verify_eh_edges (gimple *stmt)
4748{
4749  basic_block bb = gimple_bb (stmt);
4750  eh_landing_pad lp = NULL;
4751  int lp_nr;
4752  edge_iterator ei;
4753  edge e, eh_edge;
4754
4755  lp_nr = lookup_stmt_eh_lp (stmt);
4756  if (lp_nr > 0)
4757    lp = get_eh_landing_pad_from_number (lp_nr);
4758
4759  eh_edge = NULL;
4760  FOR_EACH_EDGE (e, ei, bb->succs)
4761    {
4762      if (e->flags & EDGE_EH)
4763	{
4764	  if (eh_edge)
4765	    {
4766	      error ("BB %i has multiple EH edges", bb->index);
4767	      return true;
4768	    }
4769	  else
4770	    eh_edge = e;
4771	}
4772    }
4773
4774  if (lp == NULL)
4775    {
4776      if (eh_edge)
4777	{
4778	  error ("BB %i can not throw but has an EH edge", bb->index);
4779	  return true;
4780	}
4781      return false;
4782    }
4783
4784  if (!stmt_could_throw_p (stmt))
4785    {
4786      error ("BB %i last statement has incorrectly set lp", bb->index);
4787      return true;
4788    }
4789
4790  if (eh_edge == NULL)
4791    {
4792      error ("BB %i is missing an EH edge", bb->index);
4793      return true;
4794    }
4795
4796  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4797    {
4798      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4799      return true;
4800    }
4801
4802  return false;
4803}
4804
4805/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */
4806
4807DEBUG_FUNCTION bool
4808verify_eh_dispatch_edge (geh_dispatch *stmt)
4809{
4810  eh_region r;
4811  eh_catch c;
4812  basic_block src, dst;
4813  bool want_fallthru = true;
4814  edge_iterator ei;
4815  edge e, fall_edge;
4816
4817  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4818  src = gimple_bb (stmt);
4819
4820  FOR_EACH_EDGE (e, ei, src->succs)
4821    gcc_assert (e->aux == NULL);
4822
4823  switch (r->type)
4824    {
4825    case ERT_TRY:
4826      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4827	{
4828	  dst = label_to_block (c->label);
4829	  e = find_edge (src, dst);
4830	  if (e == NULL)
4831	    {
4832	      error ("BB %i is missing an edge", src->index);
4833	      return true;
4834	    }
4835	  e->aux = (void *)e;
4836
4837	  /* A catch-all handler doesn't have a fallthru.  */
4838	  if (c->type_list == NULL)
4839	    {
4840	      want_fallthru = false;
4841	      break;
4842	    }
4843	}
4844      break;
4845
4846    case ERT_ALLOWED_EXCEPTIONS:
4847      dst = label_to_block (r->u.allowed.label);
4848      e = find_edge (src, dst);
4849      if (e == NULL)
4850	{
4851	  error ("BB %i is missing an edge", src->index);
4852	  return true;
4853	}
4854      e->aux = (void *)e;
4855      break;
4856
4857    default:
4858      gcc_unreachable ();
4859    }
4860
4861  fall_edge = NULL;
4862  FOR_EACH_EDGE (e, ei, src->succs)
4863    {
4864      if (e->flags & EDGE_FALLTHRU)
4865	{
4866	  if (fall_edge != NULL)
4867	    {
4868	      error ("BB %i too many fallthru edges", src->index);
4869	      return true;
4870	    }
4871	  fall_edge = e;
4872	}
4873      else if (e->aux)
4874	e->aux = NULL;
4875      else
4876	{
4877	  error ("BB %i has incorrect edge", src->index);
4878	  return true;
4879	}
4880    }
4881  if ((fall_edge != NULL) ^ want_fallthru)
4882    {
4883      error ("BB %i has incorrect fallthru edge", src->index);
4884      return true;
4885    }
4886
4887  return false;
4888}
4889