/* Utilities for ipa analysis.
   Copyright (C) 2005-2022 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "dumpfile.h"
#include "splay-tree.h"
#include "ipa-utils.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"

/* Debugging function for postorder and inorder code. NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
		 const char * note,
		 struct cgraph_node** order,
		 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    order[i]->dump (out);
  fprintf (out, "\n");
  fflush (out);
}
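
/* For example, a pass that has just computed a reduced postorder might dump
   it for debugging with something like the following sketch (ORDER and N as
   produced by ipa_reduced_postorder below; the names are illustrative only):

     ipa_print_order (dump_file, "reduced", order, n);  */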


struct searchc_env {
  struct cgraph_node **stack;
  struct cgraph_node **result;
  int stack_size;
  int order_pos;
  splay_tree nodes_marked_new;
  bool reduce;
  int count;
};

/* This is an implementation of Tarjan's strongly connected region
   finder as reprinted in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1975) pages 192-193.  This version
   has been customized for cgraph_nodes.  Because the function is
   recursive and C has no nested functions, the traversal state is
   carried in ENV; this function should only be called from itself or
   ipa_reduced_postorder.  V is the node to start searching from.  */

static void
searchc (struct searchc_env* env, struct cgraph_node *v,
	 bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* Mark node as old.  */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->get_uid ());

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
	  && (avail >= AVAIL_INTERPOSABLE))
	{
	  w_info = (struct ipa_dfs_info *) w->aux;
	  if (w_info->new_node)
	    {
	      searchc (env, w, ignore_edge);
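	      /* The recursive visit is done; pull the smallest DFS number
		 reachable from W's subtree up into V's low link.  */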
	      v_info->low_link =
		(v_info->low_link < w_info->low_link) ?
		v_info->low_link : w_info->low_link;
	    }
	  else
	    if ((w_info->dfn_number < v_info->dfn_number)
		&& (w_info->on_stack))
	      v_info->low_link =
		(w_info->dfn_number < v_info->low_link) ?
		w_info->dfn_number : v_info->low_link;
	}
    }


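  /* If V's low link equals its own DFS number, V is the root of a strongly
     connected component; pop the whole component off the stack.  */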
  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do {
	x = env->stack[--(env->stack_size)];
	x_info = (struct ipa_dfs_info *) x->aux;
	x_info->on_stack = false;
	x_info->scc_no = v_info->dfn_number;

	if (env->reduce)
	  {
	    x_info->next_cycle = last;
	    last = x;
	  }
	else
	  env->result[env->order_pos++] = x;
      }
      while (v != x);
      if (env->reduce)
	env->result[env->order_pos++] = v;
    }
}

/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Nodes with AVAIL_INTERPOSABLE availability or better are included.
   IGNORE_EDGE, if non-NULL, is a hook that may make some edges insignificant
   for the topological sort.  */

int
ipa_reduced_postorder (struct cgraph_node **order,
		       bool reduce,
		       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = node->get_availability ();

      if (avail >= AVAIL_INTERPOSABLE)
	{
	  /* Reuse the info if it is already there.  */
	  struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
	  if (!info)
	    info = XCNEW (struct ipa_dfs_info);
	  info->new_node = true;
	  info->on_stack = false;
	  info->next_cycle = NULL;
	  node->aux = info;

	  splay_tree_insert (env.nodes_marked_new,
			     (splay_tree_key)node->get_uid (),
			     (splay_tree_value)node);
	}
      else
	node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}
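
/* A minimal usage sketch (hypothetical caller; the array size and the NULL
   ignore_edge hook are assumptions of this example, not requirements of any
   particular pass):

     cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
     int n = ipa_reduced_postorder (order, true, NULL);
     for (int i = 0; i < n; i++)
       ... process order[i], e.g. via ipa_get_nodes_in_cycle ...
     ipa_free_postorder_info ();
     free (order);  */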

/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of call
   graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
	{
	  free (node->aux);
	  node->aux = NULL;
	}
    }
}

/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */

vec<cgraph_node *>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node *> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}
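
/* For instance, a caller holding a node from a reduced postorder might walk
   the members of its cycle like this (a sketch; process_node is a
   placeholder):

     vec<cgraph_node *> cycle = ipa_get_nodes_in_cycle (order[i]);
     unsigned ix;
     cgraph_node *n;
     FOR_EACH_VEC_ELT (cycle, ix, n)
       process_node (n);
     cycle.release ();  */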

/* Return true iff CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cs->callee->function_symbol ();

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
	  && callee_dfs
	  && caller_dfs->scc_no == callee_dfs->scc_no);
}

struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};

/* Fill array ORDER with all functions in reverse topological order and
   return the number of elements in the array.
   FIXME: While walking, consider aliases, too.  */

int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref = NULL;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
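  /* Pass 0 starts the walk only from "root" functions (those that are not
     address taken, not inlined, not aliases or thunks, and not only called
     directly); pass 1 then visits whatever was not reached in pass 0.  */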
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
	  && (pass
	      || (!node->address_taken
		  && !node->inlined_to
		  && !node->alias && !node->thunk
		  && !node->only_called_directly_p ())))
	{
	  stack_size = 0;
	  stack[stack_size].node = node;
	  stack[stack_size].edge = node->callers;
	  stack[stack_size].ref = 0;
	  node->aux = (void *)(size_t)1;
	  while (stack_size >= 0)
	    {
	      while (true)
		{
		  node2 = NULL;
		  while (stack[stack_size].edge && !node2)
		    {
		      edge = stack[stack_size].edge;
		      node2 = edge->caller;
		      stack[stack_size].edge = edge->next_caller;
		      /* Break possible cycles involving always-inline
			 functions by ignoring edges from always-inline
			 functions to non-always-inline functions.  */
		      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
			  && !DECL_DISREGARD_INLINE_LIMITS
			    (edge->callee->function_symbol ()->decl))
			node2 = NULL;
		    }
		  for (; stack[stack_size].node->iterate_referring (
						       stack[stack_size].ref,
						       ref) && !node2;
		       stack[stack_size].ref++)
		    {
		      if (ref->use == IPA_REF_ALIAS)
			node2 = dyn_cast <cgraph_node *> (ref->referring);
		    }
		  if (!node2)
		    break;
		  if (!node2->aux)
		    {
		      stack[++stack_size].node = node2;
		      stack[stack_size].edge = node2->callers;
		      stack[stack_size].ref = 0;
		      node2->aux = (void *)(size_t)1;
		    }
		}
	      order[order_pos++] = stack[stack_size--].node;
	    }
	}
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  return order_pos;
}



/* Given a memory reference T, return the variable at the bottom of the
   access.  Unlike get_base_address, this will recurse through
   INDIRECT_REFS.  */

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
	 && (!CONSTANT_CLASS_P (t))
	 && TREE_CODE (t) != LABEL_DECL
	 && TREE_CODE (t) != FUNCTION_DECL
	 && TREE_CODE (t) != CONST_DECL
	 && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}
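
/* For example, for a reference such as a.b[i] this returns the VAR_DECL for
   "a", and for (*p).x it strips through the indirection and returns "p"
   (the names here are of course only illustrative).  */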

/* Scale counts of all calls in NODE so that edge counts that corresponded to
   the original count ORIG_COUNT now correspond to NODE->count.  */

void
scale_ipa_profile_for_fn (struct cgraph_node *node, profile_count orig_count)
{
  profile_count to = node->count;
  profile_count::adjust_for_ipa_scaling (&to, &orig_count);
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
  for (e = node->indirect_calls; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
}

/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Possibly destroy SRC's body on the way
   unless PRESERVE_BODY is set.  */

void
ipa_merge_profiles (struct cgraph_node *dst,
		    struct cgraph_node *src,
		    bool preserve_body)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;
  bool copy_counts = false;

  if (!src->definition
      || !dst->definition)
    return;

  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;

  /* Time profiles are merged.  */
  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
    dst->tp_first_run = src->tp_first_run;

  if (src->profile_id && !dst->profile_id)
    dst->profile_id = src->profile_id;

  /* Merging a zero profile into DST is a no-op.  */
  if (src->count.ipa () == profile_count::zero ())
    return;

  /* FIXME: when we merge in an unknown profile, we ought to set the counts
     as unsafe.  */
  if (!src->count.initialized_p ()
      || !(src->count.ipa () == src->count))
    return;
  profile_count orig_count = dst->count;

  /* Either sum the profiles if both are IPA and not global0, or pick the
     more informative one (that is, nonzero IPA if the other is
     uninitialized, guessed or global0).  */

  if ((dst->count.ipa ().nonzero_p ()
       || src->count.ipa ().nonzero_p ())
      && dst->count.ipa ().initialized_p ()
      && src->count.ipa ().initialized_p ())
    dst->count = dst->count.ipa () + src->count.ipa ();
  else if (dst->count.ipa ().initialized_p ())
    ;
  else if (src->count.ipa ().initialized_p ())
    {
      copy_counts = true;
      dst->count = src->count.ipa ();
    }

  /* If no updating is needed, return early.  */
  if (dst->count == orig_count)
    return;

  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Merging profiles of %s count:",
	       src->dump_name ());
      src->count.dump (symtab->dump_file);
      fprintf (symtab->dump_file, " to %s count:",
	       dst->dump_name ());
      orig_count.dump (symtab->dump_file);
      fprintf (symtab->dump_file, " resulting count:");
      dst->count.dump (symtab->dump_file);
      fprintf (symtab->dump_file, "\n");
    }

  /* First handle functions with no gimple body.  */
  if (dst->thunk || dst->alias
      || src->thunk || src->alias)
    {
      scale_ipa_profile_for_fn (dst, orig_count);
      return;
    }

  /* This is ugly.  We need to get both function bodies into memory.
     If the declaration is merged, we need to duplicate it to be able
     to load the body that is being replaced.  This makes the symbol table
     temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl; we want to remove its file decl data
	 and link it with the new decl.  */
      temp.fn_decl = src->decl;
      lto_in_decl_state **slot
	= src->lto_file_data->function_decl_states->find_slot (&temp,
							       NO_INSERT);
      state = *slot;
      src->lto_file_data->function_decl_states->clear_slot (slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into body of DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with new declaration, so LTO streamer
	 can look it up.  */
      state->fn_decl = src->decl;
      slot
	= src->lto_file_data->function_decl_states->find_slot (state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }
  src->get_untransformed_body ();
  dst->get_untransformed_body ();
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_fn (srccfun)
      != n_basic_blocks_for_fn (dstcfun))
    {
      if (symtab->dump_file)
	fprintf (symtab->dump_file,
		 "Giving up; basic block count mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_fn (srccfun)
	   != last_basic_block_for_fn (dstcfun))
    {
      if (symtab->dump_file)
	fprintf (symtab->dump_file,
		 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;
      struct cgraph_edge *e, *e2;

      for (e = dst->callees, e2 = src->callees; e && e2 && match;
	   e2 = e2->next_callee, e = e->next_callee)
	{
	  if (gimple_bb (e->call_stmt)->index
	      != gimple_bb (e2->call_stmt)->index)
	    {
	      if (symtab->dump_file)
		fprintf (symtab->dump_file,
			 "Giving up; call stmt mismatch.\n");
	      match = false;
	    }
	}
      if (e || e2)
	{
	  if (symtab->dump_file)
	    fprintf (symtab->dump_file,
		     "Giving up; number of calls differs.\n");
	  match = false;
	}
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e && e2 && match;
	   e2 = e2->next_callee, e = e->next_callee)
	{
	  if (gimple_bb (e->call_stmt)->index
	      != gimple_bb (e2->call_stmt)->index)
	    {
	      if (symtab->dump_file)
		fprintf (symtab->dump_file,
			 "Giving up; indirect call stmt mismatch.\n");
	      match = false;
	    }
	}
      if (e || e2)
	{
	  if (symtab->dump_file)
	    fprintf (symtab->dump_file,
		     "Giving up; number of indirect calls differs.\n");
	  match = false;
	}

      if (match)
	FOR_ALL_BB_FN (srcbb, srccfun)
	  {
	    unsigned int i;

	    dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
	    if (dstbb == NULL)
	      {
		if (symtab->dump_file)
		  fprintf (symtab->dump_file,
			   "No matching block for bb %i.\n",
			   srcbb->index);
		match = false;
		break;
	      }
	    if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
	      {
		if (symtab->dump_file)
		  fprintf (symtab->dump_file,
			   "Edge count mismatch for bb %i.\n",
			   srcbb->index);
		match = false;
		break;
	      }
	    for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
	      {
		edge srce = EDGE_SUCC (srcbb, i);
		edge dste = EDGE_SUCC (dstbb, i);
		if (srce->dest->index != dste->dest->index)
		  {
		    if (symtab->dump_file)
		      fprintf (symtab->dump_file,
			       "Succ edge mismatch for bb %i.\n",
			       srce->dest->index);
		    match = false;
		    break;
		  }
	      }
	  }
    }
  if (match)
    {
      struct cgraph_edge *e, *e2;
      basic_block srcbb, dstbb;

      /* Function and global profile may be out of sync.  First scale it the
	 same way fixup_cfg would.  */
      profile_count srcnum = src->count;
      profile_count srcden = ENTRY_BLOCK_PTR_FOR_FN (srccfun)->count;
      bool srcscale = srcnum.initialized_p () && !(srcnum == srcden);
      profile_count dstnum = orig_count;
      profile_count dstden = ENTRY_BLOCK_PTR_FOR_FN (dstcfun)->count;
      bool dstscale = !copy_counts
		      && dstnum.initialized_p () && !(dstnum == dstden);

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
	{
	  unsigned int i;

	  dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);

	  profile_count srccount = srcbb->count;
	  if (srcscale)
	    srccount = srccount.apply_scale (srcnum, srcden);
	  if (dstscale)
	    dstbb->count = dstbb->count.apply_scale (dstnum, dstden);

	  if (copy_counts)
	    {
	      dstbb->count = srccount;
	      for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
		{
		  edge srce = EDGE_SUCC (srcbb, i);
		  edge dste = EDGE_SUCC (dstbb, i);
		  if (srce->probability.initialized_p ())
		    dste->probability = srce->probability;
		}
	    }
	  else
	    {
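	      /* Merge each edge probability as a weighted average of the
		 DST and SRC probabilities, weighted by the respective IPA
		 block counts.  */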
	      for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
		{
		  edge srce = EDGE_SUCC (srcbb, i);
		  edge dste = EDGE_SUCC (dstbb, i);
		  dste->probability =
		    dste->probability * dstbb->count.ipa ().probability_in
						 (dstbb->count.ipa ()
						  + srccount.ipa ())
		    + srce->probability * srcbb->count.ipa ().probability_in
						 (dstbb->count.ipa ()
						  + srccount.ipa ());
		}
	      dstbb->count = dstbb->count.ipa () + srccount.ipa ();
	    }
	}
      push_cfun (dstcfun);
      update_max_bb_count ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
	{
	  if (e->speculative)
	    continue;
	  e->count = gimple_bb (e->call_stmt)->count;
	}
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
	   e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
	{
	  if (!e->speculative && !e2->speculative)
	    {
	      /* FIXME: we need to also merge ipa-profile histograms
		 because with LTO merging happens from lto-symtab before
		 these are converted to indirect edges.  */
	      e->count = gimple_bb (e->call_stmt)->count;
	      continue;
	    }

	  /* When copying, just remove all speculations on DST and then copy
	     the ones from SRC.  */
	  if (copy_counts)
	    {
	      while (e->speculative)
		cgraph_edge::resolve_speculation (e, NULL);
	      e->count = gimple_bb (e->call_stmt)->count;
	      if (e2->speculative)
		{
		  for (cgraph_edge *e3 = e2->first_speculative_call_target ();
		       e3;
		       e3 = e3->next_speculative_call_target ())
		    {
		      cgraph_edge *ns;
		      ns = e->make_speculative
			 (dyn_cast <cgraph_node *>
			    (e3->speculative_call_target_ref ()->referred),
			     e3->count, e3->speculative_id);
		      /* Target may differ from ref (for example it may be
			 redirected to a local alias).  */
		      ns->redirect_callee (e3->callee);
		    }
		}
	      continue;
	    }

	  /* Iterate all speculations in SRC, see if corresponding ones exist
	     in DST and if so, sum the counts.  Otherwise create new
	     speculation.  */
	  int max_spec = 0;
	  for (cgraph_edge *e3 = e->first_speculative_call_target ();
	       e3;
	       e3 = e3->next_speculative_call_target ())
	    if (e3->speculative_id > max_spec)
	      max_spec = e3->speculative_id;
	  for (cgraph_edge *e3 = e2->first_speculative_call_target ();
	       e3;
	       e3 = e3->next_speculative_call_target ())
	    {
	      cgraph_edge *te
		 = e->speculative_call_for_target
			 (dyn_cast <cgraph_node *>
			    (e3->speculative_call_target_ref ()->referred));
	      if (te)
		te->count = te->count + e3->count;
	      else
		{
		  e->count = e->count + e3->count;
		  cgraph_edge *ns;
		  ns = e->make_speculative
			 (dyn_cast <cgraph_node *>
			    (e3->speculative_call_target_ref ()
			     ->referred),
			  e3->count,
			  e3->speculative_id + max_spec + 1);
		  /* Target may differ from ref (for example it may be
		     redirected to a local alias).  */
		  ns->redirect_callee (e3->callee);
		}
	    }
	}
      if (!preserve_body)
        src->release_body ();
      /* Update summary.  */
      compute_fn_summary (dst, 0);
    }
  /* We can't update CFG profile, but we can scale IPA profile. CFG
     will be scaled according to dst->count after IPA passes.  */
  else
    scale_ipa_profile_for_fn (dst, orig_count);
  src->decl = oldsrcdecl;
}

/* Return true if a call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
  struct cgraph_node *cnode = cgraph_node::get_create (func);
  ipa_ref *alias;
  enum availability avail;

  gcc_assert (!cnode->alias);
  if (cnode != dest_node->ultimate_alias_target (&avail))
    return false;
  if (avail >= AVAIL_AVAILABLE)
    return true;
  if (!dest_node->semantically_equivalent_p (cnode))
    return false;
  /* If there is only one way to call the function or we know all of them
     are semantically equivalent, we still can consider the call recursive.  */
  FOR_EACH_ALIAS (cnode, alias)
    if (!dest_node->semantically_equivalent_p (alias->referring))
      return false;
  return true;
}

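/* For example, a caller examining a gimple call statement STMT in the body
   of the current function might use this predicate roughly as follows
   (a sketch; STMT is a hypothetical gcall here):

     tree callee = gimple_call_fndecl (stmt);
     if (callee && recursive_call_p (current_function_decl, callee))
       ... the call is known to reenter the current function ...  */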