/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2023 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "gdbsupport/rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"
#include "gdbarch.h"

/* For maintenance commands.  */
#include "record-btrace.h"

#include <inttypes.h>
#include <ctype.h>
#include <algorithm>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static bool maint_btrace_pt_skip_pad = true;

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	gdb_printf (gdb_stdlog,						\
		    "[btrace] " msg "\n", ##args);			\
    }									\
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();

  if (msym != NULL)
    return msym->print_name ();

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (sym->symtab ());
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + bfun->insn.size ();

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}

/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return bfun->insn.size ();
}

/* Return the function segment with the given NUMBER or NULL if no such segment
   exists.  BTINFO is the branch trace information for the current thread.  */

static struct btrace_function *
ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

/* A const version of the function above.  */

static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (sym->symtab ());
      fname = symtab_to_fullname (fun->symtab ());
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.
   This invalidates all struct btrace_function pointers currently held.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  int level;
  unsigned int number, insn_offset;

  if (btinfo->functions.empty ())
    {
      /* Start counting NUMBER and INSN_OFFSET at one.  */
      level = 0;
      number = 1;
      insn_offset = 1;
    }
  else
    {
      const struct btrace_function *prev = &btinfo->functions.back ();
      level = prev->level;
      number = prev->number + 1;
      insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
    }

  btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
  return &btinfo->functions.back ();
}

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      btrace_function_flags flags)
{
  if (bfun->up != 0)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller->number;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
		     struct btrace_function *bfun,
		     struct btrace_function *caller,
		     btrace_function_flags flags)
{
  unsigned int prev, next;

  prev = bfun->prev;
  next = bfun->next;
  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (; prev != 0; prev = bfun->prev)
    {
      bfun = ftrace_find_call_by_number (btinfo, prev);
      ftrace_update_caller (bfun, caller, flags);
    }

  for (; next != 0; next = bfun->next)
    {
      bfun = ftrace_find_call_by_number (btinfo, next);
      ftrace_update_caller (bfun, caller, flags);
    }
}

/* Add a new function segment for a call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

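  /* The function segment that was last in the trace before we added BFUN
     (number LENGTH) is BFUN's caller; zero if the trace was empty.  */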
  bfun->up = length;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  BTINFO is the branch trace information for
   the current thread.  */
static struct btrace_function *
ftrace_get_caller (struct btrace_thread_info *btinfo,
		   struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return ftrace_find_call_by_number (btinfo, bfun->up);

  return NULL;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  BTINFO is the branch trace information for the current
   thread.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun,
		    struct minimal_symbol *mfun,
		    struct symbol *fun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
	continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_find_call (struct btrace_thread_info *btinfo,
		  struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      btrace_insn &last = bfun->insn.back ();

      if (last.iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_call_by_number (btinfo, prev->up);
  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->next == 0);

      caller->next = bfun->number;
      bfun->prev = caller->number;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call_by_number (btinfo, prev->up);
      caller = ftrace_find_call (btinfo, caller);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost function and add a new caller for it.
	     This should handle a series of initial tail calls.  */
	  while (prev->up != 0)
	    prev = ftrace_find_call_by_number (btinfo, prev->up);

	  bfun->level = prev->level - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned but didn't.  Let's start a new, separate back trace
	     from PREV's level.  */
	  bfun->level = prev->level - 1;

	  /* We fix up the back trace for PREV but leave other function segments
	     on the same level as they are.
	     This should handle things like schedule () correctly where we're
	     switching contexts.  */
	  prev->up = bfun->number;
	  prev->flags = BFUN_UP_LINKS_TO_RET;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}

/* Add a new function segment for a function switch at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best we can do right now is to preserve it.  */
  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error at
   the end of the trace.
   BTINFO is the branch trace information for the current thread.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
		std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;

  if (btinfo->functions.empty ())
    bfun = ftrace_new_function (btinfo, NULL, NULL);
  else
    {
      /* We hijack the previous function segment if it was empty.  */
      bfun = &btinfo->functions.back ();
      if (bfun->errcode != 0 || !bfun->insn.empty ())
	bfun = ftrace_new_function (btinfo, NULL, NULL);
    }

  bfun->errcode = errcode;
  gaps.push_back (bfun->number);

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  btrace_insn *last = NULL;
  if (!bfun->insn.empty ())
    last = &bfun->insn.back ();

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    return ftrace_new_return (btinfo, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (btinfo, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.  */
	    if (start == pc)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    /* Some versions of _Unwind_RaiseException use an indirect
	       jump to 'return' to the exception handler of the caller
	       handling the exception instead of a return.  Let's restrict
	       this heuristic to that and related functions.  */
	    const char *fname = ftrace_print_function_name (bfun);
	    if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
	      {
		struct btrace_function *caller
		  = ftrace_find_call_by_number (btinfo, bfun->up);
		caller = ftrace_find_caller (btinfo, caller, mfun, fun);
		if (caller != NULL)
		  return ftrace_new_return (btinfo, mfun, fun);
	      }

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as a tail call if we're switching functions
	       and as an intra-function branch if we're not.  */
	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    break;
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}

/* Add INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
{
  bfun->insn.push_back (insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  try
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  catch (const gdb_exception_error &error)
    {
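      /* If we fail to analyze the instruction, e.g. because we cannot read
	 the code at PC, keep the default BTRACE_INSN_OTHER classification
	 set above.  */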
    }

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  BTINFO is the branch trace information for the current thread.  */

static int
ftrace_match_backtrace (struct btrace_thread_info *btinfo,
			struct btrace_function *lhs,
			struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
    }

  return matches;
}

/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
   BTINFO is the branch trace information for the current thread.  */

static void
ftrace_fixup_level (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  while (bfun != NULL)
    {
      bfun->level += adjustment;
      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
    }
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size () - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i].level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = &btinfo->functions.back ();
  if (last->insn.size () != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}

/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
		     struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);

  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (btinfo, prev, next, flags);
	}
    }
  else if (next->up == 0)
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (btinfo, next, prev, flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags next_flags, prev_flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = ftrace_find_call_by_number (btinfo, next->up);
	  next_flags = next->flags;
	  prev_flags = prev->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  prev = ftrace_find_call_by_number (btinfo, prev->up);
	  ftrace_fixup_caller (btinfo, next, prev, prev_flags);

	  for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
								  prev->up))
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == 0)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (btinfo, prev, caller, next_flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (btinfo, caller,
				      prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
			  struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.  BTINFO is
   the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
		   struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
	 cand_r = ftrace_get_caller (btinfo, cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}

/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
  std::vector<unsigned int> remaining;
  int min_matches;

  DEBUG ("bridge gaps");

  /* We require a minimum number of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches we have, the higher our confidence that the bridging is
     correct.  For big gaps or small traces, however, it may not be feasible to
     require a high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
      while (!gaps.empty ())
	{
	  for (const unsigned int number : gaps)
	    {
	      struct btrace_function *gap, *lhs, *rhs;
	      int bridged;

	      gap = ftrace_find_call_by_number (btinfo, number);

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.

		 Also ignore gaps at the beginning of the trace.  */
	      lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
	      if (lhs == NULL || lhs->errcode != 0)
		continue;

	      /* Skip gaps to the right.  */
	      rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
	      while (rhs != NULL && rhs->errcode != 0)
		rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);

	      /* Ignore gaps at the end of the trace.  */
	      if (rhs == NULL)
		continue;

	      bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);

	      /* Keep track of gaps we were not able to bridge and try again.
		 If we just pushed them to the end of GAPS we would risk an
		 infinite loop in case we simply cannot bridge a gap.  */
	      if (bridged == 0)
		remaining.push_back (number);
	    }

	  /* Let's see if we made any progress.  */
	  if (remaining.size () == gaps.size ())
	    break;

	  gaps.clear ();
	  gaps.swap (remaining);
	}

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (gaps.empty ())
	break;

      remaining.clear ();
    }

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (btinfo);
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace,
			   std::vector<unsigned int> &gaps)
{
 /* We may end up doing target calls that require the current thread to be TP,
    for example reading memory through gdb_insn_length.  Make sure TP is the
    current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  blk = btrace->blocks->size ();

  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  while (blk != 0)
    {
      CORE_ADDR pc;

      blk -= 1;

      const btrace_block &block = btrace->blocks->at (blk);
      pc = block.begin;

      for (;;)
	{
	  struct btrace_function *bfun;
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block.end < pc)
	    {
	      /* Indicate the gap in the trace.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);

	      warning (_("Recorded trace may be corrupted at instruction "
			 "%u (pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  bfun = ftrace_update_function (btinfo, pc);

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = std::min (level, bfun->level);

	  size = 0;
	  try
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  catch (const gdb_exception_error &error)
	    {
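	      /* Leave SIZE at zero.  We treat this as a trace gap below.  */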
	    }

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (bfun, insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block.end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);

	      warning (_("Recorded trace may be incomplete at instruction %u "
			 "(pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = std::min (level, bfun->level);
	}
    }

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

#if defined (HAVE_LIBIPT)

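/* Translate an instruction classification from libipt's representation into
   btrace's.  */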
static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
	  pt_reclassify_insn (insn.iclass),
	  pt_btrace_insn_flags (insn)};
}

/* Handle instruction decode events (libipt-v2).  */

static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
		       struct pt_insn_decoder *decoder,
		       std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
  while (status & pts_event_pending)
    {
      struct btrace_function *bfun;
      struct pt_event event;
      uint64_t offset;

      status = pt_insn_event (decoder, &event, sizeof (event));
      if (status < 0)
	break;

      switch (event.type)
	{
	default:
	  break;

	case ptev_enabled:
	  if (event.status_update != 0)
	    break;

	  if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
	    {
	      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
			 PRIx64 ")."), bfun->insn_offset - 1, offset);
	    }

	  break;

	case ptev_overflow:
	  bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

	  pt_insn_get_offset (decoder, &offset);

	  warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
		   bfun->insn_offset - 1, offset);

	  break;
	}
    }
#endif /* defined (HAVE_PT_INSN_EVENT) */

  return status;
}

/* Handle events indicated by flags in INSN (libipt-v1).  */

static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
			    struct pt_insn_decoder *decoder,
			    const struct pt_insn &insn,
			    std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
  /* Tracing is disabled and re-enabled each time we enter the kernel.  Most
     times, we continue from the same instruction we stopped before.  This is
     indicated via the RESUMED instruction flag.  The ENABLED instruction flag
     means that we continued from some other instruction.  Indicate this as a
     trace gap except when tracing just started.  */
  if (insn.enabled && !btinfo->functions.empty ())
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
	       insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */

#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
  /* Indicate trace overflows.  */
  if (insn.resynced)
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
		 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}

/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
	       struct pt_insn_decoder *decoder,
	       int *plevel,
	       std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;
  uint64_t offset;
  int status;

  for (;;)
    {
      struct pt_insn insn;

      status = pt_insn_sync_forward (decoder);
      if (status < 0)
	{
	  if (status != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (status)));
	  break;
	}

      for (;;)
	{
	  /* Handle events from the previous iteration or synchronization.  */
	  status = handle_pt_insn_events (btinfo, decoder, gaps, status);
	  if (status < 0)
	    break;

	  status = pt_insn_next (decoder, &insn, sizeof (insn));
	  if (status < 0)
	    break;

	  /* Handle events indicated by flags in INSN.  */
	  handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);

	  bfun = ftrace_update_function (btinfo, insn.ip);

	  /* Maintain the function level offset.  */
	  *plevel = std::min (*plevel, bfun->level);

	  ftrace_update_insns (bfun, pt_btrace_insn (insn));
	}

      if (status == -pte_eos)
	break;

      /* Indicate the gap in the trace.  */
      bfun = ftrace_new_gap (btinfo, status, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
	       offset, insn.ip, pt_errstr (pt_errcode (status)));
    }
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  try
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  catch (const gdb_exception_error &error)
    {
      result = -pte_nomap;
    }

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void
btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
			   struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  std::vector<unsigned int> &gaps)
{
 /* We may end up doing target calls that require the current thread to be TP,
    for example reading memory through btrace_pt_readmem_callback.  Make sure
    TP is the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  /* We treat an unknown vendor as 'no errata'.  */
  if (btrace->config.cpu.vendor != CV_UNKNOWN)
    {
      config.cpu.vendor
	= pt_translate_cpu_vendor (btrace->config.cpu.vendor);
      config.cpu.family = btrace->config.cpu.family;
      config.cpu.model = btrace->config.cpu.model;
      config.cpu.stepping = btrace->config.cpu.stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
				       NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &level, gaps);
    }
  catch (const gdb_exception &error)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
	ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw;
    }

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT)  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  std::vector<unsigned int> &gaps)
{
  internal_error (_("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT)  */

/* Compute the function branch trace for thread TP from a block branch trace
   BTRACE.  If CPU is not NULL, overwrite the cpu in the branch trace
   configuration.  This is currently only used for the PT format.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp,
			 struct btrace_data *btrace,
			 const struct btrace_cpu *cpu,
			 std::vector<unsigned int> &gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      /* Overwrite the cpu we use for enabling errata workarounds.  */
      if (cpu != nullptr)
	btrace->variant.pt.config.cpu = *cpu;

      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (_("Unknown branch trace format."));
}

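/* Account for the gaps in GAPS: update TP's gap count and try to bridge
   them.  */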
static void
btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  if (!gaps.empty ())
    {
      tp->btrace.ngaps += gaps.size ();
      btrace_bridge_gaps (tp, gaps);
    }
}

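/* Compute the function branch trace for TP from BTRACE.  If CPU is not NULL,
   overwrite the cpu in the branch trace configuration.  Gaps are accounted
   for even if trace processing is interrupted.  */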
static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
		       const struct btrace_cpu *cpu)
{
  std::vector<unsigned int> gaps;

  try
    {
      btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
    }
  catch (const gdb_exception &error)
    {
      btrace_finalize_ftrace (tp, gaps);

      throw;
    }

  btrace_finalize_ftrace (tp, gaps);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct regcache *regcache;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp);
  pc = regcache_read_pc (regcache);

  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = new std::vector<btrace_block>;

  btrace.variant.bts.blocks->emplace_back (pc, pc);

  btrace_compute_ftrace (tp, &btrace, NULL);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    error (_("Recording already enabled on thread %s (%s)."),
	   print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("Intel Processor Trace support was disabled at compile time."));
#endif /* !defined (HAVE_LIBIPT) */

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  tp->btrace.target = target_enable_btrace (tp, conf);

  if (tp->btrace.target == NULL)
    error (_("Failed to enable recording on thread %s (%s)."),
	   print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  /* We need to undo the enable in case of errors.  */
  try
    {
      /* Add an entry for the current PC so we start tracing from where we
	 enabled it.

	 If we can't access TP's registers, TP is most likely running.  In this
	 case, we can't really say where tracing was enabled so it should be
	 safe to simply skip this step.

	 This is not relevant for BTRACE_FORMAT_PT since the trace will already
	 start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
	  && can_access_registers_thread (tp))
	btrace_add_pc (tp);
    }
  catch (const gdb_exception &exception)
    {
      btrace_disable (tp);

      throw;
    }
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    error (_("Recording not enabled on thread %s (%s)."),
	   print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  btrace_block *first_new_block;

  btinfo = &tp->btrace;
  gdb_assert (!btinfo->functions.empty ());
  gdb_assert (!btrace->blocks->empty ());

  last_bfun = &btinfo->functions.back ();

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
1719  if (last_bfun->insn.empty ())
1720    {
1721      btrace->blocks->pop_back ();
1722      return 0;
1723    }
1724
1725  /* Beware that block trace starts with the most recent block, so the
1726     chronologically first block in the new trace is the last block in
1727     the new trace's block vector.  */
1728  first_new_block = &btrace->blocks->back ();
1729  const btrace_insn &last_insn = last_bfun->insn.back ();
1730
1731  /* If the current PC at the end of the block is the same as in our current
1732     trace, there are two explanations:
1733       1. we executed the instruction and some branch brought us back.
1734       2. we have not made any progress.
1735     In the first case, the delta trace vector should contain at least two
1736     entries.
1737     In the second case, the delta trace vector should contain exactly one
1738     entry for the partial block containing the current PC.  Remove it.  */
1739  if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
1740    {
1741      btrace->blocks->pop_back ();
1742      return 0;
1743    }
1744
1745  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
1746	 core_addr_to_string_nz (first_new_block->end));
1747
1748  /* Do a simple sanity check to make sure we don't accidentally end up
1749     with a bad block.  This should not occur in practice.  */
1750  if (first_new_block->end < last_insn.pc)
1751    {
1752      warning (_("Error while trying to read delta trace.  Falling back to "
1753		 "a full read."));
1754      return -1;
1755    }
1756
1757  /* We adjust the last block to start at the end of our current trace.  */
1758  gdb_assert (first_new_block->begin == 0);
1759  first_new_block->begin = last_insn.pc;
1760
1761  /* We simply pop the last insn so we can insert it again as part of
1762     the normal branch trace computation.
1763     Since instruction iterators are based on indices in the instructions
1764     vector, we don't leave any pointers dangling.  */
1765  DEBUG ("pruning insn at %s for stitching",
1766	 ftrace_print_insn_addr (&last_insn));
1767
1768  last_bfun->insn.pop_back ();
1769
1770  /* The instructions vector may become empty temporarily if this has
1771     been the only instruction in this function segment.
1772     This violates the invariant but will be remedied shortly by
1773     btrace_compute_ftrace when we add the new trace.  */
1774
1775  /* The only case where this would hurt is if the entire trace consisted
1776     of just that one instruction.  If we remove it, we might turn the now
1777     empty btrace function segment into a gap.  But we don't want gaps at
1778     the beginning.  To avoid this, we remove the entire old trace.  */
1779  if (last_bfun->number == 1 && last_bfun->insn.empty ())
1780    btrace_clear (tp);
1781
1782  return 0;
1783}
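
/* An illustrative example (made-up addresses): suppose the existing trace
   ends with an instruction at PC 0x100 and the delta trace's
   chronologically first block is [begin = 0x0, end = 0x200].  The block is
   adjusted to [0x100, 0x200], the instruction at 0x100 is popped from the
   old trace, and btrace_compute_ftrace re-adds it when processing the
   delta.  Had the delta consisted of the single block [0x0, 0x100], no
   progress would have been made and the block would simply be dropped.  */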

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace->empty ())
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (_("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      delete btinfo->maint.variant.pt.packets;

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }
}

/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	case BDE_BTS_OVERFLOW:
	  return _("instruction overflow");

	case BDE_BTS_INSN_SIZE:
	  return _("unknown instruction");

	default:
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  return _("trace decode cancelled");

	case BDE_PT_DISABLED:
	  return _("disabled");

	case BDE_PT_OVERFLOW:
	  return _("overflow");

	default:
	  if (errcode < 0)
	    return pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT)  */

    default:
      break;
    }

  return _("unknown");
}
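
/* A minimal usage sketch (hypothetical caller): given a gap, i.e. a
   function segment BFUN with a non-zero error code, report it to the user:

     warning (_("Decode error (%d) at instruction %u: %s."),
	      bfun->errcode, bfun->insn_offset,
	      btrace_decode_error (btinfo->data.format, bfun->errcode));
*/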

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP is always the current thread when we get here.
     However, since we can also store a gdb.Record object in Python
     referring to a different thread than the current one, we need to
     temporarily set the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_thread (tp));

  /* Let's first try to extend the trace we already have.  */
  if (!btinfo->functions.empty ())
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace.empty ())
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace.empty ())
    {
      /* Store the raw trace data.  The stored data will be cleared in
	 btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace, cpu);
    }
}
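
/* An illustrative call sequence, roughly as the record-btrace target would
   issue it (record_btrace_get_cpu may return nullptr to request the
   default CPU):

     btrace_fetch (tp, record_btrace_get_cpu ());
     if (btrace_is_empty (tp))
       gdb_printf (_("No trace.\n"));
*/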

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str ());

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  btinfo->functions.clear ();
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  DEBUG ("free objfile");

  for (thread_info *tp : all_non_exited_threads ())
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data,
			  std::vector<gdb_xml_value> &attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value.get ();

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data,
			std::vector<gdb_xml_value> &attributes)
{
  struct btrace_data *btrace;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = new std::vector<btrace_block>;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value.get ();
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value.get ();
  btrace->variant.bts.blocks->emplace_back (*begin, *end);
}

/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
	       gdb_byte **pdata, size_t *psize)
{
  gdb_byte *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  gdb::unique_xmalloc_ptr<gdb_byte> data ((gdb_byte *) xmalloc (size));
  bin = data.get ();

  /* We use hex encoding - see gdbsupport/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
	gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  *pdata = data.release ();
  *psize = size;
}
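
/* For example, the body text "2f0a" decodes to the two bytes 0x2f and
   0x0a: each pair of hex digits yields one byte, so *PSIZE ends up as
   strlen (body_text) / 2.  */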

/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
				const struct gdb_xml_element *element,
				void *user_data,
				std::vector<gdb_xml_value> &attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor
    = (const char *) xml_find_attribute (attributes, "vendor")->value.get ();
  family
    = (ULONGEST *) xml_find_attribute (attributes, "family")->value.get ();
  model
    = (ULONGEST *) xml_find_attribute (attributes, "model")->value.get ();
  stepping
    = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value.get ();

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
			 const struct gdb_xml_element *element,
			 void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
		 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
		     const struct gdb_xml_element *element,
		     void *user_data,
		     std::vector<gdb_xml_value> &attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
#if defined (HAVE_LIBEXPAT)

  int errcode;
  btrace_data result;
  result.format = BTRACE_FORMAT_NONE;

  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, &result);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  *btrace = std::move (result);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML support was disabled at "
	   "compile time."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
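
/* An example document accepted by the parser above (addresses are made
   up; note that BTS blocks are reported most recent first):

     <btrace version="1.0">
       <block begin="0x401000" end="0x401013"/>
       <block begin="0x400e00" end="0x400e2a"/>
     </btrace>
*/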

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
			   const struct gdb_xml_element *element,
			   void *user_data,
			   std::vector<gdb_xml_value> &attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value.get ();
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data,
			  std::vector<gdb_xml_value> &attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value.get ();
}

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
#if defined (HAVE_LIBEXPAT)

  int errcode;
  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process the branch trace configuration.  XML support "
	   "was disabled at compile time."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
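
/* An example configuration document (the size is made up):

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   The "size" attribute is optional for both the "bts" and the "pt"
   record.  */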

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->insn_index;
  bfun = &it->btinfo->functions[it->call_index];

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = bfun->insn.size ();
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return &bfun->insn[index];
}

/* See btrace.h.  */

int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].errcode;
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->call_index = 0;
  it->insn_index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  if (btinfo->functions.empty ())
    error (_("No trace."));

  bfun = &btinfo->functions.back ();
  length = bfun->insn.size ();
  /* The last function may either be a gap or contain the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = bfun->insn.size ();

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = bfun->insn.size ();

	  /* An empty function segment represents a gap in the trace.  We count
	     it as one instruction.  */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);

  if (lhs->call_index != rhs->call_index)
    return lhs->call_index - rhs->call_index;

  return lhs->insn_index - rhs->insn_index;
}
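
/* A sketch of walking the complete instruction history with the iterator
   functions above (hypothetical caller; gaps are simply skipped):

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     for (; btrace_insn_cmp (&it, &end) < 0; btrace_insn_next (&it, 1))
       {
	 const struct btrace_insn *insn = btrace_insn_get (&it);

	 if (insn != NULL)
	   gdb_printf ("%u\t%s\n", btrace_insn_number (&it),
		       core_addr_to_string_nz (insn->pc));
       }
*/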

/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  lower = 0;
  bfun = &btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  upper = btinfo->functions.size () - 1;
  bfun = &btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = &btinfo->functions[average];

      if (number < bfun->insn_offset)
	{
	  upper = average - 1;
	  continue;
	}

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
	{
	  lower = average + 1;
	  continue;
	}

      break;
    }

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;
  return 1;
}
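
/* For example (illustrative), positioning an iterator at instruction
   number 42, e.g. when preparing to replay from that instruction:

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, btinfo, 42) == 0)
       error (_("Instruction number 42 is out of range."));
*/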

/* Returns true if the recording ends with a function segment that
   contains only a single (i.e. the current) instruction.  */

static bool
btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
{
  const btrace_function *bfun;

  if (btinfo->functions.empty ())
    return false;

  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return false;

  return ftrace_call_num_insn (bfun) == 1;
}

/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  if (it->index >= it->btinfo->functions.size ())
    return NULL;

  return &it->btinfo->functions[it->index];
}

/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const unsigned int length = it->btinfo->functions.size ();

  /* If the last function segment contains only a single instruction (i.e. the
     current instruction), skip it.  */
  if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
    return length;

  return it->index + 1;
}

/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = btinfo->functions.size ();
}

/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();

  if (it->index + stride < length - 1)
    /* Default case: Simply advance the iterator.  */
    it->index += stride;
  else if (it->index + stride == length - 1)
    {
      /* We land exactly at the last function segment.  If it contains only one
	 instruction (i.e. the current instruction) it is not actually part of
	 the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length;
      else
	it->index = length - 1;
    }
  else
    {
      /* We land past the last function segment and have to adjust the stride.
	 If the last function segment contains only one instruction (i.e. the
	 current instruction) it is not actually part of the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	stride = length - it->index - 1;
      else
	stride = length - it->index;

      it->index = length;
    }

  return stride;
}

/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
  int steps = 0;

  gdb_assert (it->index <= length);

  if (stride == 0 || it->index == 0)
    return 0;

  /* If we are at the end, the first step is a special case.  If the last
     function segment contains only one instruction (i.e. the current
     instruction) it is not actually part of the trace.  To be able to step
     over this instruction, we need at least one more function segment.  */
  if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length - 2;
      else
	it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}

/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);
  return (int) (lhs->index - rhs->index);
}

/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const unsigned int length = btinfo->functions.size ();

  if ((number == 0) || (number > length))
    return 0;

  it->btinfo = btinfo;
  it->index = number - 1;
  return 1;
}
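
/* A sketch of walking the call history (hypothetical caller):

     struct btrace_call_iterator it;

     btrace_call_begin (&it, btinfo);

     while (btrace_call_get (&it) != NULL)
       {
	 gdb_printf ("%u\t%s\n", btrace_call_number (&it),
		     ftrace_print_function_name (btrace_call_get (&it)));

	 if (btrace_call_next (&it, 1) == 0)
	   break;
       }
*/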

/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}

/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}

/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}

/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->functions.empty ())
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}

#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      gdb_printf (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      gdb_printf (("psb"));
      break;

    case ppt_psbend:
      gdb_printf (("psbend"));
      break;

    case ppt_pad:
      gdb_printf (("pad"));
      break;

    case ppt_tip:
      gdb_printf (("tip %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      gdb_printf (("tip.pge %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      gdb_printf (("tip.pgd %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_fup:
      gdb_printf (("fup %u: 0x%" PRIx64 ""),
		  packet->payload.ip.ipc,
		  packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      gdb_printf (("tnt-8 %u: 0x%" PRIx64 ""),
		  packet->payload.tnt.bit_size,
		  packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      gdb_printf (("tnt-64 %u: 0x%" PRIx64 ""),
		  packet->payload.tnt.bit_size,
		  packet->payload.tnt.payload);
      break;

    case ppt_pip:
      gdb_printf (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
		  packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      gdb_printf (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      gdb_printf (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
	{
	default:
	  gdb_printf (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  gdb_printf (("mode.exec%s%s"),
		      packet->payload.mode.bits.exec.csl
		      ? (" cs.l") : (""),
		      packet->payload.mode.bits.exec.csd
		      ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  gdb_printf (("mode.tsx%s%s"),
		      packet->payload.mode.bits.tsx.intx
		      ? (" intx") : (""),
		      packet->payload.mode.bits.tsx.abrt
		      ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      gdb_printf (("ovf"));
      break;

    case ppt_stop:
      gdb_printf (("stop"));
      break;

    case ppt_vmcs:
      gdb_printf (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      gdb_printf (("tma %x %x"), packet->payload.tma.ctc,
		  packet->payload.tma.fc);
      break;

    case ppt_mtc:
      gdb_printf (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      gdb_printf (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      gdb_printf (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}

/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  if (maint->variant.pt.packets == NULL)
    maint->variant.pt.packets = new std::vector<btrace_pt_packet>;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof (packet.packet));
	  if (errcode < 0)
	    break;

	  if (!maint_btrace_pt_skip_pad || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      maint->variant.pt.packets->push_back (packet);
	    }
	}

      if (errcode == -pte_eos)
	break;

      packet.errcode = pt_errcode (errcode);
      maint->variant.pt.packets->push_back (packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}

/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  struct pt_packet_decoder *decoder;
  const struct btrace_cpu *cpu;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof (config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  cpu = record_btrace_get_cpu ();
  if (cpu == nullptr)
    cpu = &pt->config.cpu;

  /* We treat an unknown vendor as 'no errata'.  */
  if (cpu->vendor != CV_UNKNOWN)
    {
      config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
      config.cpu.family = cpu->family;
      config.cpu.model = cpu->model;
      config.cpu.stepping = cpu->stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  catch (const gdb_exception &except)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
	throw;
    }

  pt_pkt_free_decoder (decoder);
}

#endif /* defined (HAVE_LIBIPT)  */

/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
			     unsigned int *begin, unsigned int *end,
			     unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = btinfo->data.variant.bts.blocks->size ();
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (btinfo->maint.variant.pt.packets == nullptr)
	btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;

      if (btinfo->maint.variant.pt.packets->empty ())
	btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = btinfo->maint.variant.pt.packets->size ();
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }
}

/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
			    unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
	const std::vector<btrace_block> &blocks
	  = *btinfo->data.variant.bts.blocks;
	unsigned int blk;

	for (blk = begin; blk < end; ++blk)
	  {
	    const btrace_block &block = blocks.at (blk);

	    gdb_printf ("%u\tbegin: %s, end: %s\n", blk,
			core_addr_to_string_nz (block.begin),
			core_addr_to_string_nz (block.end));
	  }

	btinfo->maint.variant.bts.packet_history.begin = begin;
	btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	const std::vector<btrace_pt_packet> &packets
	  = *btinfo->maint.variant.pt.packets;
	unsigned int pkt;

	for (pkt = begin; pkt < end; ++pkt)
	  {
	    const struct btrace_pt_packet &packet = packets.at (pkt);

	    gdb_printf ("%u\t", pkt);
	    gdb_printf ("0x%" PRIx64 "\t", packet.offset);

	    if (packet.errcode == pte_ok)
	      pt_print_packet (&packet.packet);
	    else
	      gdb_printf ("[error: %s]", pt_errstr (packet.errcode));

	    gdb_printf ("\n");
	  }

	btinfo->maint.variant.pt.packet_history.begin = begin;
	btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }
}

/* Read a number from an argument string.  */

static unsigned int
get_uint (const char **arg)
{
  const char *begin, *pos;
  char *end;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}

/* Read a context size from an argument string.  */

static int
get_context_size (const char **arg)
{
  const char *pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  char *end;
  long result = strtol (pos, &end, 10);
  *arg = end;
  return result;
}

/* Complain about junk at the end of an argument string.  */

static void
no_chunk (const char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}

/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (const char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  unsigned int size, begin, end, from, to;

  thread_info *tp = find_thread_ptid (current_inferior (), inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      gdb_printf (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
	size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
	size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (end <= from)
	error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
	{
	  arg = skip_spaces (++arg);

	  if (*arg == '+')
	    {
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      if (end - from < size)
		size = end - from;
	      to = from + size;
	    }
	  else if (*arg == '-')
	    {
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      /* Include the packet given as first argument.  */
	      from += 1;
	      to = from;

	      if (to - begin < size)
		size = to - begin;
	      from = to - size;
	    }
	  else
	    {
	      to = get_uint (&arg);

	      /* Include the packet at the second argument and silently
		 truncate the range.  */
	      if (to < end)
		to += 1;
	      else
		to = end;

	      no_chunk (arg);
	    }
	}
      else
	{
	  no_chunk (arg);

	  if (end - from < size)
	    size = end - from;
	  to = from + size;
	}

      dont_repeat ();
    }

  btrace_maint_print_packets (btinfo, from, to);
}
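
/* Example invocations (illustrative):

     (gdb) maint btrace packet-history        <- the next ten packets
     (gdb) maint btrace packet-history -      <- the previous ten packets
     (gdb) maint btrace packet-history 42     <- ten packets starting at 42
     (gdb) maint btrace packet-history 42,50  <- packets 42 through 50
     (gdb) maint btrace packet-history 42,+5  <- five packets starting at 42
     (gdb) maint btrace packet-history 42,-5  <- five packets ending at 42
*/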

/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_thread_info *btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
}

/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_clear (tp);
}

/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (const char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  gdb_printf (_("Format: %s.\n"),
	      btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      gdb_printf (_("Number of packets: %zu.\n"),
		  btinfo->data.variant.bts.blocks->size ());
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	struct pt_version version;

	version = pt_library_version ();
	gdb_printf (_("Version: %u.%u.%u%s.\n"), version.major,
		    version.minor, version.build,
		    version.ext != NULL ? version.ext : "");

	btrace_maint_update_pt_packets (btinfo);
	gdb_printf (_("Number of packets: %zu.\n"),
		    ((btinfo->maint.variant.pt.packets == nullptr)
		     ? 0 : btinfo->maint.variant.pt.packets->size ()));
      }
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }
}

/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c,
			       const char *value)
{
  gdb_printf (file, _("Skip PAD packets is %s.\n"), value);
}


/* Initialize btrace maintenance commands.  */

void _initialize_btrace ();
void
_initialize_btrace ()
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Info about branch tracing data."), &maintenanceinfolist);

  add_basic_prefix_cmd ("btrace", class_maintenance,
			_("Branch tracing maintenance commands."),
			&maint_btrace_cmdlist, 0, &maintenancelist);

  add_setshow_prefix_cmd ("btrace", class_maintenance,
			  _("Set branch tracing specific variables."),
			  _("Show branch tracing specific variables."),
			  &maint_btrace_set_cmdlist,
			  &maint_btrace_show_cmdlist,
			  &maintenance_set_cmdlist,
			  &maintenance_show_cmdlist);

  add_setshow_prefix_cmd ("pt", class_maintenance,
			  _("Set Intel Processor Trace specific variables."),
			  _("Show Intel Processor Trace specific variables."),
			  &maint_btrace_pt_set_cmdlist,
			  &maint_btrace_pt_show_cmdlist,
			  &maint_btrace_set_cmdlist,
			  &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
			   &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
			   NULL, show_maint_btrace_pt_skip_pad,
			   &maint_btrace_pt_set_cmdlist,
			   &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
	   _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first."),
	   &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
	   maint_btrace_clear_packet_history_cmd,
	   _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data."),
	   &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
	   _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew."),
	   &maint_btrace_cmdlist);
}