1/* SPU target-dependent code for GDB, the GNU debugger.
2   Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
3   Free Software Foundation, Inc.
4
5   Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
6   Based on a port by Sid Manning <sid@us.ibm.com>.
7
8   This file is part of GDB.
9
10   This program is free software; you can redistribute it and/or modify
11   it under the terms of the GNU General Public License as published by
12   the Free Software Foundation; either version 3 of the License, or
13   (at your option) any later version.
14
15   This program is distributed in the hope that it will be useful,
16   but WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18   GNU General Public License for more details.
19
20   You should have received a copy of the GNU General Public License
21   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
22
23#include "defs.h"
24#include "arch-utils.h"
25#include "gdbtypes.h"
26#include "gdbcmd.h"
27#include "gdbcore.h"
28#include "gdb_string.h"
29#include "gdb_assert.h"
30#include "frame.h"
31#include "frame-unwind.h"
32#include "frame-base.h"
33#include "trad-frame.h"
34#include "symtab.h"
35#include "symfile.h"
36#include "value.h"
37#include "inferior.h"
38#include "dis-asm.h"
39#include "objfiles.h"
40#include "language.h"
41#include "regcache.h"
42#include "reggroups.h"
43#include "floatformat.h"
44#include "block.h"
45#include "observer.h"
46#include "infcall.h"
47#include "dwarf2.h"
48#include "exceptions.h"
49#include "spu-tdep.h"
50
51
/* The list of available "set spu " and "show spu " commands.  */
static struct cmd_list_element *setspucmdlist = NULL;
static struct cmd_list_element *showspucmdlist = NULL;

/* Whether to stop for new SPE contexts.  Presumably toggled by a
   "set spu" command registered elsewhere in this file -- confirm.  */
static int spu_stop_on_load_p = 0;
/* Whether to automatically flush the SW-managed cache.  Presumably
   toggled by a "set spu" command as well -- confirm.  */
static int spu_auto_flush_cache_p = 1;
60
61
/* The tdep structure.  */
struct gdbarch_tdep
{
  /* The spufs ID identifying our address space.  -1 when unknown;
     spu_gdbarch_id falls back to the objfile's relocated address
     range in that case.  */
  int id;

  /* SPU-specific vector type.  Created lazily by
     spu_builtin_type_vec128; NULL until first use.  */
  struct type *spu_builtin_type_vec128;
};
71
72
73/* SPU-specific vector type.  */
74static struct type *
75spu_builtin_type_vec128 (struct gdbarch *gdbarch)
76{
77  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
78
79  if (!tdep->spu_builtin_type_vec128)
80    {
81      const struct builtin_type *bt = builtin_type (gdbarch);
82      struct type *t;
83
84      t = arch_composite_type (gdbarch,
85			       "__spu_builtin_type_vec128", TYPE_CODE_UNION);
86      append_composite_type_field (t, "uint128", bt->builtin_int128);
87      append_composite_type_field (t, "v2_int64",
88				   init_vector_type (bt->builtin_int64, 2));
89      append_composite_type_field (t, "v4_int32",
90				   init_vector_type (bt->builtin_int32, 4));
91      append_composite_type_field (t, "v8_int16",
92				   init_vector_type (bt->builtin_int16, 8));
93      append_composite_type_field (t, "v16_int8",
94				   init_vector_type (bt->builtin_int8, 16));
95      append_composite_type_field (t, "v2_double",
96				   init_vector_type (bt->builtin_double, 2));
97      append_composite_type_field (t, "v4_float",
98				   init_vector_type (bt->builtin_float, 4));
99
100      TYPE_VECTOR (t) = 1;
101      TYPE_NAME (t) = "spu_builtin_type_vec128";
102
103      tdep->spu_builtin_type_vec128 = t;
104    }
105
106  return tdep->spu_builtin_type_vec128;
107}
108
109
/* The list of available "info spu " commands.  NULL until the
   commands are registered.  */
static struct cmd_list_element *infospucmdlist = NULL;
112
113/* Registers.  */
114
/* Return the name of register REG_NR, or NULL if REG_NR is not a
   valid SPU register number.  Registers 0..127 are the general
   purpose registers "r0".."r127"; the remaining entries name the
   special purpose registers.  */
static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  /* Fully const: the entries are string literals and must never be
     written through (doing so would be undefined behavior).  */
  static const char *const register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };
  /* Signed count avoids a signed/unsigned comparison against the
     (possibly negative) REG_NR below.  */
  const int num_registers
    = (int) (sizeof register_names / sizeof register_names[0]);

  if (reg_nr < 0 || reg_nr >= num_registers)
    return NULL;

  return register_names[reg_nr];
}
146
147static struct type *
148spu_register_type (struct gdbarch *gdbarch, int reg_nr)
149{
150  if (reg_nr < SPU_NUM_GPRS)
151    return spu_builtin_type_vec128 (gdbarch);
152
153  switch (reg_nr)
154    {
155    case SPU_ID_REGNUM:
156      return builtin_type (gdbarch)->builtin_uint32;
157
158    case SPU_PC_REGNUM:
159      return builtin_type (gdbarch)->builtin_func_ptr;
160
161    case SPU_SP_REGNUM:
162      return builtin_type (gdbarch)->builtin_data_ptr;
163
164    case SPU_FPSCR_REGNUM:
165      return builtin_type (gdbarch)->builtin_uint128;
166
167    case SPU_SRR0_REGNUM:
168      return builtin_type (gdbarch)->builtin_uint32;
169
170    case SPU_LSLR_REGNUM:
171      return builtin_type (gdbarch)->builtin_uint32;
172
173    case SPU_DECR_REGNUM:
174      return builtin_type (gdbarch)->builtin_uint32;
175
176    case SPU_DECR_STATUS_REGNUM:
177      return builtin_type (gdbarch)->builtin_uint32;
178
179    default:
180      internal_error (__FILE__, __LINE__, _("invalid regnum"));
181    }
182}
183
184/* Pseudo registers for preferred slots - stack pointer.  */
185
/* Read the 32-bit SPU special purpose register named REGNAME of the
   current SPE context from spufs, and store the value into BUF in
   target byte order.  The spufs file contains the value as a
   hexadecimal string.  Return REG_VALID on success, or the failure
   status from reading the SPU ID register.  */
static enum register_status
spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
			      gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum register_status status;
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;

  /* The annex identifying the spufs file is "<spufs-id>/<name>".  */
  status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  if (status != REG_VALID)
    return status;
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  /* Pre-clear REG so that a short (or failed) read still leaves a
     NUL-terminated string; strtoulst then yields 0.  */
  memset (reg, 0, sizeof reg);
  target_read (&current_target, TARGET_OBJECT_SPU, annex,
	       reg, 0, sizeof reg);

  store_unsigned_integer (buf, 4, byte_order, strtoulst (reg, NULL, 16));
  return REG_VALID;
}
208
/* Implement the "pseudo_register_read" gdbarch method.  The pseudo
   registers are the preferred-slot view of the stack pointer, the
   FPSCR, and several special purpose registers backed by spufs.  */
static enum register_status
spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
                          int regnum, gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;
  enum register_status status;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      /* The cooked SP is the preferred slot (first 4 bytes) of the
	 raw 128-bit stack pointer register.  */
      status = regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      if (status != REG_VALID)
	return status;
      memcpy (buf, reg, 4);
      return status;

    case SPU_FPSCR_REGNUM:
      /* FPSCR is read as 16 raw bytes from the "fpcr" spufs file.  */
      status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      if (status != REG_VALID)
	return status;
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      return status;

    /* The remaining registers are exposed as hex strings in spufs;
       see spu_pseudo_register_read_spu.  */
    case SPU_SRR0_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "srr0", buf);

    case SPU_LSLR_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "lslr", buf);

    case SPU_DECR_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "decr", buf);

    case SPU_DECR_STATUS_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "decr_status", buf);

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}
251
/* Write the 32-bit value in BUF (target byte order) to the SPU
   special purpose register named REGNAME of the current SPE context,
   by writing a "0x..." hexadecimal string to the corresponding
   spufs file.  */
static void
spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
			       const gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;

  /* The annex identifying the spufs file is "<spufs-id>/<name>".  */
  regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  xsnprintf (reg, sizeof reg, "0x%s",
	     phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
  target_write (&current_target, TARGET_OBJECT_SPU, annex,
		reg, 0, strlen (reg));
}
269
/* Implement the "pseudo_register_write" gdbarch method -- the inverse
   of spu_pseudo_register_read.  */
static void
spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
                           int regnum, const gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      /* Writing the cooked SP updates only the preferred slot of the
	 raw 128-bit register; read-modify-write keeps the remaining
	 twelve bytes intact.  */
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (reg, buf, 4);
      regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
      break;

    case SPU_FPSCR_REGNUM:
      /* FPSCR is written as 16 raw bytes to the "fpcr" spufs file.  */
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    /* The remaining registers are written as hex strings to spufs;
       see spu_pseudo_register_write_spu.  */
    case SPU_SRR0_REGNUM:
      spu_pseudo_register_write_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}
312
313/* Value conversion -- access scalar values at the preferred slot.  */
314
315static struct value *
316spu_value_from_register (struct type *type, int regnum,
317			 struct frame_info *frame)
318{
319  struct value *value = default_value_from_register (type, regnum, frame);
320  int len = TYPE_LENGTH (type);
321
322  if (regnum < SPU_NUM_GPRS && len < 16)
323    {
324      int preferred_slot = len < 4 ? 4 - len : 0;
325      set_value_offset (value, preferred_slot);
326    }
327
328  return value;
329}
330
331/* Register groups.  */
332
333static int
334spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
335			 struct reggroup *group)
336{
337  /* Registers displayed via 'info regs'.  */
338  if (group == general_reggroup)
339    return 1;
340
341  /* Registers displayed via 'info float'.  */
342  if (group == float_reggroup)
343    return 0;
344
345  /* Registers that need to be saved/restored in order to
346     push or pop frames.  */
347  if (group == save_reggroup || group == restore_reggroup)
348    return 1;
349
350  return default_register_reggroup_p (gdbarch, regnum, group);
351}
352
353
354/* Address handling.  */
355
356static int
357spu_gdbarch_id (struct gdbarch *gdbarch)
358{
359  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
360  int id = tdep->id;
361
362  /* The objfile architecture of a standalone SPU executable does not
363     provide an SPU ID.  Retrieve it from the objfile's relocated
364     address range in this special case.  */
365  if (id == -1
366      && symfile_objfile && symfile_objfile->obfd
367      && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
368      && symfile_objfile->sections != symfile_objfile->sections_end)
369    id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));
370
371  return id;
372}
373
374static int
375spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
376{
377  if (dwarf2_addr_class == 1)
378    return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
379  else
380    return 0;
381}
382
383static const char *
384spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
385{
386  if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
387    return "__ea";
388  else
389    return NULL;
390}
391
392static int
393spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
394				      const char *name, int *type_flags_ptr)
395{
396  if (strcmp (name, "__ea") == 0)
397    {
398      *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
399      return 1;
400    }
401  else
402   return 0;
403}
404
405static void
406spu_address_to_pointer (struct gdbarch *gdbarch,
407			struct type *type, gdb_byte *buf, CORE_ADDR addr)
408{
409  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
410  store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
411			  SPUADDR_ADDR (addr));
412}
413
414static CORE_ADDR
415spu_pointer_to_address (struct gdbarch *gdbarch,
416			struct type *type, const gdb_byte *buf)
417{
418  int id = spu_gdbarch_id (gdbarch);
419  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
420  ULONGEST addr
421    = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);
422
423  /* Do not convert __ea pointers.  */
424  if (TYPE_ADDRESS_CLASS_1 (type))
425    return addr;
426
427  return addr? SPUADDR (id, addr) : 0;
428}
429
430static CORE_ADDR
431spu_integer_to_address (struct gdbarch *gdbarch,
432			struct type *type, const gdb_byte *buf)
433{
434  int id = spu_gdbarch_id (gdbarch);
435  ULONGEST addr = unpack_long (type, buf);
436
437  return SPUADDR (id, addr);
438}
439
440
441/* Decoding SPU instructions.  */
442
/* Opcode values of the instructions the prologue/epilogue scanners
   and the branch decoder below need to recognize.  Each value is the
   primary opcode field of the respective instruction format; the
   matching is_* decoder determines how wide that field is.  */
enum
  {
    /* Quadword loads and stores.  lqd/stqd are matched via is_ri10
       by the prologue and epilogue scanners.  */
    op_lqd   = 0x34,
    op_lqx   = 0x3c4,
    op_lqa   = 0x61,
    op_lqr   = 0x67,
    op_stqd  = 0x24,
    op_stqx  = 0x144,
    op_stqa  = 0x41,
    op_stqr  = 0x47,

    /* Immediate loads and adds used to set up stack frames: il via
       is_ri16, ila via is_ri18, a via is_rr, ai via is_ri10.  */
    op_il    = 0x081,
    op_ila   = 0x21,
    op_a     = 0x0c0,
    op_ai    = 0x1c,

    /* Select-bits (is_rrr); _start uses selb to set up the SP.  */
    op_selb  = 0x8,

    /* Branches.  The br* forms carry a 16-bit immediate (is_ri16);
       the bi* forms branch via a register (is_ri7).  */
    op_br    = 0x64,
    op_bra   = 0x60,
    op_brsl  = 0x66,
    op_brasl = 0x62,
    op_brnz  = 0x42,
    op_brz   = 0x40,
    op_brhnz = 0x46,
    op_brhz  = 0x44,
    op_bi    = 0x1a8,
    op_bisl  = 0x1a9,
    op_biz   = 0x128,
    op_binz  = 0x129,
    op_bihz  = 0x12a,
    op_bihnz = 0x12b,
  };
476
/* Decode INSN as an RR-format instruction with 11-bit opcode OP.  On
   a match, extract the register fields into *RT, *RA and *RB and
   return non-zero; otherwise return zero.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  return 1;
}
490
/* Decode INSN as an RRR-format instruction with 4-bit opcode OP.  On
   a match, extract the register fields into *RT, *RA, *RB and *RC and
   return non-zero; otherwise return zero.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) != op)
    return 0;

  *rt = (insn >> 21) & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  *rc = insn & 127;
  return 1;
}
505
/* Decode INSN as an RI7-format instruction with 11-bit opcode OP.  On
   a match, extract the register fields into *RT and *RA, the
   sign-extended 7-bit immediate into *I7, and return non-zero;
   otherwise return zero.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract pair sign-extends the 7-bit field.  */
  *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
  return 1;
}
519
/* Decode INSN as an RI10-format instruction with 8-bit opcode OP.  On
   a match, extract the register fields into *RT and *RA, the
   sign-extended 10-bit immediate into *I10, and return non-zero;
   otherwise return zero.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract pair sign-extends the 10-bit field.  */
  *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
  return 1;
}
533
/* Decode INSN as an RI16-format instruction with 9-bit opcode OP.  On
   a match, extract the register field into *RT, the sign-extended
   16-bit immediate into *I16, and return non-zero; otherwise return
   zero.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract pair sign-extends the 16-bit field.  */
  *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
  return 1;
}
546
/* Decode INSN as an RI18-format instruction with 7-bit opcode OP.  On
   a match, extract the register field into *RT, the sign-extended
   18-bit immediate into *I18, and return non-zero; otherwise return
   zero.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract pair sign-extends the 18-bit field.  */
  *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
  return 1;
}
559
560static int
561is_branch (unsigned int insn, int *offset, int *reg)
562{
563  int rt, i7, i16;
564
565  if (is_ri16 (insn, op_br, &rt, &i16)
566      || is_ri16 (insn, op_brsl, &rt, &i16)
567      || is_ri16 (insn, op_brnz, &rt, &i16)
568      || is_ri16 (insn, op_brz, &rt, &i16)
569      || is_ri16 (insn, op_brhnz, &rt, &i16)
570      || is_ri16 (insn, op_brhz, &rt, &i16))
571    {
572      *reg = SPU_PC_REGNUM;
573      *offset = i16 << 2;
574      return 1;
575    }
576
577  if (is_ri16 (insn, op_bra, &rt, &i16)
578      || is_ri16 (insn, op_brasl, &rt, &i16))
579    {
580      *reg = -1;
581      *offset = i16 << 2;
582      return 1;
583    }
584
585  if (is_ri7 (insn, op_bi, &rt, reg, &i7)
586      || is_ri7 (insn, op_bisl, &rt, reg, &i7)
587      || is_ri7 (insn, op_biz, &rt, reg, &i7)
588      || is_ri7 (insn, op_binz, &rt, reg, &i7)
589      || is_ri7 (insn, op_bihz, &rt, reg, &i7)
590      || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
591    {
592      *offset = 0;
593      return 1;
594    }
595
596  return 0;
597}
598
599
600/* Prolog parsing.  */
601
/* Result of prologue analysis; see spu_analyze_prologue.  */
struct spu_prologue_data
  {
    /* Stack frame size.  -1 if analysis was unsuccessful.  */
    int size;

    /* How to find the CFA.  The CFA is equal to SP at function entry.
       CFA_REG is -1 if the CFA could no longer be tracked.  */
    int cfa_reg;
    int cfa_offset;

    /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
    int reg_offset[SPU_NUM_GPRS];
  };
614
/* Analyze the prologue of the function starting at START_PC, scanning
   no further than END_PC.  Fill in *DATA with the frame size, a rule
   for computing the CFA, and the CFA-relative offsets of saved
   registers (see struct spu_prologue_data).  Return the address of
   the first instruction after the prologue, or START_PC if no
   prologue instruction was recognized.  */
static CORE_ADDR
spu_analyze_prologue (struct gdbarch *gdbarch,
		      CORE_ADDR start_pc, CORE_ADDR end_pc,
                      struct spu_prologue_data *data)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int found_bc = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
      reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

	- The first instruction to set up the stack pointer.
	- The first instruction to set up the frame pointer.
	- The first instruction to save the link register.
	- The first instruction to save the backchain.

     We return the instruction after the latest of these four,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

      - Any instruction adding to the current frame pointer.
      - Any instruction loading an immediate constant into a register.
      - Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      /* Stop the scan at the first unreadable instruction.  */
      if (target_read_memory (pc, buf, 4))
	break;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      /* AI is the typical instruction to set up a stack frame.
         It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    data->cfa_offset -= immed;

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      data->size = -immed;
	    }
	  else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
		   && !found_fp)
	    {
	      found_fp = 1;
	      prolog_pc = pc + 4;

	      data->cfa_reg = SPU_FP_REGNUM;
	      data->cfa_offset -= immed;
	    }
	}

      /* A is used to set up stack frames of size >= 512 bytes.
         If we have tracked the contents of the addend register,
         we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    {
	      if (reg_immed[rb] != 0)
		data->cfa_offset -= reg_immed[rb];
	      else
		data->cfa_reg = -1;  /* We don't know the CFA any more.  */
	    }

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      if (reg_immed[rb] != 0)
		data->size = -reg_immed[rb];
	    }
	}

      /* We need to track IL and ILA used to load immediate constants
         in case they are later used as input to an A instruction.
         If they target SP directly, note that SP was set up (keeping
         DATA->size as-is) so the frameless fallback below does not
         trigger.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
	{
	  reg_immed[rt] = immed;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      else if (is_ri18 (insn, op_ila, &rt, &immed))
	{
	  reg_immed[rt] = immed & 0x3ffff;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
	{
	  if (ra == data->cfa_reg)
	    data->reg_offset[rt] = data->cfa_offset - (immed << 4);

	  if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
              && !found_lr)
	    {
	      found_lr = 1;
	      prolog_pc = pc + 4;
	    }

	  if (ra == SPU_RAW_SP_REGNUM
	      && (found_sp? immed == 0 : rt == SPU_RAW_SP_REGNUM)
	      && !found_bc)
	    {
	      found_bc = 1;
	      prolog_pc = pc + 4;
	    }
	}

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
	{
	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
	break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}
798
/* Return the first instruction after the prologue starting at PC.
   Implements the gdbarch "skip_prologue" method; the analysis
   details in DATA are discarded here.  */
static CORE_ADDR
spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  struct spu_prologue_data data;
  return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
}
806
807/* Return the frame pointer in use at address PC.  */
808static void
809spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
810			   int *reg, LONGEST *offset)
811{
812  struct spu_prologue_data data;
813  spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
814
815  if (data.size != -1 && data.cfa_reg != -1)
816    {
817      /* The 'frame pointer' address is CFA minus frame size.  */
818      *reg = data.cfa_reg;
819      *offset = data.cfa_offset - data.size;
820    }
821  else
822    {
823      /* ??? We don't really know ...  */
824      *reg = SPU_SP_REGNUM;
825      *offset = 0;
826    }
827}
828
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.

   1) scan forward from the point of execution:
       a) If you find an instruction that modifies the stack pointer
          or transfers control (except a return), execution is not in
          an epilogue, return.
       b) Stop scanning if you find a return instruction or reach the
          end of the function or reach the hard limit for the size of
          an epilogue.
   2) scan backward from the point of execution:
        a) If you find an instruction that modifies the stack pointer,
            execution *is* in an epilogue, return.
        b) Stop scanning if you reach an instruction that transfers
           control or the beginning of the function or reach the hard
           limit for the size of an epilogue.  */

static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, rc, immed;

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  /* Without function boundaries we cannot decide; assume no.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
	{
	  /* An indirect branch via LR with zero offset is the return
	     instruction; any other branch disqualifies.  */
	  if (immed == 0 && ra == SPU_LR_REGNUM)
	    break;

	  return 0;
	}

      /* Any instruction writing the raw SP before the return means we
	 are not past the frame teardown yet.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 0;
	}
    }

  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
	return 0;

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 1;
	}
    }

  return 0;
}
923
924
925/* Normal stack frames.  */
926
/* Per-frame unwind state; see spu_frame_unwind_cache.  */
struct spu_unwind_cache
{
  /* Start address of the frame's function, or the frame PC when the
     function could not be determined.  */
  CORE_ADDR func;
  /* The frame's CFA; 0 when no frame could be identified.  */
  CORE_ADDR frame_base;
  /* CFA minus frame size -- the low end of this frame.  */
  CORE_ADDR local_base;

  /* Locations where this frame saved registers.  */
  struct trad_frame_saved_reg *saved_regs;
};
935
/* Build -- or return the previously built -- unwind cache for
   THIS_FRAME, caching it in *THIS_PROLOGUE_CACHE.  Tries prologue
   analysis first and falls back to following the backchain link
   stored at the stack pointer.  */
static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *this_frame,
			void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  CORE_ADDR id = tdep->id;
  gdb_byte buf[16];

  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = get_frame_func (this_frame);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = get_frame_pc (this_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
			  &data);

  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      get_frame_register (this_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
      cfa = SPUADDR (id, cfa);

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
	if (i == SPU_LR_REGNUM
	    || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
	  if (data.reg_offset[i] != -1)
	    info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg;
      LONGEST backchain;
      ULONGEST lslr;
      int status;

      /* Get local store limit.  */
      lslr = get_frame_register_unsigned (this_frame, SPU_LSLR_REGNUM);
      if (!lslr)
	lslr = (ULONGEST) -1;

      /* Get the backchain.  */
      reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
      status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
					 &backchain);

      /* A zero backchain terminates the frame chain.  Also, sanity
         check against the local store size limit.  */
      if (status && backchain > 0 && backchain <= lslr)
	{
	  /* Assume the link register is saved into its slot.  */
	  if (backchain + 16 <= lslr)
	    info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id,
							    backchain + 16);

          /* Frame bases.  */
	  info->frame_base = SPUADDR (id, backchain);
	  info->local_base = SPUADDR (id, reg);
	}
    }

  /* If we didn't find a frame, we cannot determine SP / return address.  */
  if (info->frame_base == 0)
    return info;

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
			SPUADDR_ADDR (info->frame_base));

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    get_frame_register (this_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf + 8, 4, byte_order));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf, 4, byte_order));

  return info;
}
1055
1056static void
1057spu_frame_this_id (struct frame_info *this_frame,
1058		   void **this_prologue_cache, struct frame_id *this_id)
1059{
1060  struct spu_unwind_cache *info =
1061    spu_frame_unwind_cache (this_frame, this_prologue_cache);
1062
1063  if (info->frame_base == 0)
1064    return;
1065
1066  *this_id = frame_id_build (info->frame_base, info->func);
1067}
1068
1069static struct value *
1070spu_frame_prev_register (struct frame_info *this_frame,
1071			 void **this_prologue_cache, int regnum)
1072{
1073  struct spu_unwind_cache *info
1074    = spu_frame_unwind_cache (this_frame, this_prologue_cache);
1075
1076  /* Special-case the stack pointer.  */
1077  if (regnum == SPU_RAW_SP_REGNUM)
1078    regnum = SPU_SP_REGNUM;
1079
1080  return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
1081}
1082
/* SPU prologue-analysis frame unwinder.  */
static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  spu_frame_this_id,
  spu_frame_prev_register,
  NULL,				/* unwind_data */
  default_frame_sniffer
};
1091
1092static CORE_ADDR
1093spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
1094{
1095  struct spu_unwind_cache *info
1096    = spu_frame_unwind_cache (this_frame, this_cache);
1097  return info->local_base;
1098}
1099
/* Frame base handlers; all three base kinds (base, locals, args)
   resolve to the same address, spu_frame_base_address.  */
static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};
1106
1107static CORE_ADDR
1108spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
1109{
1110  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1111  CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
1112  /* Mask off interrupt enable bit.  */
1113  return SPUADDR (tdep->id, pc & -4);
1114}
1115
1116static CORE_ADDR
1117spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1118{
1119  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1120  CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1121  return SPUADDR (tdep->id, sp);
1122}
1123
1124static CORE_ADDR
1125spu_read_pc (struct regcache *regcache)
1126{
1127  struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
1128  ULONGEST pc;
1129  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1130  /* Mask off interrupt enable bit.  */
1131  return SPUADDR (tdep->id, pc & -4);
1132}
1133
1134static void
1135spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1136{
1137  /* Keep interrupt enabled state unchanged.  */
1138  ULONGEST old_pc;
1139  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1140  regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1141				  (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
1142}
1143
1144
1145/* Cell/B.E. cross-architecture unwinder support.  */
1146
struct spu2ppu_cache
{
  /* Frame ID of the PPU frame being unwound to.  */
  struct frame_id frame_id;
  /* Saved PPU register state; freed by spu2ppu_dealloc_cache.  */
  struct regcache *regcache;
};
1152
1153static struct gdbarch *
1154spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
1155{
1156  struct spu2ppu_cache *cache = *this_cache;
1157  return get_regcache_arch (cache->regcache);
1158}
1159
1160static void
1161spu2ppu_this_id (struct frame_info *this_frame,
1162		 void **this_cache, struct frame_id *this_id)
1163{
1164  struct spu2ppu_cache *cache = *this_cache;
1165  *this_id = cache->frame_id;
1166}
1167
1168static struct value *
1169spu2ppu_prev_register (struct frame_info *this_frame,
1170		       void **this_cache, int regnum)
1171{
1172  struct spu2ppu_cache *cache = *this_cache;
1173  struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
1174  gdb_byte *buf;
1175
1176  buf = alloca (register_size (gdbarch, regnum));
1177  regcache_cooked_read (cache->regcache, regnum, buf);
1178  return frame_unwind_got_bytes (this_frame, regnum, buf);
1179}
1180
/* Sniffer for the SPU-to-PPU cross-architecture unwinder.  Accepts
   the outermost SPU frame (identified by a zero back chain) when
   debugging a combined Cell/B.E. application, so that unwinding can
   continue into the PPU frames that invoked the SPU context.  */
static int
spu2ppu_sniffer (const struct frame_unwind *self,
		 struct frame_info *this_frame, void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR base, func, backchain;
  gdb_byte buf[4];

  /* Only applicable in combined debugging, i.e. when the main target
     architecture is not itself SPU.  */
  if (gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_spu)
    return 0;

  base = get_frame_sp (this_frame);
  func = get_frame_pc (this_frame);
  if (target_read_memory (base, buf, 4))
    return 0;
  backchain = extract_unsigned_integer (buf, 4, byte_order);

  /* A zero back chain marks the outermost SPU frame.  */
  if (!backchain)
    {
      struct frame_info *fi;

      struct spu2ppu_cache *cache
	= FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);

      cache->frame_id = frame_id_build (base + 16, func);

      /* Look for the innermost already-unwound frame that is not SPU;
	 its register state gives the PPU context to continue from.  */
      for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
	if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
	  break;

      if (fi)
	{
	  cache->regcache = frame_save_as_regcache (fi);
	  *this_prologue_cache = cache;
	  return 1;
	}
      else
	{
	  /* No outer non-SPU frame exists; duplicate the thread's
	     current register state instead.  */
	  struct regcache *regcache;
	  regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
	  cache->regcache = regcache_dup (regcache);
	  *this_prologue_cache = cache;
	  return 1;
	}
    }

  return 0;
}
1230
1231static void
1232spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
1233{
1234  struct spu2ppu_cache *cache = this_cache;
1235  regcache_xfree (cache->regcache);
1236}
1237
/* Cross-architecture unwinder from the outermost SPU frame into the
   PPU frames of a combined Cell/B.E. application.  */
static const struct frame_unwind spu2ppu_unwind = {
  ARCH_FRAME,
  default_frame_unwind_stop_reason,
  spu2ppu_this_id,
  spu2ppu_prev_register,
  NULL,				/* unwind_data */
  spu2ppu_sniffer,
  spu2ppu_dealloc_cache,
  spu2ppu_prev_arch,
};
1248
1249
1250/* Function calling convention.  */
1251
1252static CORE_ADDR
1253spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1254{
1255  return sp & ~15;
1256}
1257
1258static CORE_ADDR
1259spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
1260		     struct value **args, int nargs, struct type *value_type,
1261		     CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
1262		     struct regcache *regcache)
1263{
1264  /* Allocate space sufficient for a breakpoint, keeping the stack aligned.  */
1265  sp = (sp - 4) & ~15;
1266  /* Store the address of that breakpoint */
1267  *bp_addr = sp;
1268  /* The call starts at the callee's entry point.  */
1269  *real_pc = funaddr;
1270
1271  return sp;
1272}
1273
1274static int
1275spu_scalar_value_p (struct type *type)
1276{
1277  switch (TYPE_CODE (type))
1278    {
1279    case TYPE_CODE_INT:
1280    case TYPE_CODE_ENUM:
1281    case TYPE_CODE_RANGE:
1282    case TYPE_CODE_CHAR:
1283    case TYPE_CODE_BOOL:
1284    case TYPE_CODE_PTR:
1285    case TYPE_CODE_REF:
1286      return TYPE_LENGTH (type) <= 16;
1287
1288    default:
1289      return 0;
1290    }
1291}
1292
1293static void
1294spu_value_to_regcache (struct regcache *regcache, int regnum,
1295		       struct type *type, const gdb_byte *in)
1296{
1297  int len = TYPE_LENGTH (type);
1298
1299  if (spu_scalar_value_p (type))
1300    {
1301      int preferred_slot = len < 4 ? 4 - len : 0;
1302      regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1303    }
1304  else
1305    {
1306      while (len >= 16)
1307	{
1308	  regcache_cooked_write (regcache, regnum++, in);
1309	  in += 16;
1310	  len -= 16;
1311	}
1312
1313      if (len > 0)
1314	regcache_cooked_write_part (regcache, regnum, 0, len, in);
1315    }
1316}
1317
1318static void
1319spu_regcache_to_value (struct regcache *regcache, int regnum,
1320		       struct type *type, gdb_byte *out)
1321{
1322  int len = TYPE_LENGTH (type);
1323
1324  if (spu_scalar_value_p (type))
1325    {
1326      int preferred_slot = len < 4 ? 4 - len : 0;
1327      regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1328    }
1329  else
1330    {
1331      while (len >= 16)
1332	{
1333	  regcache_cooked_read (regcache, regnum++, out);
1334	  out += 16;
1335	  len -= 16;
1336	}
1337
1338      if (len > 0)
1339	regcache_cooked_read_part (regcache, regnum, 0, len, out);
1340    }
1341}
1342
/* Implement the "push_dummy_call" gdbarch method: set up registers
   and stack for an inferior function call.  Arguments are passed in
   registers from $r3 upward, one or more whole 16-byte registers per
   argument; arguments that do not fit go to the stack.  Returns the
   new stack pointer value.  */
static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr,
		     int nargs, struct value **args, CORE_ADDR sp,
		     int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR sp_delta;
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;		/* Index of first stack-passed arg, or -1.  */
  gdb_byte buf[16];

  /* Set the return address.  */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value.  */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers.  */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      int len = TYPE_LENGTH (type);
      /* Number of whole 16-byte registers this argument occupies.  */
      int n_regs = align_up (len, 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
	 all subsequent arguments go to the stack.  */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
	{
	  stack_arg = i;
	  break;
	}

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack.  */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size.  */
      for (i = stack_arg; i < nargs; i++)
	{
	  struct type *type = check_typedef (value_type (args[i]));
	  sp -= align_up (TYPE_LENGTH (type), 16);
	}

      /* Fill in stack arguments.  Scalars shorter than a word are
	 right-justified within the preferred (first 4-byte) slot of
	 their 16-byte area, mirroring spu_value_to_regcache.  */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
	{
	  struct value *arg = args[i];
	  struct type *type = check_typedef (value_type (arg));
	  int len = TYPE_LENGTH (type);
	  int preferred_slot;
	  
	  if (spu_scalar_value_p (type))
	    preferred_slot = len < 4 ? 4 - len : 0;
	  else
	    preferred_slot = 0;

	  target_write_memory (ap + preferred_slot, value_contents (arg), len);
	  ap += align_up (TYPE_LENGTH (type), 16);
	}
    }

  /* Allocate stack frame header (32 bytes: back chain at SP, with the
     link register slot at SP + 16 -- see spu_frame_unwind_cache).  */
  sp -= 32;

  /* Store stack back chain.  */
  regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
  target_write_memory (sp, buf, 16);

  /* Finally, update all slots of the SP register.  The register holds
     four 4-byte words; apply the same delta to each so the secondary
     slots stay consistent with the primary one.  */
  sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
  for (i = 0; i < 4; i++)
    {
      CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
    }
  regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);

  return sp;
}
1441
1442static struct frame_id
1443spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1444{
1445  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1446  CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
1447  CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1448  return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
1449}
1450
1451/* Function return value access.  */
1452
/* Implement the "return_value" gdbarch method.  Decide how a value of
   TYPE is returned, and -- when IN or OUT is non-NULL -- write the
   value into or read it out of REGCACHE accordingly.  */
static enum return_value_convention
spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
		  struct type *type, struct regcache *regcache,
		  gdb_byte *out, const gdb_byte *in)
{
  enum return_value_convention rvc;
  int opencl_vector = 0;

  /* IBM OpenCL vector return values get special slot placement
     (see the 2-byte case below).  */
  if (func_type
      && TYPE_CALLING_CONVENTION (func_type) == DW_CC_GDB_IBM_OpenCL
      && TYPE_CODE (type) == TYPE_CODE_ARRAY
      && TYPE_VECTOR (type))
    opencl_vector = 1;

  /* Values that fit within the block of argument registers are
     returned in registers; larger ones use the struct convention.  */
  if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
    rvc = RETURN_VALUE_REGISTER_CONVENTION;
  else
    rvc = RETURN_VALUE_STRUCT_CONVENTION;

  if (in)
    {
      switch (rvc)
	{
	case RETURN_VALUE_REGISTER_CONVENTION:
	  /* 2-byte OpenCL vectors occupy bytes 2..3 of $r3 instead of
	     the usual preferred-slot placement.  */
	  if (opencl_vector && TYPE_LENGTH (type) == 2)
	    regcache_cooked_write_part (regcache, SPU_ARG1_REGNUM, 2, 2, in);
	  else
	    spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
	  break;

	case RETURN_VALUE_STRUCT_CONVENTION:
	  error (_("Cannot set function return value."));
	  break;
	}
    }
  else if (out)
    {
      switch (rvc)
	{
	case RETURN_VALUE_REGISTER_CONVENTION:
	  if (opencl_vector && TYPE_LENGTH (type) == 2)
	    regcache_cooked_read_part (regcache, SPU_ARG1_REGNUM, 2, 2, out);
	  else
	    spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
	  break;

	case RETURN_VALUE_STRUCT_CONVENTION:
	  error (_("Function return value unknown."));
	  break;
	}
    }

  return rvc;
}
1507
1508
1509/* Breakpoints.  */
1510
1511static const gdb_byte *
1512spu_breakpoint_from_pc (struct gdbarch *gdbarch,
1513			CORE_ADDR * pcptr, int *lenptr)
1514{
1515  static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1516
1517  *lenptr = sizeof breakpoint;
1518  return breakpoint;
1519}
1520
/* Implement the "memory_remove_breakpoint" gdbarch method.  */
static int
spu_memory_remove_breakpoint (struct gdbarch *gdbarch,
			      struct bp_target_info *bp_tgt)
{
  /* We work around a problem in combined Cell/B.E. debugging here.  Consider
     that in a combined application, we have some breakpoints inserted in SPU
     code, and now the application forks (on the PPU side).  GDB common code
     will assume that the fork system call copied all breakpoints into the new
     process' address space, and that all those copies now need to be removed
     (see breakpoint.c:detach_breakpoints).

     While this is certainly true for PPU side breakpoints, it is not true
     for SPU side breakpoints.  fork will clone the SPU context file
     descriptors, so that all the existing SPU contexts are accessible
     in the new process.  However, the contents of the SPU contexts themselves
     are *not* cloned.  Therefore the effect of detach_breakpoints is to
     remove SPU breakpoints from the *original* SPU context's local store
     -- this is not the correct behaviour.

     The workaround is to check whether the PID we are asked to remove this
     breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
     PID of the current inferior (i.e. current_inferior ()->pid).  This is only
     true in the context of detach_breakpoints.  If so, we simply do nothing.
     [ Note that for the fork child process, it does not matter if breakpoints
     remain inserted, because those SPU contexts are not runnable anyway --
     the Linux kernel allows only the original process to invoke spu_run.  ]  */

  if (ptid_get_pid (inferior_ptid) != current_inferior ()->pid)
    return 0;

  return default_memory_remove_breakpoint (gdbarch, bp_tgt);
}
1553
1554
1555/* Software single-stepping support.  */
1556
/* Implement software single-step: insert single-step breakpoints at
   every possible successor of the current instruction.  Always
   returns 1 to indicate software single-step is in use.  */
static int
spu_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR pc, next_pc;
  unsigned int insn;
  int offset, reg;
  gdb_byte buf[4];
  ULONGEST lslr;

  pc = get_frame_pc (frame);

  /* If the current instruction cannot be read, report the step as
     handled without inserting any breakpoint.  */
  if (target_read_memory (pc, buf, 4))
    return 1;
  insn = extract_unsigned_integer (buf, 4, byte_order);

  /* Get local store limit.  Treat a zero LSLR as "no limit"
     (all-ones mask).  */
  lslr = get_frame_register_unsigned (frame, SPU_LSLR_REGNUM);
  if (!lslr)
    lslr = (ULONGEST) -1;

  /* Next sequential instruction is at PC + 4, except if the current
     instruction is a PPE-assisted call, in which case it is at PC + 8.
     Wrap around LS limit to be on the safe side.  */
  if ((insn & 0xffffff00) == 0x00002100)
    next_pc = (SPUADDR_ADDR (pc) + 8) & lslr;
  else
    next_pc = (SPUADDR_ADDR (pc) + 4) & lslr;

  insert_single_step_breakpoint (gdbarch,
				 aspace, SPUADDR (SPUADDR_SPU (pc), next_pc));

  /* If the instruction is a branch, also breakpoint its target.  */
  if (is_branch (insn, &offset, &reg))
    {
      CORE_ADDR target = offset;

      /* REG == SPU_PC_REGNUM indicates a PC-relative branch; any
	 other register supplies an indirect target address (low
	 instruction-alignment bits masked off).  */
      if (reg == SPU_PC_REGNUM)
	target += SPUADDR_ADDR (pc);
      else if (reg != -1)
	{
	  int optim, unavail;

	  if (get_frame_register_bytes (frame, reg, 0, 4, buf,
					 &optim, &unavail))
	    target += extract_unsigned_integer (buf, 4, byte_order) & -4;
	  else
	    {
	      if (optim)
		error (_("Could not determine address of "
			 "single-step breakpoint."));
	      if (unavail)
		throw_error (NOT_AVAILABLE_ERROR,
			     _("Could not determine address of "
			       "single-step breakpoint."));
	    }
	}

      /* Don't insert a second breakpoint on top of the sequential
	 one.  */
      target = target & lslr;
      if (target != next_pc)
	insert_single_step_breakpoint (gdbarch, aspace,
				       SPUADDR (SPUADDR_SPU (pc), target));
    }

  return 1;
}
1624
1625
1626/* Longjmp support.  */
1627
1628static int
1629spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1630{
1631  struct gdbarch *gdbarch = get_frame_arch (frame);
1632  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1633  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1634  gdb_byte buf[4];
1635  CORE_ADDR jb_addr;
1636  int optim, unavail;
1637
1638  /* Jump buffer is pointed to by the argument register $r3.  */
1639  if (!get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf,
1640				 &optim, &unavail))
1641    return 0;
1642
1643  jb_addr = extract_unsigned_integer (buf, 4, byte_order);
1644  if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
1645    return 0;
1646
1647  *pc = extract_unsigned_integer (buf, 4, byte_order);
1648  *pc = SPUADDR (tdep->id, *pc);
1649  return 1;
1650}
1651
1652
1653/* Disassembler.  */
1654
/* Data passed through disassemble_info to our print_address hook.  */
struct spu_dis_asm_data
{
  struct gdbarch *gdbarch;
  int id;			/* SPU context ID to re-attach to addresses.  */
};
1660
1661static void
1662spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info)
1663{
1664  struct spu_dis_asm_data *data = info->application_data;
1665  print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream);
1666}
1667
1668static int
1669gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
1670{
1671  /* The opcodes disassembler does 18-bit address arithmetic.  Make
1672     sure the SPU ID encoded in the high bits is added back when we
1673     call print_address.  */
1674  struct disassemble_info spu_info = *info;
1675  struct spu_dis_asm_data data;
1676  data.gdbarch = info->application_data;
1677  data.id = SPUADDR_SPU (memaddr);
1678
1679  spu_info.application_data = &data;
1680  spu_info.print_address_func = spu_dis_asm_print_address;
1681  return print_insn_spu (memaddr, &spu_info);
1682}
1683
1684
1685/* Target overlays for the SPU overlay manager.
1686
1687   See the documentation of simple_overlay_update for how the
1688   interface is supposed to work.
1689
1690   Data structures used by the overlay manager:
1691
1692   struct ovly_table
1693     {
1694        u32 vma;
1695        u32 size;
1696        u32 pos;
1697        u32 buf;
1698     } _ovly_table[];   -- one entry per overlay section
1699
1700   struct ovly_buf_table
1701     {
1702        u32 mapped;
1703     } _ovly_buf_table[];  -- one entry per overlay buffer
1704
1705   _ovly_table should never change.
1706
1707   Both tables are aligned to a 16-byte boundary, the symbols
1708   _ovly_table and _ovly_buf_table are of type STT_OBJECT and their
1709   size set to the size of the respective array. buf in _ovly_table is
1710   an index into _ovly_buf_table.
1711
1712   mapped is an index into _ovly_table.  Both the mapped and buf indices start
1713   from one to reference the first entry in their respective tables.  */
1714
1715/* Using the per-objfile private data mechanism, we store for each
1716   objfile an array of "struct spu_overlay_table" structures, one
1717   for each obj_section of the objfile.  This structure holds two
1718   fields, MAPPED_PTR and MAPPED_VAL.  If MAPPED_PTR is zero, this
1719   is *not* an overlay section.  If it is non-zero, it represents
1720   a target address.  The overlay section is mapped iff the target
1721   integer at this location equals MAPPED_VAL.  */
1722
/* Registry key under which the per-objfile overlay table described
   above is attached to each objfile.  */
static const struct objfile_data *spu_overlay_data;
1724
struct spu_overlay_table
  {
    /* Target address of this section's _ovly_buf_table entry, or
       zero if the section is not an overlay section.  */
    CORE_ADDR mapped_ptr;
    /* Value at MAPPED_PTR indicating this section is mapped.  */
    CORE_ADDR mapped_val;
  };
1730
1731/* Retrieve the overlay table for OBJFILE.  If not already cached, read
1732   the _ovly_table data structure from the target and initialize the
1733   spu_overlay_table data structure from it.  */
1734static struct spu_overlay_table *
1735spu_get_overlay_table (struct objfile *objfile)
1736{
1737  enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
1738		   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1739  struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
1740  CORE_ADDR ovly_table_base, ovly_buf_table_base;
1741  unsigned ovly_table_size, ovly_buf_table_size;
1742  struct spu_overlay_table *tbl;
1743  struct obj_section *osect;
1744  char *ovly_table;
1745  int i;
1746
1747  tbl = objfile_data (objfile, spu_overlay_data);
1748  if (tbl)
1749    return tbl;
1750
1751  ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
1752  if (!ovly_table_msym)
1753    return NULL;
1754
1755  ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table",
1756					       NULL, objfile);
1757  if (!ovly_buf_table_msym)
1758    return NULL;
1759
1760  ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
1761  ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
1762
1763  ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
1764  ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
1765
1766  ovly_table = xmalloc (ovly_table_size);
1767  read_memory (ovly_table_base, ovly_table, ovly_table_size);
1768
1769  tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1770			objfile->sections_end - objfile->sections,
1771			struct spu_overlay_table);
1772
1773  for (i = 0; i < ovly_table_size / 16; i++)
1774    {
1775      CORE_ADDR vma  = extract_unsigned_integer (ovly_table + 16*i + 0,
1776						 4, byte_order);
1777      CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
1778						 4, byte_order);
1779      CORE_ADDR pos  = extract_unsigned_integer (ovly_table + 16*i + 8,
1780						 4, byte_order);
1781      CORE_ADDR buf  = extract_unsigned_integer (ovly_table + 16*i + 12,
1782						 4, byte_order);
1783
1784      if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
1785	continue;
1786
1787      ALL_OBJFILE_OSECTIONS (objfile, osect)
1788	if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1789	    && pos == osect->the_bfd_section->filepos)
1790	  {
1791	    int ndx = osect - objfile->sections;
1792	    tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1793	    tbl[ndx].mapped_val = i + 1;
1794	    break;
1795	  }
1796    }
1797
1798  xfree (ovly_table);
1799  set_objfile_data (objfile, spu_overlay_data, tbl);
1800  return tbl;
1801}
1802
/* Read the _ovly_buf_table entry from the target to determine whether
   OSECT is currently mapped, and update the mapped state.  */
1805static void
1806spu_overlay_update_osect (struct obj_section *osect)
1807{
1808  enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
1809		   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1810  struct spu_overlay_table *ovly_table;
1811  CORE_ADDR id, val;
1812
1813  ovly_table = spu_get_overlay_table (osect->objfile);
1814  if (!ovly_table)
1815    return;
1816
1817  ovly_table += osect - osect->objfile->sections;
1818  if (ovly_table->mapped_ptr == 0)
1819    return;
1820
1821  id = SPUADDR_SPU (obj_section_addr (osect));
1822  val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
1823				      4, byte_order);
1824  osect->ovly_mapped = (val == ovly_table->mapped_val);
1825}
1826
1827/* If OSECT is NULL, then update all sections' mapped state.
1828   If OSECT is non-NULL, then update only OSECT's mapped state.  */
1829static void
1830spu_overlay_update (struct obj_section *osect)
1831{
1832  /* Just one section.  */
1833  if (osect)
1834    spu_overlay_update_osect (osect);
1835
1836  /* All sections.  */
1837  else
1838    {
1839      struct objfile *objfile;
1840
1841      ALL_OBJSECTIONS (objfile, osect)
1842	if (section_is_overlay (osect))
1843	  spu_overlay_update_osect (osect);
1844    }
1845}
1846
1847/* Whenever a new objfile is loaded, read the target's _ovly_table.
1848   If there is one, go through all sections and make sure for non-
1849   overlay sections LMA equals VMA, while for overlay sections LMA
1850   is larger than SPU_OVERLAY_LMA.  */
static void
spu_overlay_new_objfile (struct objfile *objfile)
{
  struct spu_overlay_table *ovly_table;
  struct obj_section *osect;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
    return;

  /* Consider only SPU objfiles.  */
  if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
    return;

  /* Check if this objfile has overlays.  */
  ovly_table = spu_get_overlay_table (objfile);
  if (!ovly_table)
    return;

  /* Now go and fiddle with all the LMAs.  Non-overlay sections get
     LMA == VMA; overlay sections get LMA = SPU_OVERLAY_LMA plus their
     file position, so distinct overlays have distinct LMAs.  */
  ALL_OBJFILE_OSECTIONS (objfile, osect)
    {
      bfd *obfd = objfile->obfd;
      asection *bsect = osect->the_bfd_section;
      int ndx = osect - objfile->sections;

      if (ovly_table[ndx].mapped_ptr == 0)
	bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
      else
	bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos;
    }
}
1883
1884
1885/* Insert temporary breakpoint on "main" function of newly loaded
1886   SPE context OBJFILE.  */
static void
spu_catch_start (struct objfile *objfile)
{
  struct minimal_symbol *minsym;
  struct symtab *symtab;
  CORE_ADDR pc;
  char buf[32];

  /* Do this only if requested by "set spu stop-on-load on".  */
  if (!spu_stop_on_load_p)
    return;

  /* Consider only SPU objfiles.  */
  if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu)
    return;

  /* The main objfile is handled differently.  */
  if (objfile == symfile_objfile)
    return;

  /* There can be multiple symbols named "main".  Search for the
     "main" in *this* objfile.  */
  minsym = lookup_minimal_symbol ("main", NULL, objfile);
  if (!minsym)
    return;

  /* If we have debugging information, try to use it -- this
     will allow us to properly skip the prologue.  */
  pc = SYMBOL_VALUE_ADDRESS (minsym);
  symtab = find_pc_sect_symtab (pc, SYMBOL_OBJ_SECTION (minsym));
  if (symtab != NULL)
    {
      struct blockvector *bv = BLOCKVECTOR (symtab);
      struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK);
      struct symbol *sym;
      struct symtab_and_line sal;

      sym = lookup_block_symbol (block, "main", VAR_DOMAIN);
      if (sym)
	{
	  fixup_symbol_section (sym, objfile);
	  /* Move PC past the prologue, to the first line of "main".  */
	  sal = find_function_start_sal (sym, 1);
	  pc = sal.pc;
	}
    }

  /* Use a numerical address for the set_breakpoint command to avoid having
     the breakpoint re-set incorrectly.  */
  xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc));
  create_breakpoint (get_objfile_arch (objfile), buf /* arg */,
		     NULL /* cond_string */, -1 /* thread */,
		     0 /* parse_condition_and_thread */, 1 /* tempflag */,
		     bp_breakpoint /* type_wanted */,
		     0 /* ignore_count */,
		     AUTO_BOOLEAN_FALSE /* pending_break_support */,
		     NULL /* ops */, 0 /* from_tty */, 1 /* enabled */,
		     0 /* internal  */);
}
1945
1946
1947/* Look up OBJFILE loaded into FRAME's SPU context.  */
1948static struct objfile *
1949spu_objfile_from_frame (struct frame_info *frame)
1950{
1951  struct gdbarch *gdbarch = get_frame_arch (frame);
1952  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1953  struct objfile *obj;
1954
1955  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
1956    return NULL;
1957
1958  ALL_OBJFILES (obj)
1959    {
1960      if (obj->sections != obj->sections_end
1961	  && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id)
1962	return obj;
1963    }
1964
1965  return NULL;
1966}
1967
1968/* Flush cache for ea pointer access if available.  */
static void
flush_ea_cache (void)
{
  struct minimal_symbol *msymbol;
  struct objfile *obj;

  if (!has_stack_frames ())
    return;

  /* Find the objfile loaded into the current frame's SPU context.  */
  obj = spu_objfile_from_frame (get_current_frame ());
  if (obj == NULL)
    return;

  /* Lookup inferior function __cache_flush.  */
  msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj);
  if (msymbol != NULL)
    {
      struct type *type;
      CORE_ADDR addr;

      /* Build a pointer-to-function value of type "void (*) ()" for
	 __cache_flush and call it in the inferior.  */
      type = objfile_type (obj)->builtin_void;
      type = lookup_function_type (type);
      type = lookup_pointer_type (type);
      addr = SYMBOL_VALUE_ADDRESS (msymbol);

      call_function_by_hand (value_from_pointer (type, addr), 0, NULL);
    }
}
1997
1998/* This handler is called when the inferior has stopped.  If it is stopped in
1999   SPU architecture then flush the ea cache if used.  */
static void
spu_attach_normal_stop (struct bpstats *bs, int print_frame)
{
  if (!spu_auto_flush_cache_p)
    return;

  /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
     re-entering this function when __cache_flush stops.
     NOTE(review): if flush_ea_cache exits via a GDB error, the flag
     is left disabled -- confirm whether that is acceptable.  */
  spu_auto_flush_cache_p = 0;
  flush_ea_cache ();
  spu_auto_flush_cache_p = 1;
}
2012
2013
2014/* "info spu" commands.  */
2015
2016static void
2017info_spu_event_command (char *args, int from_tty)
2018{
2019  struct frame_info *frame = get_selected_frame (NULL);
2020  ULONGEST event_status = 0;
2021  ULONGEST event_mask = 0;
2022  struct cleanup *chain;
2023  gdb_byte buf[100];
2024  char annex[32];
2025  LONGEST len;
2026  int rc, id;
2027
2028  if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2029    error (_("\"info spu\" is only supported on the SPU architecture."));
2030
2031  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2032
2033  xsnprintf (annex, sizeof annex, "%d/event_status", id);
2034  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2035		     buf, 0, (sizeof (buf) - 1));
2036  if (len <= 0)
2037    error (_("Could not read event_status."));
2038  buf[len] = '\0';
2039  event_status = strtoulst (buf, NULL, 16);
2040
2041  xsnprintf (annex, sizeof annex, "%d/event_mask", id);
2042  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2043		     buf, 0, (sizeof (buf) - 1));
2044  if (len <= 0)
2045    error (_("Could not read event_mask."));
2046  buf[len] = '\0';
2047  event_mask = strtoulst (buf, NULL, 16);
2048
2049  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
2050
2051  if (ui_out_is_mi_like_p (uiout))
2052    {
2053      ui_out_field_fmt (uiout, "event_status",
2054			"0x%s", phex_nz (event_status, 4));
2055      ui_out_field_fmt (uiout, "event_mask",
2056			"0x%s", phex_nz (event_mask, 4));
2057    }
2058  else
2059    {
2060      printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
2061      printf_filtered (_("Event Mask   0x%s\n"), phex (event_mask, 4));
2062    }
2063
2064  do_cleanups (chain);
2065}
2066
2067static void
2068info_spu_signal_command (char *args, int from_tty)
2069{
2070  struct frame_info *frame = get_selected_frame (NULL);
2071  struct gdbarch *gdbarch = get_frame_arch (frame);
2072  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2073  ULONGEST signal1 = 0;
2074  ULONGEST signal1_type = 0;
2075  int signal1_pending = 0;
2076  ULONGEST signal2 = 0;
2077  ULONGEST signal2_type = 0;
2078  int signal2_pending = 0;
2079  struct cleanup *chain;
2080  char annex[32];
2081  gdb_byte buf[100];
2082  LONGEST len;
2083  int rc, id;
2084
2085  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2086    error (_("\"info spu\" is only supported on the SPU architecture."));
2087
2088  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2089
2090  xsnprintf (annex, sizeof annex, "%d/signal1", id);
2091  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2092  if (len < 0)
2093    error (_("Could not read signal1."));
2094  else if (len == 4)
2095    {
2096      signal1 = extract_unsigned_integer (buf, 4, byte_order);
2097      signal1_pending = 1;
2098    }
2099
2100  xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
2101  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2102		     buf, 0, (sizeof (buf) - 1));
2103  if (len <= 0)
2104    error (_("Could not read signal1_type."));
2105  buf[len] = '\0';
2106  signal1_type = strtoulst (buf, NULL, 16);
2107
2108  xsnprintf (annex, sizeof annex, "%d/signal2", id);
2109  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2110  if (len < 0)
2111    error (_("Could not read signal2."));
2112  else if (len == 4)
2113    {
2114      signal2 = extract_unsigned_integer (buf, 4, byte_order);
2115      signal2_pending = 1;
2116    }
2117
2118  xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
2119  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2120		     buf, 0, (sizeof (buf) - 1));
2121  if (len <= 0)
2122    error (_("Could not read signal2_type."));
2123  buf[len] = '\0';
2124  signal2_type = strtoulst (buf, NULL, 16);
2125
2126  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
2127
2128  if (ui_out_is_mi_like_p (uiout))
2129    {
2130      ui_out_field_int (uiout, "signal1_pending", signal1_pending);
2131      ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
2132      ui_out_field_int (uiout, "signal1_type", signal1_type);
2133      ui_out_field_int (uiout, "signal2_pending", signal2_pending);
2134      ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
2135      ui_out_field_int (uiout, "signal2_type", signal2_type);
2136    }
2137  else
2138    {
2139      if (signal1_pending)
2140	printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
2141      else
2142	printf_filtered (_("Signal 1 not pending "));
2143
2144      if (signal1_type)
2145	printf_filtered (_("(Type Or)\n"));
2146      else
2147	printf_filtered (_("(Type Overwrite)\n"));
2148
2149      if (signal2_pending)
2150	printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
2151      else
2152	printf_filtered (_("Signal 2 not pending "));
2153
2154      if (signal2_type)
2155	printf_filtered (_("(Type Or)\n"));
2156      else
2157	printf_filtered (_("(Type Overwrite)\n"));
2158    }
2159
2160  do_cleanups (chain);
2161}
2162
2163static void
2164info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
2165		       const char *field, const char *msg)
2166{
2167  struct cleanup *chain;
2168  int i;
2169
2170  if (nr <= 0)
2171    return;
2172
2173  chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
2174
2175  ui_out_table_header (uiout, 32, ui_left, field, msg);
2176  ui_out_table_body (uiout);
2177
2178  for (i = 0; i < nr; i++)
2179    {
2180      struct cleanup *val_chain;
2181      ULONGEST val;
2182      val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
2183      val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
2184      ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
2185      do_cleanups (val_chain);
2186
2187      if (!ui_out_is_mi_like_p (uiout))
2188	printf_filtered ("\n");
2189    }
2190
2191  do_cleanups (chain);
2192}
2193
2194static void
2195info_spu_mailbox_command (char *args, int from_tty)
2196{
2197  struct frame_info *frame = get_selected_frame (NULL);
2198  struct gdbarch *gdbarch = get_frame_arch (frame);
2199  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2200  struct cleanup *chain;
2201  char annex[32];
2202  gdb_byte buf[1024];
2203  LONGEST len;
2204  int i, id;
2205
2206  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2207    error (_("\"info spu\" is only supported on the SPU architecture."));
2208
2209  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2210
2211  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
2212
2213  xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
2214  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2215		     buf, 0, sizeof buf);
2216  if (len < 0)
2217    error (_("Could not read mbox_info."));
2218
2219  info_spu_mailbox_list (buf, len / 4, byte_order,
2220			 "mbox", "SPU Outbound Mailbox");
2221
2222  xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
2223  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2224		     buf, 0, sizeof buf);
2225  if (len < 0)
2226    error (_("Could not read ibox_info."));
2227
2228  info_spu_mailbox_list (buf, len / 4, byte_order,
2229			 "ibox", "SPU Outbound Interrupt Mailbox");
2230
2231  xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
2232  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2233		     buf, 0, sizeof buf);
2234  if (len < 0)
2235    error (_("Could not read wbox_info."));
2236
2237  info_spu_mailbox_list (buf, len / 4, byte_order,
2238			 "wbox", "SPU Inbound Mailbox");
2239
2240  do_cleanups (chain);
2241}
2242
2243static ULONGEST
2244spu_mfc_get_bitfield (ULONGEST word, int first, int last)
2245{
2246  ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
2247  return (word >> (63 - last)) & mask;
2248}
2249
/* Display the contents of an MFC DMA command queue.  BUF holds NR
   32-byte command-queue context entries, in BYTE_ORDER endianness.
   Shared by "info spu dma" and "info spu proxydma".  */
static void
info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
{
  /* Mnemonics of the MFC command opcodes, indexed by opcode value.
     NULL entries are reserved opcodes; those are displayed numerically
     instead.  */
  static char *spu_mfc_opcode[256] =
    {
    /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
             "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
    /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
             "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
    /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
    /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
             "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
    /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

  /* seq[i] is the index of the queue slot to display in position i.
     done is a bitmask recording which slots were already scheduled;
     slot j corresponds to bit (nr - 1 - j).  NOTE(review): this
     assumes nr <= 32 so the mask fits in an int -- both callers pass
     at most 16.  */
  int *seq = alloca (nr * sizeof (int));
  int done = 0;
  struct cleanup *chain;
  int i, j;


  /* Determine sequence in which to display (valid) entries.  */
  for (i = 0; i < nr; i++)
    {
      /* Search for the first valid entry all of whose
	 dependencies are met.  */
      for (j = 0; j < nr; j++)
	{
          ULONGEST mfc_cq_dw3;
	  ULONGEST dependencies;

	  if (done & (1 << (nr - 1 - j)))
	    continue;

	  /* Bit 16 of dword 3 flags a valid queue entry; bits 0..nr-1
	     encode the slots this entry depends on.  */
	  mfc_cq_dw3
	    = extract_unsigned_integer (buf + 32*j + 24,8, byte_order);
	  if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
	    continue;

	  dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
	  if ((dependencies & done) != dependencies)
	    continue;

	  seq[i] = j;
	  done |= 1 << (nr - 1 - j);
	  break;
	}

      if (j == nr)
	break;
    }

  /* Only the first i entries were valid and schedulable; display
     just those.  */
  nr = i;


  chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");

  ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
  ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
  ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
  ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
  ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
  ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
  ui_out_table_header (uiout, 7, ui_left, "size", "Size");
  ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
  ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
  ui_out_table_header (uiout, 1, ui_left, "error_p", "E");

  ui_out_table_body (uiout);

  for (i = 0; i < nr; i++)
    {
      struct cleanup *cmd_chain;
      ULONGEST mfc_cq_dw0;
      ULONGEST mfc_cq_dw1;
      ULONGEST mfc_cq_dw2;
      int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
      int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
      ULONGEST mfc_ea;
      int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;

      /* Decode contents of MFC Command Queue Context Save/Restore Registers.
	 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1.  */

      mfc_cq_dw0
	= extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
      mfc_cq_dw1
	= extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
      mfc_cq_dw2
	= extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);

      list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
      list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
      mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
      mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
      list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
      rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
      tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);

      /* The effective address is split across two doublewords: the
	 upper 52 bits live in dw1, the low 12 bits in dw2.  */
      mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
		| spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);

      mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
      mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
      noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
      qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
      ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
      cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);

      cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");

      if (spu_mfc_opcode[mfc_cmd_opcode])
	ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
      else
	ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);

      ui_out_field_int (uiout, "tag", mfc_cmd_tag);
      ui_out_field_int (uiout, "tid", tclass_id);
      ui_out_field_int (uiout, "rid", rclass_id);

      if (ea_valid_p)
	ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
      else
	ui_out_field_skip (uiout, "ea");

      /* LSA and sizes are stored in quadword (16-byte) units; scale
	 them back to bytes for display.  */
      ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
      if (qw_valid_p)
	ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
      else
	ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);

      if (list_valid_p)
	{
	  ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
	  ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
	}
      else
	{
	  ui_out_field_skip (uiout, "lstaddr");
	  ui_out_field_skip (uiout, "lstsize");
	}

      if (cmd_error_p)
	ui_out_field_string (uiout, "error_p", "*");
      else
	ui_out_field_skip (uiout, "error_p");

      do_cleanups (cmd_chain);

      if (!ui_out_is_mi_like_p (uiout))
	printf_filtered ("\n");
    }

  do_cleanups (chain);
}
2429
2430static void
2431info_spu_dma_command (char *args, int from_tty)
2432{
2433  struct frame_info *frame = get_selected_frame (NULL);
2434  struct gdbarch *gdbarch = get_frame_arch (frame);
2435  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2436  ULONGEST dma_info_type;
2437  ULONGEST dma_info_mask;
2438  ULONGEST dma_info_status;
2439  ULONGEST dma_info_stall_and_notify;
2440  ULONGEST dma_info_atomic_command_status;
2441  struct cleanup *chain;
2442  char annex[32];
2443  gdb_byte buf[1024];
2444  LONGEST len;
2445  int i, id;
2446
2447  if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2448    error (_("\"info spu\" is only supported on the SPU architecture."));
2449
2450  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2451
2452  xsnprintf (annex, sizeof annex, "%d/dma_info", id);
2453  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2454		     buf, 0, 40 + 16 * 32);
2455  if (len <= 0)
2456    error (_("Could not read dma_info."));
2457
2458  dma_info_type
2459    = extract_unsigned_integer (buf, 8, byte_order);
2460  dma_info_mask
2461    = extract_unsigned_integer (buf + 8, 8, byte_order);
2462  dma_info_status
2463    = extract_unsigned_integer (buf + 16, 8, byte_order);
2464  dma_info_stall_and_notify
2465    = extract_unsigned_integer (buf + 24, 8, byte_order);
2466  dma_info_atomic_command_status
2467    = extract_unsigned_integer (buf + 32, 8, byte_order);
2468
2469  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
2470
2471  if (ui_out_is_mi_like_p (uiout))
2472    {
2473      ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
2474			phex_nz (dma_info_type, 4));
2475      ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
2476			phex_nz (dma_info_mask, 4));
2477      ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
2478			phex_nz (dma_info_status, 4));
2479      ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
2480			phex_nz (dma_info_stall_and_notify, 4));
2481      ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
2482			phex_nz (dma_info_atomic_command_status, 4));
2483    }
2484  else
2485    {
2486      const char *query_msg = _("no query pending");
2487
2488      if (dma_info_type & 4)
2489	switch (dma_info_type & 3)
2490	  {
2491	    case 1: query_msg = _("'any' query pending"); break;
2492	    case 2: query_msg = _("'all' query pending"); break;
2493	    default: query_msg = _("undefined query type"); break;
2494	  }
2495
2496      printf_filtered (_("Tag-Group Status  0x%s\n"),
2497		       phex (dma_info_status, 4));
2498      printf_filtered (_("Tag-Group Mask    0x%s (%s)\n"),
2499		       phex (dma_info_mask, 4), query_msg);
2500      printf_filtered (_("Stall-and-Notify  0x%s\n"),
2501		       phex (dma_info_stall_and_notify, 4));
2502      printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2503		       phex (dma_info_atomic_command_status, 4));
2504      printf_filtered ("\n");
2505    }
2506
2507  info_spu_dma_cmdlist (buf + 40, 16, byte_order);
2508  do_cleanups (chain);
2509}
2510
2511static void
2512info_spu_proxydma_command (char *args, int from_tty)
2513{
2514  struct frame_info *frame = get_selected_frame (NULL);
2515  struct gdbarch *gdbarch = get_frame_arch (frame);
2516  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2517  ULONGEST dma_info_type;
2518  ULONGEST dma_info_mask;
2519  ULONGEST dma_info_status;
2520  struct cleanup *chain;
2521  char annex[32];
2522  gdb_byte buf[1024];
2523  LONGEST len;
2524  int i, id;
2525
2526  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2527    error (_("\"info spu\" is only supported on the SPU architecture."));
2528
2529  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2530
2531  xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
2532  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2533		     buf, 0, 24 + 8 * 32);
2534  if (len <= 0)
2535    error (_("Could not read proxydma_info."));
2536
2537  dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
2538  dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
2539  dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
2540
2541  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
2542
2543  if (ui_out_is_mi_like_p (uiout))
2544    {
2545      ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
2546			phex_nz (dma_info_type, 4));
2547      ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
2548			phex_nz (dma_info_mask, 4));
2549      ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
2550			phex_nz (dma_info_status, 4));
2551    }
2552  else
2553    {
2554      const char *query_msg;
2555
2556      switch (dma_info_type & 3)
2557	{
2558	case 0: query_msg = _("no query pending"); break;
2559	case 1: query_msg = _("'any' query pending"); break;
2560	case 2: query_msg = _("'all' query pending"); break;
2561	default: query_msg = _("undefined query type"); break;
2562	}
2563
2564      printf_filtered (_("Tag-Group Status  0x%s\n"),
2565		       phex (dma_info_status, 4));
2566      printf_filtered (_("Tag-Group Mask    0x%s (%s)\n"),
2567		       phex (dma_info_mask, 4), query_msg);
2568      printf_filtered ("\n");
2569    }
2570
2571  info_spu_dma_cmdlist (buf + 24, 8, byte_order);
2572  do_cleanups (chain);
2573}
2574
/* Top-level "info spu" command: it requires a sub-command, so print
   the list of available SPU facilities.  */
static void
info_spu_command (char *args, int from_tty)
{
  printf_unfiltered (_("\"info spu\" must be followed by "
		       "the name of an SPU facility.\n"));
  help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
}
2582
2583
2584/* Root of all "set spu "/"show spu " commands.  */
2585
/* Implement the "show spu" prefix command: list its sub-commands.  */
static void
show_spu_command (char *args, int from_tty)
{
  help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout);
}
2591
/* Implement the "set spu" prefix command: list its sub-commands.  */
static void
set_spu_command (char *args, int from_tty)
{
  help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout);
}
2597
/* "show spu stop-on-load" callback: report the current setting,
   already rendered as a string in VALUE.  */
static void
show_spu_stop_on_load (struct ui_file *file, int from_tty,
                       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
                    value);
}
2605
/* "show spu auto-flush-cache" callback: report the current setting,
   already rendered as a string in VALUE.  */
static void
show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
                    value);
}
2613
2614
2615/* Set up gdbarch struct.  */
2616
/* gdbarch initialization callback for bfd_arch_spu.  One gdbarch is
   created per spufs context ID so that each SPU context gets its own
   address space; ID -1 denotes the generic (context-less) SPU
   architecture.  */
static struct gdbarch *
spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch *gdbarch;
  struct gdbarch_tdep *tdep;
  int id = -1;

  /* Which spufs ID was requested as address space?  */
  if (info.tdep_info)
    id = *(int *)info.tdep_info;
  /* For objfile architectures of SPU solibs, decode the ID from the name.
     This assumes the filename convention employed by solib-spu.c.  */
  else if (info.abfd)
    {
      char *name = strrchr (info.abfd->filename, '@');
      if (name)
	sscanf (name, "@0x%*x <%d>", &id);
    }

  /* Find a candidate among extant architectures.  Reuse requires a
     matching spufs ID in addition to the generic info match.  */
  for (arches = gdbarch_list_lookup_by_info (arches, &info);
       arches != NULL;
       arches = gdbarch_list_lookup_by_info (arches->next, &info))
    {
      tdep = gdbarch_tdep (arches->gdbarch);
      if (tdep && tdep->id == id)
	return arches->gdbarch;
    }

  /* None found, so create a new architecture.  */
  tdep = XCALLOC (1, struct gdbarch_tdep);
  tdep->id = id;
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Disassembler.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu);

  /* Registers.  */
  set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
  set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
  set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
  set_gdbarch_read_pc (gdbarch, spu_read_pc);
  set_gdbarch_write_pc (gdbarch, spu_write_pc);
  set_gdbarch_register_name (gdbarch, spu_register_name);
  set_gdbarch_register_type (gdbarch, spu_register_type);
  set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
  set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
  set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
  set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);

  /* Data types.  */
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_ptr_bit (gdbarch, 32);
  set_gdbarch_addr_bit (gdbarch, 32);
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 64);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);

  /* Address handling.  */
  set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer);
  set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
  set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
  set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags);
  set_gdbarch_address_class_type_flags_to_name
    (gdbarch, spu_address_class_type_flags_to_name);
  set_gdbarch_address_class_name_to_type_flags
    (gdbarch, spu_address_class_name_to_type_flags);


  /* Inferior function calls.  */
  set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
  set_gdbarch_frame_align (gdbarch, spu_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 2000);
  set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
  set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
  set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
  set_gdbarch_return_value (gdbarch, spu_return_value);

  /* Frame handling.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
  frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
  frame_base_set_default (gdbarch, &spu_frame_base);
  set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
  set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
  set_gdbarch_frame_args_skip (gdbarch, 0);
  set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
  set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);

  /* Cell/B.E. cross-architecture unwinder support.  */
  frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind);

  /* Breakpoints.  */
  set_gdbarch_decr_pc_after_break (gdbarch, 4);
  set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
  set_gdbarch_memory_remove_breakpoint (gdbarch, spu_memory_remove_breakpoint);
  set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
  set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);

  /* Overlays.  */
  set_gdbarch_overlay_update (gdbarch, spu_overlay_update);

  return gdbarch;
}
2730
2731/* Provide a prototype to silence -Wmissing-prototypes.  */
2732extern initialize_file_ftype _initialize_spu_tdep;
2733
/* Module initialization: register the SPU architecture, hook into
   objfile and normal-stop observers, and install the "set/show spu"
   and "info spu" command trees.  */
void
_initialize_spu_tdep (void)
{
  register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);

  /* Add ourselves to objfile event chain.  */
  observer_attach_new_objfile (spu_overlay_new_objfile);
  spu_overlay_data = register_objfile_data ();

  /* Install spu stop-on-load handler.  */
  observer_attach_new_objfile (spu_catch_start);

  /* Add ourselves to normal_stop event chain.  */
  observer_attach_normal_stop (spu_attach_normal_stop);

  /* Add root prefix command for all "set spu"/"show spu" commands.
     These must be registered before the sub-commands below that hang
     off setspucmdlist/showspucmdlist.  */
  add_prefix_cmd ("spu", no_class, set_spu_command,
		  _("Various SPU specific commands."),
		  &setspucmdlist, "set spu ", 0, &setlist);
  add_prefix_cmd ("spu", no_class, show_spu_command,
		  _("Various SPU specific commands."),
		  &showspucmdlist, "show spu ", 0, &showlist);

  /* Toggle whether or not to add a temporary breakpoint at the "main"
     function of new SPE contexts.  */
  add_setshow_boolean_cmd ("stop-on-load", class_support,
                          &spu_stop_on_load_p, _("\
Set whether to stop for new SPE threads."),
                           _("\
Show whether to stop for new SPE threads."),
                           _("\
Use \"on\" to give control to the user when a new SPE thread\n\
enters its \"main\" function.\n\
Use \"off\" to disable stopping for new SPE threads."),
                          NULL,
                          show_spu_stop_on_load,
                          &setspucmdlist, &showspucmdlist);

  /* Toggle whether or not to automatically flush the software-managed
     cache whenever SPE execution stops.  */
  add_setshow_boolean_cmd ("auto-flush-cache", class_support,
                          &spu_auto_flush_cache_p, _("\
Set whether to automatically flush the software-managed cache."),
                           _("\
Show whether to automatically flush the software-managed cache."),
                           _("\
Use \"on\" to automatically flush the software-managed cache\n\
whenever SPE execution stops.\n\
Use \"off\" to never automatically flush the software-managed cache."),
                          NULL,
                          show_spu_auto_flush_cache,
                          &setspucmdlist, &showspucmdlist);

  /* Add root prefix command for all "info spu" commands.  */
  add_prefix_cmd ("spu", class_info, info_spu_command,
		  _("Various SPU specific commands."),
		  &infospucmdlist, "info spu ", 0, &infolist);

  /* Add various "info spu" commands.  */
  add_cmd ("event", class_info, info_spu_event_command,
	   _("Display SPU event facility status.\n"),
	   &infospucmdlist);
  add_cmd ("signal", class_info, info_spu_signal_command,
	   _("Display SPU signal notification facility status.\n"),
	   &infospucmdlist);
  add_cmd ("mailbox", class_info, info_spu_mailbox_command,
	   _("Display SPU mailbox facility status.\n"),
	   &infospucmdlist);
  add_cmd ("dma", class_info, info_spu_dma_command,
	   _("Display MFC DMA status.\n"),
	   &infospucmdlist);
  add_cmd ("proxydma", class_info, info_spu_proxydma_command,
	   _("Display MFC Proxy-DMA status.\n"),
	   &infospucmdlist);
}
2809