/* frv simulator support code
   Copyright (C) 1998-2020 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "cgen-engine.h"
#include "cgen-par.h"
#include "bfd.h"
#include "gdb/sim-frv.h"
#include <math.h>

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and
   CALL insns.  */
int frvbf_write_next_vliw_addr_to_LR;

/* The contents of BUF are in target byte order.  */
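/* Return LEN on success; return 0 to signal that RN is not available on
   the current machine, not implemented, or unrecognized.  */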
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
	return 0;
      else
	SETTSI (buf, GET_H_GR (grn));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
	return 0;
      else
	SETTSI (buf, GET_H_FR (frn));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
	return 0;
      SETTSI (buf, GET_H_SPR (spr));
    }
  else
    {
      SETTSI (buf, 0xdeadbeef);
      return 0;
    }

  return len;
}

/* The contents of BUF are in target byte order.  */
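/* Return LEN on success; return 0 to signal that RN could not be
   written.  */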

int
frvbf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
	return 0;
      else
	SET_H_GR (grn, GETTSI (buf));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
	return 0;
      else
	SET_H_FR (frn, GETTSI (buf));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
	return 0;
      SET_H_SPR (spr, GETTSI (buf));
    }
  else
    return 0;

  return len;
}

/* Cover fns to access the general registers.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}

void
frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
{
  frv_check_gr_access (current_cpu, gr);

  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  CPU (h_gr[gr]) = newval;
}

/* Cover fns to access the floating point registers.  */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}

void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}

/* Cover fns to access the general registers as double words.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
	{
	  /* Note: there is a discrepancy between V2.2 of the FR400
	     instruction manual and the various FR4xx LSI specs.
	     The former claims that unaligned registers cause a
	     register_exception while the latter say it's an
	     illegal_instruction.  The LSI specs appear to be
	     correct; in fact, the FR4xx series is not documented
	     as having a register_exception.  */
	case bfd_mach_fr400:
	case bfd_mach_fr450:
	case bfd_mach_fr550:
	  frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
	  break;
	case bfd_mach_frvtomcat:
	case bfd_mach_fr500:
	case bfd_mach_frv:
	  frv_queue_register_exception_interrupt (current_cpu,
						  FRV_REC_UNALIGNED);
	  break;
	default:
	  break;
	}

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
	{
	  /* See comment in check_register_alignment().  */
	case bfd_mach_fr400:
	case bfd_mach_fr450:
	case bfd_mach_fr550:
	  frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
	  break;
	case bfd_mach_frvtomcat:
	case bfd_mach_fr500:
	case bfd_mach_frv:
	  {
	    struct frv_fp_exception_info fp_info = {
	      FSR_NO_EXCEPTION, FTT_INVALID_FR
	    };
	    frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
	  }
	  break;
	default:
	  break;
	}

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
	{
	  /* See comment in check_register_alignment().  */
	case bfd_mach_fr400:
	case bfd_mach_fr450:
	  frv_queue_data_access_error_interrupt (current_cpu, address);
	  break;
	case bfd_mach_frvtomcat:
	case bfd_mach_fr500:
	case bfd_mach_frv:
	  frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
	  break;
	default:
	  break;
	}

      address &= ~align_mask;
    }

  return address;
}

DI
frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  DI value;

  if (gr == 0)
    return 0; /* gr0 is always 0.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  value = GET_H_GR (gr);
  value <<= 32;
  value |=  (USI) GET_H_GR (gr + 1);
  return value;
}

void
frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  SET_H_GR (gr    , (newval >> 32) & 0xffffffff);
  SET_H_GR (gr + 1, (newval      ) & 0xffffffff);
}

/* Cover fns to access the floating point register as double words.  */
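/* A double is built from an even/odd pair of FRs, with FR(fr) holding
   the most significant word.  The union below is in host byte order, so
   the two halves must be swapped on a little-endian host.  */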
DF
frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  if (HOST_BYTE_ORDER == BFD_ENDIAN_LITTLE)
    {
      value.as_sf[1] = GET_H_FR (fr);
      value.as_sf[0] = GET_H_FR (fr + 1);
    }
  else
    {
      value.as_sf[0] = GET_H_FR (fr);
      value.as_sf[1] = GET_H_FR (fr + 1);
    }

  return value.as_df;
}

void
frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  value.as_df = newval;
  if (HOST_BYTE_ORDER == BFD_ENDIAN_LITTLE)
    {
      SET_H_FR (fr    , value.as_sf[1]);
      SET_H_FR (fr + 1, value.as_sf[0]);
    }
  else
    {
      SET_H_FR (fr    , value.as_sf[0]);
      SET_H_FR (fr + 1, value.as_sf[1]);
    }
}

/* Cover fns to access the floating point register as integer words.  */
USI
frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF  as_sf;
    USI as_usi;
  } value;

  value.as_sf = GET_H_FR (fr);
  return value.as_usi;
}

void
frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
{
  union {
    SF  as_sf;
    USI as_usi;
  } value;

  value.as_usi = newval;
  SET_H_FR (fr, value.as_sf);
}

/* Cover fns to access the coprocessor registers as double words.  */
DI
frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
{
  DI value;

  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  value = GET_H_CPR (cpr);
  value <<= 32;
  value |=  (USI) GET_H_CPR (cpr + 1);
  return value;
}

void
frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  SET_H_CPR (cpr    , (newval >> 32) & 0xffffffff);
  SET_H_CPR (cpr + 1, (newval      ) & 0xffffffff);
}

/* Cover fns to write registers as quad words.  */
void
frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 3);

  SET_H_GR (gr    , newval[0]);
  SET_H_GR (gr + 1, newval[1]);
  SET_H_GR (gr + 2, newval[2]);
  SET_H_GR (gr + 3, newval[3]);
}

void
frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
{
  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 3);

  SET_H_FR (fr    , newval[0]);
  SET_H_FR (fr + 1, newval[1]);
  SET_H_FR (fr + 2, newval[2]);
  SET_H_FR (fr + 3, newval[3]);
}

void
frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 3);

  SET_H_CPR (cpr    , newval[0]);
  SET_H_CPR (cpr + 1, newval[1]);
  SET_H_CPR (cpr + 2, newval[2]);
  SET_H_CPR (cpr + 3, newval[3]);
}

/* Cover fns to access the special purpose registers.  */
USI
frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* Check access restrictions.  */
  frv_check_spr_read_access (current_cpu, spr);

  switch (spr)
    {
    case H_SPR_PSR:
      return spr_psr_get_handler (current_cpu);
    case H_SPR_TBR:
      return spr_tbr_get_handler (current_cpu);
    case H_SPR_BPSR:
      return spr_bpsr_get_handler (current_cpu);
    case H_SPR_CCR:
      return spr_ccr_get_handler (current_cpu);
    case H_SPR_CCCR:
      return spr_cccr_get_handler (current_cpu);
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      return spr_sr_get_handler (current_cpu, spr);
    default:
      return CPU (h_spr[spr]);
    }
  return 0;
}

void
frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  FRV_REGISTER_CONTROL *control;
  USI mask;
  USI oldval;

  /* Check access restrictions.  */
  frv_check_spr_write_access (current_cpu, spr);

  /* Only set those fields which are writeable.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  mask = control->spr[spr].read_only_mask;
  oldval = GET_H_SPR (spr);

  newval = (newval & ~mask) | (oldval & mask);

  /* Some registers are represented by individual components which are
     referenced more often than the register itself.  */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    case H_SPR_IHSR8:
      frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}

/* Cover fns to access the gr_hi and gr_lo registers.  */
UHI
frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return (GET_H_GR (gr) >> 16) & 0xffff;
}

void
frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
  SET_H_GR (gr, value);
}

UHI
frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return GET_H_GR (gr) & 0xffff;
}

void
frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
  SET_H_GR (gr, value);
}

/* Cover fns to access the tbr bits.  */
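/* TBR layout, as reconstructed from the shifts below:
     bits 31-12: TBA (trap base address)
     bits 11-4:  TT  (trap type)
   with the low four bits reading as zero.  */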
USI
spr_tbr_get_handler (SIM_CPU *current_cpu)
{
  int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
            ((GET_H_TBR_TT  () &  0xff) <<  4);

  return tbr;
}

void
spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int tbr = newval;

  SET_H_TBR_TBA ((tbr >> 12) & 0xfffff);
  SET_H_TBR_TT  ((tbr >>  4) & 0xff);
}

/* Cover fns to access the bpsr bits.  */
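/* BPSR layout, as reconstructed from the shifts below:
     bit 12: BS    bit 0: BET.  */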
USI
spr_bpsr_get_handler (SIM_CPU *current_cpu)
{
  int bpsr = ((GET_H_BPSR_BS  () & 0x1) << 12) |
             ((GET_H_BPSR_BET () & 0x1)      );

  return bpsr;
}

void
spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int bpsr = newval;

  SET_H_BPSR_BS  ((bpsr >> 12) & 1);
  SET_H_BPSR_BET ((bpsr      ) & 1);
}

/* Cover fns to access the psr bits.  */
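/* PSR layout, as reconstructed from the shifts below:
     bits 31-28: IMPLE    bits 27-24: VER    bit 16: ICE
     bit 14: NEM    bit 13: CM    bit 12: BE    bit 11: ESR
     bit 8: EF    bit 7: EM    bits 6-3: PIL
     bit 2: S    bit 1: PS    bit 0: ET.  */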
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
            ((GET_H_PSR_VER   () & 0xf) << 24) |
            ((GET_H_PSR_ICE   () & 0x1) << 16) |
            ((GET_H_PSR_NEM   () & 0x1) << 14) |
            ((GET_H_PSR_CM    () & 0x1) << 13) |
            ((GET_H_PSR_BE    () & 0x1) << 12) |
            ((GET_H_PSR_ESR   () & 0x1) << 11) |
            ((GET_H_PSR_EF    () & 0x1) <<  8) |
            ((GET_H_PSR_EM    () & 0x1) <<  7) |
            ((GET_H_PSR_PIL   () & 0xf) <<  3) |
            ((GET_H_PSR_S     () & 0x1) <<  2) |
            ((GET_H_PSR_PS    () & 0x1) <<  1) |
            ((GET_H_PSR_ET    () & 0x1)      );

  return psr;
}

void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >>  2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER   ((newval >> 24) & 0xf);
  SET_H_PSR_ICE   ((newval >> 16) & 1);
  SET_H_PSR_NEM   ((newval >> 14) & 1);
  SET_H_PSR_CM    ((newval >> 13) & 1);
  SET_H_PSR_BE    ((newval >> 12) & 1);
  SET_H_PSR_ESR   ((newval >> 11) & 1);
  SET_H_PSR_EF    ((newval >>  8) & 1);
  SET_H_PSR_EM    ((newval >>  7) & 1);
  SET_H_PSR_PIL   ((newval >>  3) & 0xf);
  SET_H_PSR_PS    ((newval >>  1) & 1);
  SET_H_PSR_ET    ((newval      ) & 1);
}

void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}

/* Cover fns to access the ccr bits.  */
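/* CCR layout, as reconstructed from the shifts below:
     bits 31-16: ICC3..ICC0, one 4-bit field each
     bits 15-0:  FCC3..FCC0, one 4-bit field each.  */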
USI
spr_ccr_get_handler (SIM_CPU *current_cpu)
{
  int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
            ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
            ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
            ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
            ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
            ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) <<  8) |
            ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) <<  4) |
            ((GET_H_FCCR (H_FCCR_FCC0) & 0xf)      );

  return ccr;
}

void
spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
  SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
  SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
  SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
  SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
  SET_H_FCCR (H_FCCR_FCC2, (newval >>  8) & 0xf);
  SET_H_FCCR (H_FCCR_FCC1, (newval >>  4) & 0xf);
  SET_H_FCCR (H_FCCR_FCC0, (newval      ) & 0xf);
}

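/* Within a 4-bit ICC field the carry flag is bit 0 and the overflow
   flag is bit 1, hence the masks 0x1/0xe and 0x2/0xd used below.  */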
QI
frvbf_set_icc_for_shift_right (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the C flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = (1 << shift) - 1;
  if ((value & mask) != 0)
    return icc | 0x1;

  return icc & 0xe;
}

QI
frvbf_set_icc_for_shift_left (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the V flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = ((1 << shift) - 1) << (32 - shift);
  if ((value & mask) != 0)
    return icc | 0x2;

  return icc & 0xd;
}

/* Cover fns to access the cccr bits.  */
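/* CCCR layout, as reconstructed from the shifts below:
     bits 15-0: CC7..CC0, one 2-bit field each (cf. enum cr_result
     further down in this file).  */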
USI
spr_cccr_get_handler (SIM_CPU *current_cpu)
{
  int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
             ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
             ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
             ((GET_H_CCCR (H_CCCR_CC4) & 0x3) <<  8) |
             ((GET_H_CCCR (H_CCCR_CC3) & 0x3) <<  6) |
             ((GET_H_CCCR (H_CCCR_CC2) & 0x3) <<  4) |
             ((GET_H_CCCR (H_CCCR_CC1) & 0x3) <<  2) |
             ((GET_H_CCCR (H_CCCR_CC0) & 0x3)      );

  return cccr;
}

void
spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
  SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
  SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
  SET_H_CCCR (H_CCCR_CC4, (newval >>  8) & 0x3);
  SET_H_CCCR (H_CCCR_CC3, (newval >>  6) & 0x3);
  SET_H_CCCR (H_CCCR_CC2, (newval >>  4) & 0x3);
  SET_H_CCCR (H_CCCR_CC1, (newval >>  2) & 0x3);
  SET_H_CCCR (H_CCCR_CC0, (newval      ) & 0x3);
}

/* Cover fns to access the sr bits.  */
USI
spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    return GET_H_GR (4 + (spr - H_SPR_SR0));

  return CPU (h_spr[spr]);
}

void
spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    SET_H_GR (4 + (spr - H_SPR_SR0), newval);
  else
    CPU (h_spr[spr]) = newval;
}

/* Switch SR0-SR3 with GR4-GR7 if PSR.ESR is set.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers.  Access the
	 PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
	{
	  int gr = i + 4;
	  int spr = i + H_SPR_SR0;
	  SI tmp = GET_H_SPR (spr);
	  SET_H_SPR (spr, GET_H_GR (gr));
	  SET_H_GR (gr, tmp);
	}
      CPU (h_psr_s) = save_psr_s;
    }
}

/* Handle load/store of quad registers.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
			     value);
    }
}

void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.  */
      if (src_ix == 0)
	value[i] = 0;
      else
	value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
			     value);
    }
}

void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
			     value);
    }
}

void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set;
	 otherwise it may result in 0x7fffffff (SPARC compatibility) or
	 0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
	sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			       0x7fffffff);
      else
	sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			       0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			   arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
				    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non-excepting instruction.  Clear the NE flag for the target
	 register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}

void
frvbf_unsigned_integer_divide (
  SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
			      target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			     arg1 / arg2);
      if (non_excepting)
	{
	  /* Non-excepting instruction.  Clear the NE flag for the target
	     register.  */
	  SI NE_flags[2];
	  GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
	  CLEAR_NE_FLAG (NE_flags, target_index);
	  SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
	}
    }
}

/* Clear accumulators.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  int acc_mask =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450) ? 11 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 3 :
    63;
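  /* ACC_MASK is both the highest implemented accumulator number and a
     selection mask: accumulator I is implemented iff (I & ACC_MASK) == I,
     so the fr450 value of 11 (binary 1011) selects accumulators 0-3 and
     8-11.  */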
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  ps->mclracc_acc = acc_ix;
  ps->mclracc_A   = A;
  if (A == 0 || acc_ix != 0) /* Clear a single accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
	 implemented.  */
      if ((acc_ix & acc_mask) == acc_ix)
	sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i <= acc_mask; ++i)
	if ((i & acc_mask) == i)
	  sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}

/* Functions to aid insn semantics.  */

/* Compute the result of the SCAN and SCANI insns after the shift and xor.  */
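/* The result is the number of leading zero bits in VALUE: e.g. a VALUE
   of 1 yields 31.  A VALUE of zero yields the special result 63.  */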
SI
frvbf_scan_result (SIM_CPU *current_cpu, SI value)
{
  SI i;
  SI mask;

  if (value == 0)
    return 63;

  /* Find the position of the first non-zero bit.
     The loop will terminate since there is guaranteed to be at least one
     non-zero bit.  */
  mask = 1 << (sizeof (mask) * 8 - 1);
  for (i = 0; (value & mask) == 0; ++i)
    value <<= 1;

  return i;
}

/* Compute the result of the cut insns.  */
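/* CUT_POINT selects a 32-bit window from the 64-bit value REG1:REG2:
   e.g. with CUT_POINT = 8 the result is REG1 bits 23-0 followed by
   REG2 bits 31-24.  */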
SI
frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
{
  SI result;
  cut_point &= 0x3f;
  if (cut_point < 32)
    {
      result = reg1 << cut_point;
      result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
    }
  else
    result = reg2 << (cut_point - 32);

  return result;
}

/* Compute the result of the media cut insns.  */
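/* For a non-negative CUT_POINT the result is the 32-bit field of the
   accumulator whose most significant bit is bit (39 - CUT_POINT): e.g.
   CUT_POINT = 0 extracts ACC bits 39-8.  */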
SI
frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  /* The cut_point is relative to bit 40 of 64 bits.  */
  if (cut_point >= 0)
    return (acc << (cut_point + 24)) >> 32;

  /* Extend the sign bit (bit 40) for negative cuts.  */
  if (cut_point == -32)
    return (acc << 24) >> 63; /* Special case for full shiftout.  */

  return (acc << 24) >> (32 + -cut_point);
}

/* Compute the result of the saturating media cut insns.  */
SI
frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out.  */
      if (unshifted != acc)
	{
	  if (acc < 0)
	    return 0x80000000;
	  return 0x7fffffff;
	}
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}

/* Compute the result of int accumulator cut (SCUTSS).  */
SI
frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  DI lower, upper;

  /* The cut point is the lower 7 bits (signed) of what we are passed.  */
  cut_point = cut_point << 25 >> 25;

  /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
     The top bit of the return value corresponds to bit (63 - CUT_POINT)
     of this 128-bit value.

     Since we can't deal with 128-bit values very easily, convert the
     operation into an equivalent 64-bit one.  */
  if (cut_point < 0)
    {
      /* Avoid an undefined shift operation.  */
      if (cut_point == -64)
	acc >>= 63;
      else
	acc >>= -cut_point;
      cut_point = 0;
    }

  /* Get the shifted but unsaturated result.  Set LOWER to the lowest
     32 bits of the result and UPPER to the result >> 31.  */
  if (cut_point < 32)
    {
      /* The cut loses the (32 - CUT_POINT) least significant bits.
	 Round the result up if the most significant of these lost bits
	 is 1.  */
      lower = acc >> (32 - cut_point);
      if (lower < 0x7fffffff && (acc & LSBIT64 (32 - cut_point - 1)))
	lower++;
      upper = lower >> 31;
    }
  else
    {
      lower = acc << (cut_point - 32);
      upper = acc >> (63 - cut_point);
    }

  /* Saturate the result.  */
  if (upper < -1)
    return ~0x7fffffff;
  else if (upper > 0)
    return 0x7fffffff;
  else
    return lower;
}

/* Compute the result of shift-left-arithmetic-with-saturation (SLASS).  */
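/* E.g. shifting 0x40000000 left by 2 would change the sign bit, so the
   loop below returns the saturated value 0x7fffffff instead of the
   truncated 0.  */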
SI
frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
{
  int neg_arg1;

  /* FIXME: what to do with a negative shift amount?  */
  if (arg2 <= 0)
    return arg1;

  if (arg1 == 0)
    return 0;

  /* Signed shift by 31 or greater saturates by definition.  */
  if (arg2 >= 31)
    {
      if (arg1 > 0)
	return (SI) 0x7fffffff;
      else
	return (SI) 0x80000000;
    }

  /* OK, arg2 is between 1 and 31.  */
  neg_arg1 = (arg1 < 0);
  do {
    arg1 <<= 1;
    /* Check for sign bit change (saturation).  */
    if (neg_arg1 && (arg1 >= 0))
      return (SI) 0x80000000;
    else if (!neg_arg1 && (arg1 < 0))
      return (SI) 0x7fffffff;
  } while (--arg2 > 0);

  return arg1;
}

/* Simulate the media custom insns.  */
void
frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
{
  /* The semantics of the insn are a nop, since it is implementation defined.
     We do need to check whether it's implemented and set up for MTRAP
     if it's not.  */
  USI msr0 = GET_MSR (0);
  if (GET_MSR_EMCI (msr0) == 0)
    {
      /* No interrupt queued at this time.  */
      frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
    }
}

/* Simulate the media average (MAVEH) insn.  */
static HI
do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  SI sum = (arg1 + arg2);
  HI result = sum >> 1;
  int rounding_value;

  /* On fr4xx and fr550, check the rounding mode.  On other machines
     rounding is always toward negative infinity and the result is
     already correctly rounded.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
      /* Need to check rounding mode.  */
    case bfd_mach_fr400:
    case bfd_mach_fr450:
    case bfd_mach_fr550:
      /* Check whether rounding will be required.  Rounding will be required
	 if the sum is an odd number.  */
      rounding_value = sum & 1;
      if (rounding_value)
	{
	  USI msr0 = GET_MSR (0);
	  /* Check MSR0.SRDAV to determine which bits control the rounding.  */
	  if (GET_MSR_SRDAV (msr0))
	    {
	      /* MSR0.RD controls rounding.  */
	      switch (GET_MSR_RD (msr0))
		{
		case 0:
		  /* Round to nearest.  */
		  if (result >= 0)
		    ++result;
		  break;
		case 1:
		  /* Round toward 0.  */
		  if (result < 0)
		    ++result;
		  break;
		case 2:
		  /* Round toward positive infinity.  */
		  ++result;
		  break;
		case 3:
		  /* Round toward negative infinity.  The result is already
		     correctly rounded.  */
		  break;
		default:
		  abort ();
		  break;
		}
	    }
	  else
	    {
	      /* MSR0.RDAV controls rounding.  If set, round toward positive
		 infinity.  Otherwise the result is already rounded correctly
		 toward negative infinity.  */
	      if (GET_MSR_RDAV (msr0))
		++result;
	    }
	}
      break;
    default:
      break;
    }

  return result;
}

SI
frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
{
  SI result;
  result  = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
  result &= 0xffff;
  result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
			      (reg2 >> 16) & 0xffff) << 16;
  return result;
}

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and
   CALL insns.  */
void
frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
{
  frvbf_write_next_vliw_addr_to_LR = value;
}

void
frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
{
  USI NE_flags[2];

  /* Save the target register so interrupt processing can set its NE flag
     in the event of an exception.  */
  frv_interrupt_state.ne_index = index;

  /* Clear the NE flag of the target register.  It will be reset if necessary
     in the event of an exception.  */
  GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
  CLEAR_NE_FLAG (NE_flags, index);
  SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
}

void
frvbf_force_update (SIM_CPU *current_cpu)
{
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q);
  if (ix > 0)
    {
      CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
      item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
    }
}

/* Condition code logic.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};

static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*                undefined     undefined       false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};

UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}

/* Cache Manipulation.  */
void
frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
	{
	  CPU_LOAD_ADDRESS (current_cpu) = address;
	  CPU_LOAD_LENGTH (current_cpu) = length;
	  CPU_LOAD_LOCK (current_cpu) = lock;
	}
      else
	{
	  FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
	  frv_cache_preload (cache, address, length, lock);
	}
    }
}

void
frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
	{
	  CPU_LOAD_ADDRESS (current_cpu) = address;
	  CPU_LOAD_LENGTH (current_cpu) = length;
	  CPU_LOAD_LOCK (current_cpu) = lock;
	}
      else
	{
	  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
	  frv_cache_preload (cache, address, length, lock);
	}
    }
}

void
frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
	CPU_LOAD_ADDRESS (current_cpu) = address;
      else
	{
	  FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
	  frv_cache_unlock (cache, address);
	}
    }
}

void
frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
	CPU_LOAD_ADDRESS (current_cpu) = address;
      else
	{
	  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
	  frv_cache_unlock (cache, address);
	}
    }
}

void
frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for an icei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
      if (all)
	frv_cache_invalidate_all (cache, 0/* flush? */);
      else
	frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
	frv_cache_invalidate_all (cache, 0/* flush? */);
      else
	frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcef with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
	frv_cache_invalidate_all (cache, 1/* flush? */);
      else
	frv_cache_invalidate (cache, address, 1/* flush? */);
    }
}