1/* frv simulator support code 2 Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004, 2007 3 Free Software Foundation, Inc. 4 Contributed by Red Hat. 5 6This file is part of the GNU simulators. 7 8This program is free software; you can redistribute it and/or modify 9it under the terms of the GNU General Public License as published by 10the Free Software Foundation; either version 3 of the License, or 11(at your option) any later version. 12 13This program is distributed in the hope that it will be useful, 14but WITHOUT ANY WARRANTY; without even the implied warranty of 15MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16GNU General Public License for more details. 17 18You should have received a copy of the GNU General Public License 19along with this program. If not, see <http://www.gnu.org/licenses/>. */ 20 21#define WANT_CPU 22#define WANT_CPU_FRVBF 23 24#include "sim-main.h" 25#include "cgen-mem.h" 26#include "cgen-ops.h" 27#include "cgen-engine.h" 28#include "cgen-par.h" 29#include "bfd.h" 30#include "gdb/sim-frv.h" 31#include <math.h> 32 33/* Maintain a flag in order to know when to write the address of the next 34 VLIW instruction into the LR register. Used by JMPL. JMPIL, and CALL 35 insns. */ 36int frvbf_write_next_vliw_addr_to_LR; 37 38/* The contents of BUF are in target byte order. 
   Fetch register RN into BUF.  Return the number of bytes written (LEN)
   on success, or 0 if RN is unimplemented or currently unavailable.  */
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      /* GR0-GR31 require the low half, GR32-GR63 the high half.  */
      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
	return 0;
      else
	SETTSI (buf, GET_H_GR (grn));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
	return 0;
      else
	SETTSI (buf, GET_H_FR (frn));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
	return 0;
      SETTSI (buf, GET_H_SPR (spr));
    }
  else
    {
      /* Unknown register: fill BUF with a recognizable pattern and
	 report failure.  */
      SETTSI (buf, 0xdeadbeef);
      return 0;
    }

  return len;
}

/* The contents of BUF are in target byte order.
   Store the value in BUF into register RN.  Return LEN on success,
   or 0 if RN is unimplemented or currently unavailable.  */

int
frvbf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      /* GR0-GR31 require the low half, GR32-GR63 the high half.  */
      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
	return 0;
      else
	SET_H_GR (grn, GETTSI (buf));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
	return 0;
      else
	SET_H_FR (frn, GETTSI (buf));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
	return 0;
      SET_H_SPR (spr, GETTSI (buf));
    }
  else
    return 0;

  return len;
}

/* Cover fns to access the general registers.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}

void
frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
{
  frv_check_gr_access (current_cpu, gr);

  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  CPU (h_gr[gr]) = newval;
}

/* Cover fns to access the floating point registers.
 */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}

void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}

/* Cover fns to access the general registers as double words.  */

/* Check that REG is aligned per ALIGN_MASK.  If not, queue the
   machine-appropriate interrupt and return REG with the offending
   low bits cleared.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
	{
	  /* Note: there is a discrepancy between V2.2 of the FR400
	     instruction manual and the various FR4xx LSI specs.
	     The former claims that unaligned registers cause a
	     register_exception while the latter say it's an
	     illegal_instruction.  The LSI specs appear to be
	     correct; in fact, the FR4xx series is not documented
	     as having a register_exception.  */
	case bfd_mach_fr400:
	case bfd_mach_fr450:
	case bfd_mach_fr550:
	  frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
	  break;
	case bfd_mach_frvtomcat:
	case bfd_mach_fr500:
	case bfd_mach_frv:
	  frv_queue_register_exception_interrupt (current_cpu,
						  FRV_REC_UNALIGNED);
	  break;
	default:
	  break;
	}

      reg &= ~align_mask;
    }

  return reg;
}

/* Like check_register_alignment, but for FR registers: misalignment
   raises an FP exception on fr500-class machines instead.  */
static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
	{
	  /* See comment in check_register_alignment().
	   */
	case bfd_mach_fr400:
	case bfd_mach_fr450:
	case bfd_mach_fr550:
	  frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
	  break;
	case bfd_mach_frvtomcat:
	case bfd_mach_fr500:
	case bfd_mach_frv:
	  {
	    struct frv_fp_exception_info fp_info = {
	      FSR_NO_EXCEPTION, FTT_INVALID_FR
	    };
	    frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
	  }
	  break;
	default:
	  break;
	}

      reg &= ~align_mask;
    }

  return reg;
}

/* Check that ADDRESS is aligned per ALIGN_MASK.  If not, queue the
   machine-appropriate interrupt and return ADDRESS with the offending
   low bits cleared.  */
static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
	{
	  /* See comment in check_register_alignment().  */
	case bfd_mach_fr400:
	case bfd_mach_fr450:
	  frv_queue_data_access_error_interrupt (current_cpu, address);
	  break;
	case bfd_mach_frvtomcat:
	case bfd_mach_fr500:
	case bfd_mach_frv:
	  frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
	  break;
	default:
	  break;
	}

      address &= ~align_mask;
    }

  return address;
}

/* Read GR pair (GR, GR+1) as a 64-bit value, GR holding the high word.  */
DI
frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  DI value;

  if (gr == 0)
    return 0; /* gr0 is always 0.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  value = GET_H_GR (gr);
  value <<= 32;
  value |= (USI) GET_H_GR (gr + 1);
  return value;
}

/* Write NEWVAL into GR pair (GR, GR+1), GR receiving the high word.  */
void
frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  SET_H_GR (gr    , (newval >> 32) & 0xffffffff);
  SET_H_GR (gr + 1, (newval      ) & 0xffffffff);
}

/* Cover fns to access the floating point register as double words.
 */
DF
frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  /* Assemble the double from two SF halves; the union plus the host
     byte-order test keeps the target's word order (FR = high word)
     regardless of host endianness.  */
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      value.as_sf[1] = GET_H_FR (fr);
      value.as_sf[0] = GET_H_FR (fr + 1);
    }
  else
    {
      value.as_sf[0] = GET_H_FR (fr);
      value.as_sf[1] = GET_H_FR (fr + 1);
    }

  return value.as_df;
}

void
frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  value.as_df = newval;
  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      SET_H_FR (fr    , value.as_sf[1]);
      SET_H_FR (fr + 1, value.as_sf[0]);
    }
  else
    {
      SET_H_FR (fr    , value.as_sf[0]);
      SET_H_FR (fr + 1, value.as_sf[1]);
    }
}

/* Cover fns to access the floating point register as integer words.
   The union performs the bit-for-bit reinterpretation without violating
   strict aliasing.  */
USI
frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_sf = GET_H_FR (fr);
  return value.as_usi;
}

void
frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_usi = newval;
  SET_H_FR (fr, value.as_sf);
}

/* Cover fns to access the coprocessor registers as double words.  */
DI
frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
{
  DI value;

  /* Check the register alignment.
   */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  /* CPR holds the high word, CPR+1 the low word.  */
  value = GET_H_CPR (cpr);
  value <<= 32;
  value |= (USI) GET_H_CPR (cpr + 1);
  return value;
}

void
frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  SET_H_CPR (cpr    , (newval >> 32) & 0xffffffff);
  SET_H_CPR (cpr + 1, (newval      ) & 0xffffffff);
}

/* Cover fns to write registers as quad words.
   NEWVAL points to four SI words stored into four consecutive registers;
   the base register must be 4-aligned.  */
void
frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 3);

  SET_H_GR (gr    , newval[0]);
  SET_H_GR (gr + 1, newval[1]);
  SET_H_GR (gr + 2, newval[2]);
  SET_H_GR (gr + 3, newval[3]);
}

void
frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
{
  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 3);

  SET_H_FR (fr    , newval[0]);
  SET_H_FR (fr + 1, newval[1]);
  SET_H_FR (fr + 2, newval[2]);
  SET_H_FR (fr + 3, newval[3]);
}

void
frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 3);

  SET_H_CPR (cpr    , newval[0]);
  SET_H_CPR (cpr + 1, newval[1]);
  SET_H_CPR (cpr + 2, newval[2]);
  SET_H_CPR (cpr + 3, newval[3]);
}

/* Cover fns to access the special purpose registers.  */
USI
frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* Check access restrictions.
*/ 440 frv_check_spr_read_access (current_cpu, spr); 441 442 switch (spr) 443 { 444 case H_SPR_PSR: 445 return spr_psr_get_handler (current_cpu); 446 case H_SPR_TBR: 447 return spr_tbr_get_handler (current_cpu); 448 case H_SPR_BPSR: 449 return spr_bpsr_get_handler (current_cpu); 450 case H_SPR_CCR: 451 return spr_ccr_get_handler (current_cpu); 452 case H_SPR_CCCR: 453 return spr_cccr_get_handler (current_cpu); 454 case H_SPR_SR0: 455 case H_SPR_SR1: 456 case H_SPR_SR2: 457 case H_SPR_SR3: 458 return spr_sr_get_handler (current_cpu, spr); 459 break; 460 default: 461 return CPU (h_spr[spr]); 462 } 463 return 0; 464} 465 466void 467frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval) 468{ 469 FRV_REGISTER_CONTROL *control; 470 USI mask; 471 USI oldval; 472 473 /* Check access restrictions. */ 474 frv_check_spr_write_access (current_cpu, spr); 475 476 /* Only set those fields which are writeable. */ 477 control = CPU_REGISTER_CONTROL (current_cpu); 478 mask = control->spr[spr].read_only_mask; 479 oldval = GET_H_SPR (spr); 480 481 newval = (newval & ~mask) | (oldval & mask); 482 483 /* Some registers are represented by individual components which are 484 referenced more often than the register itself. 
   */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    case H_SPR_IHSR8:
      /* Writing IHSR8 can change the cache geometry; reconfigure.  */
      frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}

/* Cover fns to access the gr_hi and gr_lo registers
   (the upper and lower 16-bit halves of a GR).  */
UHI
frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return (GET_H_GR(gr) >> 16) & 0xffff;
}

void
frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
  SET_H_GR (gr, value);
}

UHI
frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return GET_H_GR(gr) & 0xffff;
}

void
frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
  SET_H_GR (gr, value);
}

/* Cover fns to access the tbr bits: TBA occupies bits 31:12,
   TT bits 11:4; the low 4 bits always read as zero.  */
USI
spr_tbr_get_handler (SIM_CPU *current_cpu)
{
  int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
            ((GET_H_TBR_TT  () & 0xff   ) <<  4);

  return tbr;
}

void
spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int tbr = newval;

  SET_H_TBR_TBA ((tbr >> 12) & 0xfffff) ;
  SET_H_TBR_TT  ((tbr >>  4) & 0xff) ;
}

/* Cover fns to access the bpsr bits.
   BS is bit 12, BET bit 0.  */
USI
spr_bpsr_get_handler (SIM_CPU *current_cpu)
{
  int bpsr = ((GET_H_BPSR_BS  () & 0x1) << 12) |
             ((GET_H_BPSR_BET () & 0x1)      );

  return bpsr;
}

void
spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int bpsr = newval;

  SET_H_BPSR_BS  ((bpsr >> 12) & 1);
  SET_H_BPSR_BET ((bpsr      ) & 1);
}

/* Cover fns to access the psr bits.  The PSR is assembled from its
   individually-maintained fields.  */
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
            ((GET_H_PSR_VER   () & 0xf) << 24) |
            ((GET_H_PSR_ICE   () & 0x1) << 16) |
            ((GET_H_PSR_NEM   () & 0x1) << 14) |
            ((GET_H_PSR_CM    () & 0x1) << 13) |
            ((GET_H_PSR_BE    () & 0x1) << 12) |
            ((GET_H_PSR_ESR   () & 0x1) << 11) |
            ((GET_H_PSR_EF    () & 0x1) <<  8) |
            ((GET_H_PSR_EM    () & 0x1) <<  7) |
            ((GET_H_PSR_PIL   () & 0xf) <<  3) |
            ((GET_H_PSR_S     () & 0x1) <<  2) |
            ((GET_H_PSR_PS    () & 0x1) <<  1) |
            ((GET_H_PSR_ET    () & 0x1)      );

  return psr;
}

void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >>  2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER   ((newval >> 24) & 0xf);
  SET_H_PSR_ICE   ((newval >> 16) & 1);
  SET_H_PSR_NEM   ((newval >> 14) & 1);
  SET_H_PSR_CM    ((newval >> 13) & 1);
  SET_H_PSR_BE    ((newval >> 12) & 1);
  SET_H_PSR_ESR   ((newval >> 11) & 1);
  SET_H_PSR_EF    ((newval >>  8) & 1);
  SET_H_PSR_EM    ((newval >>  7) & 1);
  SET_H_PSR_PIL   ((newval >>  3) & 0xf);
  SET_H_PSR_PS    ((newval >>  1) & 1);
  SET_H_PSR_ET    ((newval      ) & 1);
}

void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.
*/ 629 int psr_s = GET_H_PSR_S (); 630 if (psr_s != (newval & 1)) 631 { 632 frvbf_switch_supervisor_user_context (current_cpu); 633 CPU (h_psr_s) = newval & 1; 634 } 635} 636 637/* Cover fns to access the ccr bits. */ 638USI 639spr_ccr_get_handler (SIM_CPU *current_cpu) 640{ 641 int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) | 642 ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) | 643 ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) | 644 ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) | 645 ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) | 646 ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) << 8) | 647 ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) << 4) | 648 ((GET_H_FCCR (H_FCCR_FCC0) & 0xf) ); 649 650 return ccr; 651} 652 653void 654spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval) 655{ 656 int ccr = newval; 657 658 SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf); 659 SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf); 660 SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf); 661 SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf); 662 SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf); 663 SET_H_FCCR (H_FCCR_FCC2, (newval >> 8) & 0xf); 664 SET_H_FCCR (H_FCCR_FCC1, (newval >> 4) & 0xf); 665 SET_H_FCCR (H_FCCR_FCC0, (newval ) & 0xf); 666} 667 668QI 669frvbf_set_icc_for_shift_right ( 670 SIM_CPU *current_cpu, SI value, SI shift, QI icc 671) 672{ 673 /* Set the C flag of the given icc to the logical OR of the bits shifted 674 out. */ 675 int mask = (1 << shift) - 1; 676 if ((value & mask) != 0) 677 return icc | 0x1; 678 679 return icc & 0xe; 680} 681 682QI 683frvbf_set_icc_for_shift_left ( 684 SIM_CPU *current_cpu, SI value, SI shift, QI icc 685) 686{ 687 /* Set the V flag of the given icc to the logical OR of the bits shifted 688 out. */ 689 int mask = ((1 << shift) - 1) << (32 - shift); 690 if ((value & mask) != 0) 691 return icc | 0x2; 692 693 return icc & 0xd; 694} 695 696/* Cover fns to access the cccr bits. 
*/ 697USI 698spr_cccr_get_handler (SIM_CPU *current_cpu) 699{ 700 int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) | 701 ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) | 702 ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) | 703 ((GET_H_CCCR (H_CCCR_CC4) & 0x3) << 8) | 704 ((GET_H_CCCR (H_CCCR_CC3) & 0x3) << 6) | 705 ((GET_H_CCCR (H_CCCR_CC2) & 0x3) << 4) | 706 ((GET_H_CCCR (H_CCCR_CC1) & 0x3) << 2) | 707 ((GET_H_CCCR (H_CCCR_CC0) & 0x3) ); 708 709 return cccr; 710} 711 712void 713spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval) 714{ 715 int cccr = newval; 716 717 SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3); 718 SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3); 719 SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3); 720 SET_H_CCCR (H_CCCR_CC4, (newval >> 8) & 0x3); 721 SET_H_CCCR (H_CCCR_CC3, (newval >> 6) & 0x3); 722 SET_H_CCCR (H_CCCR_CC2, (newval >> 4) & 0x3); 723 SET_H_CCCR (H_CCCR_CC1, (newval >> 2) & 0x3); 724 SET_H_CCCR (H_CCCR_CC0, (newval ) & 0x3); 725} 726 727/* Cover fns to access the sr bits. */ 728USI 729spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr) 730{ 731 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7, 732 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */ 733 int psr_esr = GET_H_PSR_ESR (); 734 if (! psr_esr) 735 return GET_H_GR (4 + (spr - H_SPR_SR0)); 736 737 return CPU (h_spr[spr]); 738} 739 740void 741spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval) 742{ 743 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7, 744 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */ 745 int psr_esr = GET_H_PSR_ESR (); 746 if (! psr_esr) 747 SET_H_GR (4 + (spr - H_SPR_SR0), newval); 748 else 749 CPU (h_spr[spr]) = newval; 750} 751 752/* Switch SR0-SR4 with GR4-GR7 if PSR.ESR is set. */ 753void 754frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu) 755{ 756 if (GET_H_PSR_ESR ()) 757 { 758 /* We need to be in supervisor mode to swap the registers. 
	 Access the
	 PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
	{
	  int gr = i + 4;
	  int spr = i + H_SPR_SR0;
	  SI tmp = GET_H_SPR (spr);
	  SET_H_SPR (spr, GET_H_GR (gr));
	  SET_H_GR (gr, tmp);
	}
      CPU (h_psr_s) = save_psr_s;
    }
}

/* Handle load/store of quad registers.  */

/* Load 16 bytes at ADDRESS into GR quad starting at TARG_IX.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
			     value);
    }
}

/* Store GR quad starting at SRC_IX into the 16 bytes at ADDRESS.  */
void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.
       */
      if (src_ix == 0)
	value[i] = 0;
      else
	value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
  /* With the data cache enabled the write goes through the cache
     handler, otherwise straight to memory.  */
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

/* Load 16 bytes at ADDRESS into FR quad starting at TARG_IX.  */
void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
			     value);
    }
}

/* Store FR quad starting at SRC_IX into the 16 bytes at ADDRESS.  */
void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

/* Load 16 bytes at ADDRESS into CPR quad starting at TARG_IX.  */
void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
			     value);
    }
}

/* Store CPR quad starting at SRC_IX into the 16 bytes at ADDRESS.  */
void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  /* With the data cache enabled the write goes through the cache
     handler, otherwise straight to memory.  */
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

/* Perform ARG1 / ARG2, queueing the result into GR TARGET_INDEX and
   raising the division exceptions required by the architecture.  */
void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set
	 otherwise it may result in 0x7fffffff (sparc compatibility) or
	 0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
	sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			       0x7fffffff);
      else
	sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			       0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			   arg1 / arg2);

  /* Check for exceptions.
   */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
				    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non excepting instruction.  Clear the NE flag for the target
	 register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}

/* Unsigned counterpart of frvbf_signed_integer_divide; only division
   by zero can trap.  */
void
frvbf_unsigned_integer_divide (
  SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
			      target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			     arg1 / arg2);
      if (non_excepting)
	{
	  /* Non excepting instruction.  Clear the NE flag for the target
	     register.  */
	  SI NE_flags[2];
	  GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
	  CLEAR_NE_FLAG (NE_flags, target_index);
	  SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
	}
    }
}

/* Clear accumulators.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  /* Mask of accumulator indices implemented on this machine.  */
  int acc_mask =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450) ? 11 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 3 :
    63;
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  ps->mclracc_acc = acc_ix;
  ps->mclracc_A = A;
  if (A == 0 || acc_ix != 0) /* Clear 1 accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
	 implemented.  */
      if ((acc_ix & acc_mask) == acc_ix)
	sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.
*/ 1024 int i; 1025 for (i = 0; i <= acc_mask; ++i) 1026 if ((i & acc_mask) == i) 1027 sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0); 1028 } 1029} 1030 1031/* Functions to aid insn semantics. */ 1032 1033/* Compute the result of the SCAN and SCANI insns after the shift and xor. */ 1034SI 1035frvbf_scan_result (SIM_CPU *current_cpu, SI value) 1036{ 1037 SI i; 1038 SI mask; 1039 1040 if (value == 0) 1041 return 63; 1042 1043 /* Find the position of the first non-zero bit. 1044 The loop will terminate since there is guaranteed to be at least one 1045 non-zero bit. */ 1046 mask = 1 << (sizeof (mask) * 8 - 1); 1047 for (i = 0; (value & mask) == 0; ++i) 1048 value <<= 1; 1049 1050 return i; 1051} 1052 1053/* Compute the result of the cut insns. */ 1054SI 1055frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point) 1056{ 1057 SI result; 1058 if (cut_point < 32) 1059 { 1060 result = reg1 << cut_point; 1061 result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1); 1062 } 1063 else 1064 result = reg2 << (cut_point - 32); 1065 1066 return result; 1067} 1068 1069/* Compute the result of the cut insns. */ 1070SI 1071frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point) 1072{ 1073 /* The cut point is the lower 6 bits (signed) of what we are passed. */ 1074 cut_point = cut_point << 26 >> 26; 1075 1076 /* The cut_point is relative to bit 40 of 64 bits. */ 1077 if (cut_point >= 0) 1078 return (acc << (cut_point + 24)) >> 32; 1079 1080 /* Extend the sign bit (bit 40) for negative cuts. */ 1081 if (cut_point == -32) 1082 return (acc << 24) >> 63; /* Special case for full shiftout. */ 1083 1084 return (acc << 24) >> (32 + -cut_point); 1085} 1086 1087/* Compute the result of the cut insns. */ 1088SI 1089frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point) 1090{ 1091 /* The cut point is the lower 6 bits (signed) of what we are passed. 
   */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out.  */
      if (unshifted != acc)
	{
	  if (acc < 0)
	    return 0x80000000;
	  return 0x7fffffff;
	}
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}

/* Compute the result of int accumulator cut (SCUTSS).  */
SI
frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  DI lower, upper;

  /* The cut point is the lower 7 bits (signed) of what we are passed.  */
  cut_point = cut_point << 25 >> 25;

  /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
     The top bit of the return value corresponds to bit (63 - CUT_POINT)
     of this 128-bit value.

     Since we can't deal with 128-bit values very easily, convert the
     operation into an equivalent 64-bit one.  */
  if (cut_point < 0)
    {
      /* Avoid an undefined shift operation.  */
      if (cut_point == -64)
	acc >>= 63;
      else
	acc >>= -cut_point;
      cut_point = 0;
    }

  /* Get the shifted but unsaturated result.  Set LOWER to the lowest
     32 bits of the result and UPPER to the result >> 31.  */
  if (cut_point < 32)
    {
      /* The cut loses the (32 - CUT_POINT) least significant bits.
	 Round the result up if the most significant of these lost bits
	 is 1.  */
      lower = acc >> (32 - cut_point);
      /* Guard against the increment overflowing past the saturation
	 limit.  */
      if (lower < 0x7fffffff)
	if (acc & LSBIT64 (32 - cut_point - 1))
	  lower++;
      upper = lower >> 31;
    }
  else
    {
      lower = acc << (cut_point - 32);
      upper = acc >> (63 - cut_point);
    }

  /* Saturate the result.
     UPPER is -1 or 0 exactly when LOWER already holds a representable
     32-bit result; otherwise clamp to the nearest extreme.  */
  if (upper < -1)
    return ~0x7fffffff;
  else if (upper > 0)
    return 0x7fffffff;
  else
    return lower;
}

/* Compute the result of shift-left-arithmetic-with-saturation (SLASS).  */
SI
frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
{
  int neg_arg1;

  /* FIXME: what to do with negative shift amt?  */
  if (arg2 <= 0)
    return arg1;

  if (arg1 == 0)
    return 0;

  /* Signed shift by 31 or greater saturates by definition.
     (The else below binds to the inner if, as intended.)  */
  if (arg2 >= 31)
    if (arg1 > 0)
      return (SI) 0x7fffffff;
    else
      return (SI) 0x80000000;

  /* OK, arg2 is between 1 and 31.  Shift one bit at a time so the
     first sign change can be detected and saturated.  */
  neg_arg1 = (arg1 < 0);
  do {
    arg1 <<= 1;
    /* Check for sign bit change (saturation).  */
    if (neg_arg1 && (arg1 >= 0))
      return (SI) 0x80000000;
    else if (!neg_arg1 && (arg1 < 0))
      return (SI) 0x7fffffff;
  } while (--arg2 > 0);

  return arg1;
}

/* Simulate the media custom insns.  */
void
frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
{
  /* The semantics of the insn are a nop, since it is implementation defined.
     We do need to check whether it's implemented and set up for MTRAP
     if it's not.  */
  USI msr0 = GET_MSR (0);
  if (GET_MSR_EMCI (msr0) == 0)
    {
      /* no interrupt queued at this time.  */
      frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
    }
}

/* Simulate the media average (MAVEH) insn.  */
static HI
do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  SI sum = (arg1 + arg2);
  HI result = sum >> 1;
  int rounding_value;

  /* On fr4xx and fr550, check the rounding mode.  On other machines
     rounding is always toward negative infinity and the result is
     already correctly rounded.
*/ 1227 switch (STATE_ARCHITECTURE (sd)->mach) 1228 { 1229 /* Need to check rounding mode. */ 1230 case bfd_mach_fr400: 1231 case bfd_mach_fr450: 1232 case bfd_mach_fr550: 1233 /* Check whether rounding will be required. Rounding will be required 1234 if the sum is an odd number. */ 1235 rounding_value = sum & 1; 1236 if (rounding_value) 1237 { 1238 USI msr0 = GET_MSR (0); 1239 /* Check MSR0.SRDAV to determine which bits control the rounding. */ 1240 if (GET_MSR_SRDAV (msr0)) 1241 { 1242 /* MSR0.RD controls rounding. */ 1243 switch (GET_MSR_RD (msr0)) 1244 { 1245 case 0: 1246 /* Round to nearest. */ 1247 if (result >= 0) 1248 ++result; 1249 break; 1250 case 1: 1251 /* Round toward 0. */ 1252 if (result < 0) 1253 ++result; 1254 break; 1255 case 2: 1256 /* Round toward positive infinity. */ 1257 ++result; 1258 break; 1259 case 3: 1260 /* Round toward negative infinity. The result is already 1261 correctly rounded. */ 1262 break; 1263 default: 1264 abort (); 1265 break; 1266 } 1267 } 1268 else 1269 { 1270 /* MSR0.RDAV controls rounding. If set, round toward positive 1271 infinity. Otherwise the result is already rounded correctly 1272 toward negative infinity. */ 1273 if (GET_MSR_RDAV (msr0)) 1274 ++result; 1275 } 1276 } 1277 break; 1278 default: 1279 break; 1280 } 1281 1282 return result; 1283} 1284 1285SI 1286frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2) 1287{ 1288 SI result; 1289 result = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff); 1290 result &= 0xffff; 1291 result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff, 1292 (reg2 >> 16) & 0xffff) << 16; 1293 return result; 1294} 1295 1296/* Maintain a flag in order to know when to write the address of the next 1297 VLIW instruction into the LR register. Used by JMPL. JMPIL, and CALL. 
*/ 1298void 1299frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value) 1300{ 1301 frvbf_write_next_vliw_addr_to_LR = value; 1302} 1303 1304void 1305frvbf_set_ne_index (SIM_CPU *current_cpu, int index) 1306{ 1307 USI NE_flags[2]; 1308 1309 /* Save the target register so interrupt processing can set its NE flag 1310 in the event of an exception. */ 1311 frv_interrupt_state.ne_index = index; 1312 1313 /* Clear the NE flag of the target register. It will be reset if necessary 1314 in the event of an exception. */ 1315 GET_NE_FLAGS (NE_flags, H_SPR_FNER0); 1316 CLEAR_NE_FLAG (NE_flags, index); 1317 SET_NE_FLAGS (H_SPR_FNER0, NE_flags); 1318} 1319 1320void 1321frvbf_force_update (SIM_CPU *current_cpu) 1322{ 1323 CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu); 1324 int ix = CGEN_WRITE_QUEUE_INDEX (q); 1325 if (ix > 0) 1326 { 1327 CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1); 1328 item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE; 1329 } 1330} 1331 1332/* Condition code logic. 
*/
/* Operation selectors for the condition-register logic insns.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

/* CCR operand/result values: 0 and 1 are "undefined", 2 is false,
   3 is true (matching the row/column labels in the tables below).  */
enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};

/* cr_logic[OP][ARG1][ARG2] is the result of condition-register
   operation OP applied to operand values ARG1 and ARG2.  The
   "undefined" entries are intentional: NOTE(review) they appear to
   follow the FRV architecture's definition of these insns — confirm
   against the architecture manual before changing any entry.  */
static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*                undefined     undefined     false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*                undefined     undefined     false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*                undefined     undefined     false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*                undefined     undefined     false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*                undefined     undefined     false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr */
  {
    /*                undefined     undefined     false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*                undefined     undefined     false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*                undefined     undefined     false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*                undefined     undefined     false         true */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};

/* Look up the result of condition-register logic OPERATION on ARG1 and
   ARG2.  No bounds checking is done: OPERATION must be less than
   num_cr_ops and each argument at most cr_true (3).  */
UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}

/* Cache Manipulation.  */

/* Preload LENGTH bytes starting at ADDRESS into the insn cache,
   locking the affected lines if LOCK is nonzero.  Only acts when the
   insn cache is enabled (HSR0.ICE).  */
void
frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....
*/ 1429 int hsr0 = GET_HSR0 (); 1430 if (GET_HSR0_ICE (hsr0)) 1431 { 1432 if (model_insn) 1433 { 1434 CPU_LOAD_ADDRESS (current_cpu) = address; 1435 CPU_LOAD_LENGTH (current_cpu) = length; 1436 CPU_LOAD_LOCK (current_cpu) = lock; 1437 } 1438 else 1439 { 1440 FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu); 1441 frv_cache_preload (cache, address, length, lock); 1442 } 1443 } 1444} 1445 1446void 1447frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock) 1448{ 1449 /* If we need to count cycles, then the cache operation will be 1450 initiated from the model profiling functions. 1451 See frvbf_model_.... */ 1452 int hsr0 = GET_HSR0 (); 1453 if (GET_HSR0_DCE (hsr0)) 1454 { 1455 if (model_insn) 1456 { 1457 CPU_LOAD_ADDRESS (current_cpu) = address; 1458 CPU_LOAD_LENGTH (current_cpu) = length; 1459 CPU_LOAD_LOCK (current_cpu) = lock; 1460 } 1461 else 1462 { 1463 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu); 1464 frv_cache_preload (cache, address, length, lock); 1465 } 1466 } 1467} 1468 1469void 1470frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address) 1471{ 1472 /* If we need to count cycles, then the cache operation will be 1473 initiated from the model profiling functions. 1474 See frvbf_model_.... */ 1475 int hsr0 = GET_HSR0 (); 1476 if (GET_HSR0_ICE (hsr0)) 1477 { 1478 if (model_insn) 1479 CPU_LOAD_ADDRESS (current_cpu) = address; 1480 else 1481 { 1482 FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu); 1483 frv_cache_unlock (cache, address); 1484 } 1485 } 1486} 1487 1488void 1489frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address) 1490{ 1491 /* If we need to count cycles, then the cache operation will be 1492 initiated from the model profiling functions. 1493 See frvbf_model_.... 
*/ 1494 int hsr0 = GET_HSR0 (); 1495 if (GET_HSR0_DCE (hsr0)) 1496 { 1497 if (model_insn) 1498 CPU_LOAD_ADDRESS (current_cpu) = address; 1499 else 1500 { 1501 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu); 1502 frv_cache_unlock (cache, address); 1503 } 1504 } 1505} 1506 1507void 1508frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all) 1509{ 1510 /* Make sure the insn was specified properly. -1 will be passed for ALL 1511 for a icei with A=0. */ 1512 if (all == -1) 1513 { 1514 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION); 1515 return; 1516 } 1517 1518 /* If we need to count cycles, then the cache operation will be 1519 initiated from the model profiling functions. 1520 See frvbf_model_.... */ 1521 if (model_insn) 1522 { 1523 /* Record the all-entries flag for use in profiling. */ 1524 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu); 1525 ps->all_cache_entries = all; 1526 CPU_LOAD_ADDRESS (current_cpu) = address; 1527 } 1528 else 1529 { 1530 FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu); 1531 if (all) 1532 frv_cache_invalidate_all (cache, 0/* flush? */); 1533 else 1534 frv_cache_invalidate (cache, address, 0/* flush? */); 1535 } 1536} 1537 1538void 1539frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all) 1540{ 1541 /* Make sure the insn was specified properly. -1 will be passed for ALL 1542 for a dcei with A=0. */ 1543 if (all == -1) 1544 { 1545 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION); 1546 return; 1547 } 1548 1549 /* If we need to count cycles, then the cache operation will be 1550 initiated from the model profiling functions. 1551 See frvbf_model_.... */ 1552 if (model_insn) 1553 { 1554 /* Record the all-entries flag for use in profiling. 
*/ 1555 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu); 1556 ps->all_cache_entries = all; 1557 CPU_LOAD_ADDRESS (current_cpu) = address; 1558 } 1559 else 1560 { 1561 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu); 1562 if (all) 1563 frv_cache_invalidate_all (cache, 0/* flush? */); 1564 else 1565 frv_cache_invalidate (cache, address, 0/* flush? */); 1566 } 1567} 1568 1569void 1570frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all) 1571{ 1572 /* Make sure the insn was specified properly. -1 will be passed for ALL 1573 for a dcef with A=0. */ 1574 if (all == -1) 1575 { 1576 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION); 1577 return; 1578 } 1579 1580 /* If we need to count cycles, then the cache operation will be 1581 initiated from the model profiling functions. 1582 See frvbf_model_.... */ 1583 if (model_insn) 1584 { 1585 /* Record the all-entries flag for use in profiling. */ 1586 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu); 1587 ps->all_cache_entries = all; 1588 CPU_LOAD_ADDRESS (current_cpu) = address; 1589 } 1590 else 1591 { 1592 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu); 1593 if (all) 1594 frv_cache_invalidate_all (cache, 1/* flush? */); 1595 else 1596 frv_cache_invalidate (cache, address, 1/* flush? */); 1597 } 1598} 1599