# Copyright (C) 2013 Apple Inc. All rights reserved.
# Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS, INC. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS, INC. OR ITS
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

require 'risc'

class Node
    # An SH4 double register "drN" aliases the single-precision pair
    # "frN" (high word) and "frN+1" (low word).  These helpers map an
    # operand's double register name onto those two singles.

    # High half of the double register, e.g. "dr2" -> "fr2".
    def sh4SingleHi
        doubleOperand = sh4Operand
        match = /^dr/.match(doubleOperand)
        raise "Bogus register name #{doubleOperand}" unless match
        "fr#{match.post_match.to_i}"
    end

    # Low half of the double register, e.g. "dr2" -> "fr3".
    def sh4SingleLo
        doubleOperand = sh4Operand
        match = /^dr/.match(doubleOperand)
        raise "Bogus register name #{doubleOperand}" unless match
        "fr#{match.post_match.to_i + 1}"
    end
end

# A scratch register reserved for the backend; it simply carries a raw
# SH4 register name.
class SpecialRegister < NoChildren
    def sh4Operand
        @name
    end

    def dump
        @name
    end

    def register?
        true
    end
end

# Scratch registers handed to assignRegistersToTemporaries.
SH4_TMP_GPRS = [SpecialRegister.new("r3"), SpecialRegister.new("r11"), SpecialRegister.new("r13")]
SH4_TMP_FPRS = [SpecialRegister.new("dr10")]

class RegisterID
    # Map the abstract offlineasm register names onto concrete SH4 GPRs.
    def sh4Operand
        mapping = {
            "a0"  => "r4",
            "a1"  => "r5",
            "t0"  => "r0",
            "t1"  => "r1",
            "t2"  => "r2",
            "t3"  => "r10",
            "t4"  => "r6",
            "cfr" => "r14",
            "sp"  => "r15",
            "lr"  => "pr",
        }
        mapping[name] or raise "Bad register #{name} for SH4 at #{codeOriginString}"
    end
end

class FPRegisterID
    # Map the abstract FP register names onto SH4 double registers.
    def sh4Operand
        mapping = {
            "ft0" => "dr0",
            "fr"  => "dr0",
            "ft1" => "dr2",
            "ft2" => "dr4",
            "ft3" => "dr6",
            "ft4" => "dr8",
            "fa0" => "dr12",
        }
        mapping[name] or raise "Bad register #{name} for SH4 at #{codeOriginString}"
    end
end

class Immediate
    # SH4 immediates are signed 8-bit; anything wider must have been
    # lowered to a constant-pool load before emission.
    def sh4Operand
        unless (-128..127).include? value
            raise "Invalid immediate #{value} at #{codeOriginString}"
        end
        "##{value}"
    end
end

class Address
    # Displacement addressing: "@Rn" or "@(disp, Rn)".  The displacement
    # of the mov.l form is limited to [0, 60].
    def sh4Operand
        if offset.value < 0 or offset.value > 60
            raise "Bad offset #{offset.value} at #{codeOriginString}"
        end
        offset.value == 0 ? "@#{base.sh4Operand}" : "@(#{offset.value}, #{base.sh4Operand})"
    end

    # Post-increment addressing ("@Rn+"); only valid with a zero offset.
    def sh4OperandPostInc
        raise "Bad offset #{offset.value} for post inc at #{codeOriginString}" unless offset.value == 0
        "@#{base.sh4Operand}+"
    end

    # Pre-decrement addressing ("@-Rn"); only valid with a zero offset.
    def sh4OperandPreDec
        raise "Bad offset #{offset.value} for pre dec at #{codeOriginString}" unless offset.value == 0
        "@-#{base.sh4Operand}"
    end
end

# Base-index and absolute addresses must be lowered to plain Addresses
# before emission; reaching these is a lowering bug.
class BaseIndex
    def sh4Operand
        raise "Unconverted base index at #{codeOriginString}"
    end
end

class AbsoluteAddress
    def sh4Operand
        raise "Unconverted absolute address at #{codeOriginString}"
    end
end
#
# Lowering of shift ops for SH4. For example:
#
# rshifti foo, bar
#
# becomes:
#
# negi foo, tmp
# shad tmp, bar
#

# Rewrites generic shift instructions into SH4 forms: a fixed-count
# "sh{l,a}{l,r}x" opcode where the hardware has one, otherwise a dynamic
# "shld"/"shad" with the count in a scratch register.  SH4 dynamic shifts
# take a signed count and shift right when it is negative, hence the
# negation for right shifts.
def sh4LowerShiftOps(list)
    result = []
    list.each {
        | node |
        unless node.is_a? Instruction
            result << node
            next
        end
        case node.opcode
        when "ulshifti", "ulshiftp", "urshifti", "urshiftp", "lshifti", "lshiftp", "rshifti", "rshiftp"
            # Leading "u" selects a logical shift; otherwise arithmetic.
            if node.opcode.start_with?("u")
                kind = "l"
                direction = node.opcode[1, 1]
            else
                kind = "a"
                direction = node.opcode[0, 1]
            end
            if node.operands[0].is_a? Immediate
                # Only the low five bits of the count matter for a 32-bit shift.
                maskedImm = Immediate.new(node.operands[0].codeOrigin, node.operands[0].value & 31)
                if maskedImm.value == 0
                    # Shift by zero: drop the instruction entirely.
                elsif maskedImm.value == 1 or (kind == "l" and [2, 8, 16].include? maskedImm.value)
                    # SH4 has single fixed-shift opcodes for these counts.
                    result << Instruction.new(node.codeOrigin, "sh#{kind}#{direction}x", [maskedImm, node.operands[1]])
                else
                    countTmp = Tmp.new(node.codeOrigin, :gpr)
                    if direction == "l"
                        result << Instruction.new(node.codeOrigin, "move", [maskedImm, countTmp])
                    else
                        # Negative count means shift right for shld/shad.
                        result << Instruction.new(node.codeOrigin, "move", [Immediate.new(node.operands[0].codeOrigin, -1 * maskedImm.value), countTmp])
                    end
                    result << Instruction.new(node.codeOrigin, "sh#{kind}d", [countTmp, node.operands[1]])
                end
            else
                # Register count: mask to [0, 31], negate for right shifts.
                countTmp = Tmp.new(node.codeOrigin, :gpr)
                result << Instruction.new(node.codeOrigin, "move", [Immediate.new(node.operands[0].codeOrigin, 31), countTmp])
                result << Instruction.new(node.codeOrigin, "andi", [node.operands[0], countTmp])
                result << Instruction.new(node.codeOrigin, "negi", [countTmp, countTmp]) if direction == "r"
                result << Instruction.new(node.codeOrigin, "sh#{kind}d", [countTmp, node.operands[1]])
            end
        else
            result << node
        end
    }
    result
end
#
# Lowering of simple branch ops for SH4. For example:
#
# baddis foo, bar, baz
#
# will become:
#
# addi foo, bar, tmp
# bs tmp, baz
#

# Splits fused arithmetic-and-branch-on-sign ops ("baddis" etc.) into a
# plain arithmetic instruction followed by a separate sign-test branch
# ("bs").  "bmulio"/"bmulpo" stay fused but get two scratch registers
# prepended, which Instruction#lowerSH4 uses for its overflow check.
def sh4LowerSimpleBranchOps(list)
    result = []
    list.each {
        | node |
        unless node.is_a? Instruction
            result << node
            next
        end
        case node.opcode
        when /^b(addi|subi|ori|addp)/
            match = Regexp.last_match
            op = match[1]
            branchCondition = match.post_match

            # Normalize pointer-sized variants onto the 32-bit ops.
            case op
            when "addi", "addp"
                op = "addi"
            when "subi", "subp"
                op = "subi"
            when "ori", "orp"
                op = "ori"
            end

            if branchCondition == "s"
                raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
                if node.operands[1].is_a? RegisterID or node.operands[1].is_a? SpecialRegister
                    result << Instruction.new(node.codeOrigin, op, node.operands[0..1])
                    result << Instruction.new(node.codeOrigin, "bs", node.operands[1..2])
                else
                    # Memory destination: load it, operate, store the
                    # result back, then branch on the computed value.
                    valueTmp = Tmp.new(node.codeOrigin, :gpr)
                    pointerTmp = Tmp.new(node.codeOrigin, :gpr)
                    addr = Address.new(node.codeOrigin, pointerTmp, Immediate.new(node.codeOrigin, 0))
                    result << Instruction.new(node.codeOrigin, "leap", [node.operands[1], pointerTmp])
                    result << Instruction.new(node.codeOrigin, "loadi", [addr, valueTmp])
                    result << Instruction.new(node.codeOrigin, op, [node.operands[0], valueTmp])
                    result << Instruction.new(node.codeOrigin, "storei", [valueTmp, addr])
                    result << Instruction.new(node.codeOrigin, "bs", [valueTmp, node.operands[2]])
                end
            else
                result << node
            end
        when "bmulio", "bmulpo"
            raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
            scratch1 = Tmp.new(node.codeOrigin, :gpr)
            scratch2 = Tmp.new(node.codeOrigin, :gpr)
            result << Instruction.new(node.codeOrigin, node.opcode, [scratch1, scratch2].concat(node.operands))
        else
            result << node
        end
    }
    result
end
#
# Lowering of double accesses for SH4. For example:
#
# loadd [foo, bar, 8], baz
#
# becomes:
#
# leap [foo, bar, 8], tmp
# loaddReversedAndIncrementAddress [tmp], baz
#

# Rewrites 64-bit double loads/stores into address materialization plus a
# pair of 32-bit fmov.s accesses (emitted by loaddReversedAndIncrementAddress
# / storedReversedAndDecrementAddress in Instruction#lowerSH4).
# NOTE(review): several bare "codeOrigin" uses below (rather than
# node.codeOrigin) assume codeOrigin resolves at top-level scope — confirm.
def sh4LowerDoubleAccesses(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when "loadd"
                tmp = Tmp.new(codeOrigin, :gpr)
                addr = Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
                newList << Instruction.new(codeOrigin, "leap", [node.operands[0], tmp])
                newList << Instruction.new(node.codeOrigin, "loaddReversedAndIncrementAddress", [addr, node.operands[1]], node.annotation)
            when "stored"
                tmp = Tmp.new(codeOrigin, :gpr)
                addr = Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
                # The store walks the address downwards (pre-decrement), so
                # start one double (8 bytes) past the base.
                newList << Instruction.new(codeOrigin, "leap", [node.operands[1].withOffset(8), tmp])
                newList << Instruction.new(node.codeOrigin, "storedReversedAndDecrementAddress", [node.operands[0], addr], node.annotation)
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end


#
# Lowering of double specials for SH4.
#

# Expands the unordered/ordered floating-point branch pseudo-ops into
# explicit NaN checks (bdnan) around the corresponding ordered compare.
def sh4LowerDoubleSpecials(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when "bdltun", "bdgtun"
                # Unordered branches: take the branch if either operand is
                # NaN, then fall through to the ordered compare.
                newList << Instruction.new(codeOrigin, "bdnan", [node.operands[0], node.operands[2]])
                newList << Instruction.new(codeOrigin, "bdnan", [node.operands[1], node.operands[2]])
                newList << Instruction.new(codeOrigin, node.opcode[0..-3], node.operands)
            when "bdnequn", "bdgtequn", "bdltequn"
                # fcmp already gives the unordered-friendly result here;
                # just strip the "un" suffix.
                newList << Instruction.new(codeOrigin, node.opcode[0..-3], node.operands)
            when "bdneq", "bdgteq", "bdlteq"
                # Ordered branches: skip the compare entirely if either
                # operand is NaN.
                outlabel = LocalLabel.unique("out_#{node.opcode}")
                outref = LocalLabelReference.new(codeOrigin, outlabel)
                newList << Instruction.new(codeOrigin, "bdnan", [node.operands[0], outref])
                newList << Instruction.new(codeOrigin, "bdnan", [node.operands[1], outref])
                newList << Instruction.new(codeOrigin, node.opcode, node.operands)
                newList << outlabel
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end


#
# Lowering of misplaced labels for SH4.
#

# Far jumps and calls to global labels cannot be reached with the short
# PC-relative branches, so rewrite them to constant-pool-based "jmpf" /
# "callf" forms that get scratch registers.
def sh4LowerMisplacedLabels(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when "jmp"
                if node.operands[0].is_a? LabelReference
                    tmp = Tmp.new(codeOrigin, :gpr)
                    newList << Instruction.new(codeOrigin, "jmpf", [tmp, node.operands[0]])
                else
                    newList << node
                end
            when "call"
                if node.operands[0].is_a? LabelReference
                    tmp1 = Tmp.new(codeOrigin, :gpr)
                    tmp2 = Tmp.new(codeOrigin, :gpr)
                    newList << Instruction.new(codeOrigin, "callf", [tmp1, tmp2, node.operands[0]])
                else
                    newList << node
                end
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end


class Sequence
    # Runs the full SH4 lowering pipeline over this sequence and returns
    # the transformed instruction list ready for emission.
    def getModifiedListSH4
        result = @list

        # Verify that we will only see instructions and labels.
        result.each {
            | node |
            unless node.is_a? Instruction or
                    node.is_a? Label or
                    node.is_a? LocalLabel or
                    node.is_a? Skip
                raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
            end
        }

        result = sh4LowerShiftOps(result)
        result = sh4LowerSimpleBranchOps(result)
        result = riscLowerMalformedAddresses(result) {
            | node, address |
            if address.is_a? Address
                case node.opcode
                when "btbz", "btbnz", "cbeq", "bbeq", "bbneq", "bbb", "loadb"
                    # Byte accesses with a displacement require r0 as one of
                    # the data operands and a displacement in [0, 15].
                    (0..15).include? address.offset.value and
                        ((node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "r0") or
                         (node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "r0"))
                when "loadh"
                    # Halfword accesses: r0 plus a displacement in [0, 30].
                    (0..30).include? address.offset.value and
                        ((node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "r0") or
                         (node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "r0"))
                else
                    # Word accesses: displacement in [0, 60].
                    (0..60).include? address.offset.value
                end
            else
                false
            end
        }
        result = sh4LowerDoubleAccesses(result)
        result = sh4LowerDoubleSpecials(result)
        result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "muli", "mulp", "andi", "ori", "xori",
            "cbeq", "cieq", "cpeq", "cineq", "cpneq", "cib", "baddio", "bsubio", "bmulio", "baddis",
            "bbeq", "bbneq", "bbb", "bieq", "bpeq", "bineq", "bpneq", "bia", "bpa", "biaeq", "bpaeq", "bib", "bpb",
            "bigteq", "bpgteq", "bilt", "bplt", "bigt", "bpgt", "bilteq", "bplteq", "btiz", "btpz", "btinz", "btpnz", "btbz", "btbnz"])
        result = riscLowerMalformedImmediates(result, -128..127)
        result = sh4LowerMisplacedLabels(result)
        result = riscLowerMisplacedAddresses(result)

        result = assignRegistersToTemporaries(result, :gpr, SH4_TMP_GPRS)
        # FIX: FP temporaries must be assigned with kind :fpr (was :gpr,
        # which would never bind the FP temps and could re-bind GPR temps
        # to FP register names).
        result = assignRegistersToTemporaries(result, :fpr, SH4_TMP_FPRS)

        return result
    end
end

# Formats an operand list as a comma-separated SH4 operand string.
def sh4Operands(operands)
    operands.map{|v| v.sh4Operand}.join(", ")
end

# Materializes an arbitrary 32-bit constant into dest via a PC-relative
# constant-pool load, branching over the inline literal.
def emitSH4Load32(constant, dest)
    outlabel = LocalLabel.unique("load32out")
    constlabel = LocalLabel.unique("load32const")
    $asm.puts "mov.l #{LocalLabelReference.new(codeOrigin, constlabel).asmLabel}, #{dest.sh4Operand}"
    $asm.puts "bra #{LocalLabelReference.new(codeOrigin, outlabel).asmLabel}"
    $asm.puts "nop"
    $asm.puts ".balign 4"
    constlabel.lower("SH4")
    $asm.puts ".long #{constant}"
    outlabel.lower("SH4")
end

# Loads a 32-bit constant (typically a code address) into scratch and
# jumps through it; the literal sits right after the jump's delay slot.
def emitSH4Load32AndJump(constant, scratch)
    constlabel = LocalLabel.unique("load32const")
    $asm.puts "mov.l #{LocalLabelReference.new(codeOrigin, constlabel).asmLabel}, #{scratch.sh4Operand}"
    $asm.puts "jmp @#{scratch.sh4Operand}"
    $asm.puts "nop"
    $asm.puts ".balign 4"
    constlabel.lower("SH4")
    $asm.puts ".long #{constant}"
end

# Loads an immediate into a register, picking the cheapest encoding:
# 8-bit mov, 16-bit constant-pool mov.w, or a full 32-bit pool load.
def emitSH4LoadImm(operands)
    if operands[0].value == 0x40000000
        # FirstConstantRegisterIndex const is often used (0x40000000).
        # It's more efficient to "build" the value with 3 opcodes without branch.
        $asm.puts "mov #64, #{operands[1].sh4Operand}"
        $asm.puts "shll16 #{operands[1].sh4Operand}"
        $asm.puts "shll8 #{operands[1].sh4Operand}"
    elsif (-128..127).include? operands[0].value
        $asm.puts "mov #{sh4Operands(operands)}"
    elsif (-32768..32767).include? operands[0].value
        constlabel = LocalLabel.unique("loadconstant")
        $asm.puts "mov.w @(6, PC), #{operands[1].sh4Operand}"
        $asm.puts "bra #{LocalLabelReference.new(codeOrigin, constlabel).asmLabel}"
        $asm.puts "nop"
        $asm.puts ".word #{operands[0].value}"
        constlabel.lower("SH4")
    else
        emitSH4Load32(operands[0].value, operands[1])
    end
end

# Emits an indirect branch (jmp/jsr) through operand, with the mandatory
# delay-slot nop.
def emitSH4Branch(sh4opcode, operand)
    $asm.puts "#{sh4opcode} @#{operand.sh4Operand}"
    $asm.puts "nop"
end

# Emits a fixed left/right shift of val bits using the shl{l,r}{16,8,2,}
# family, greedily largest-first.
def emitSH4ShiftImm(val, operand, direction)
    tmp = val
    while tmp > 0
        if tmp >= 16
            $asm.puts "shl#{direction}16 #{operand.sh4Operand}"
            tmp -= 16
        elsif tmp >= 8
            $asm.puts "shl#{direction}8 #{operand.sh4Operand}"
            tmp -= 8
        elsif tmp >= 2
            $asm.puts "shl#{direction}2 #{operand.sh4Operand}"
            tmp -= 2
        else
            $asm.puts "shl#{direction} #{operand.sh4Operand}"
            tmp -= 1
        end
    end
end

# Branches to label when the T bit is set (or clear, when neg).  The
# short bt/bf is inverted to skip over an unconditional bra (or a far
# jump for global labels), since bt/bf alone cannot reach far targets.
def emitSH4BranchIfT(label, neg)
    outlabel = LocalLabel.unique("branchIfT")
    sh4opcode = neg ? "bt" : "bf"
    $asm.puts "#{sh4opcode} #{LocalLabelReference.new(codeOrigin, outlabel).asmLabel}"
    if label.is_a? LocalLabelReference
        $asm.puts "bra #{label.asmLabel}"
        $asm.puts "nop"
    else
        emitSH4Load32AndJump(label.asmLabel, SH4_TMP_GPRS[0])
    end
    outlabel.lower("SH4")
end

# Integer compare setting the T bit.  Operand order is swapped because
# SH4's "cmp/xx Rm, Rn" tests Rn against Rm.
def emitSH4IntCompare(cmpOpcode, operands)
    $asm.puts "cmp/#{cmpOpcode} #{sh4Operands([operands[1], operands[0]])}"
end

# Compare then conditional branch on the (possibly negated) T bit.
def emitSH4CondBranch(cmpOpcode, neg, operands)
    emitSH4IntCompare(cmpOpcode, operands)
    emitSH4BranchIfT(operands[2], neg)
end

# Compare then materialize the (possibly negated) T bit as 0/1 in the
# destination register.
def emitSH4CompareSet(cmpOpcode, neg, operands)
    emitSH4IntCompare(cmpOpcode, operands)
    if !neg
        $asm.puts "movt #{operands[2].sh4Operand}"
    else
        outlabel = LocalLabel.unique("compareSet")
        $asm.puts "mov #0, #{operands[2].sh4Operand}"
        $asm.puts "bt #{LocalLabelReference.new(codeOrigin, outlabel).asmLabel}"
        $asm.puts "mov #1, #{operands[2].sh4Operand}"
        outlabel.lower("SH4")
    end
end

# Branches if operands[0] is NaN: fcmp/eq x, x is false only for NaN.
def emitSH4BranchIfNaN(operands)
    raise "Invalid operands number (#{operands.size})" unless operands.size == 2
    $asm.puts "fcmp/eq #{sh4Operands([operands[0], operands[0]])}"
    $asm.puts "bf #{operands[1].asmLabel}"
end

# Double compare and branch.  SH4 only has fcmp/eq and fcmp/gt, so "lt"
# is synthesized by swapping the operand order of fcmp/gt.
def emitSH4DoubleCondBranch(cmpOpcode, neg, operands)
    if cmpOpcode == "lt"
        $asm.puts "fcmp/gt #{sh4Operands([operands[0], operands[1]])}"
    else
        $asm.puts "fcmp/#{cmpOpcode} #{sh4Operands([operands[1], operands[0]])}"
    end
    emitSH4BranchIfT(operands[2], neg)
end

class Instruction
    # Emits SH4 assembly for this (already fully lowered) instruction.
    def lowerSH4
        $asm.comment codeOriginString
        case opcode
        when "addi", "addp"
            if operands.size == 3
                # Three-operand add: reuse the destination when it aliases a
                # source, otherwise copy first.
                if operands[0].sh4Operand == operands[2].sh4Operand
                    $asm.puts "add #{sh4Operands([operands[1], operands[2]])}"
                elsif operands[1].sh4Operand == operands[2].sh4Operand
                    $asm.puts "add #{sh4Operands([operands[0], operands[2]])}"
                else
                    $asm.puts "mov #{sh4Operands([operands[0], operands[2]])}"
                    $asm.puts "add #{sh4Operands([operands[1], operands[2]])}"
                end
            else
                $asm.puts "add #{sh4Operands(operands)}"
            end
        when "subi", "subp"
            raise "#{opcode} with #{operands.size} operands is not handled yet" unless operands.size == 2
            if operands[0].is_a? Immediate
                # No immediate sub on SH4: add the negated immediate.
                $asm.puts "add #{sh4Operands([Immediate.new(codeOrigin, -1 * operands[0].value), operands[1]])}"
            else
                $asm.puts "sub #{sh4Operands(operands)}"
            end
        when "muli", "mulp"
            # mul.l leaves the low 32 bits in MACL.
            $asm.puts "mul.l #{sh4Operands(operands[0..1])}"
            $asm.puts "sts macl, #{operands[-1].sh4Operand}"
        when "negi", "negp"
            if operands.size == 2
                $asm.puts "neg #{sh4Operands(operands)}"
            else
                $asm.puts "neg #{sh4Operands([operands[0], operands[0]])}"
            end
        when "andi", "andp", "ori", "orp", "xori", "xorp"
            raise "#{opcode} with #{operands.size} operands is not handled yet" unless operands.size == 2
            # Drop the i/p suffix to get the SH4 mnemonic (and/or/xor).
            sh4opcode = opcode[0..-2]
            $asm.puts "#{sh4opcode} #{sh4Operands(operands)}"
        when "shllx", "shlrx"
            raise "Unhandled parameters for opcode #{opcode}" unless operands[0].is_a? Immediate
            if operands[0].value == 1
                $asm.puts "shl#{opcode[3, 1]} #{operands[1].sh4Operand}"
            else
                $asm.puts "shl#{opcode[3, 1]}#{operands[0].value} #{operands[1].sh4Operand}"
            end
        when "shalx", "sharx"
            # FIX: sh4LowerShiftOps emits "shalx"/"sharx" for arithmetic
            # shift-by-1, which previously fell through to "Unhandled
            # opcode".  Only a count of 1 is ever generated.
            raise "Unhandled parameters for opcode #{opcode}" unless operands[0].is_a? Immediate and operands[0].value == 1
            $asm.puts "sha#{opcode[3, 1]} #{operands[1].sh4Operand}"
        when "shld", "shad"
            $asm.puts "#{opcode} #{sh4Operands(operands)}"
        when "loaddReversedAndIncrementAddress"
            # As we are little endian, we don't use "fmov @Rm, DRn" here.
            $asm.puts "fmov.s #{operands[0].sh4OperandPostInc}, #{operands[1].sh4SingleLo}"
            $asm.puts "fmov.s #{operands[0].sh4OperandPostInc}, #{operands[1].sh4SingleHi}"
        when "storedReversedAndDecrementAddress"
            # As we are little endian, we don't use "fmov DRm, @Rn" here.
            $asm.puts "fmov.s #{operands[0].sh4SingleHi}, #{operands[1].sh4OperandPreDec}"
            $asm.puts "fmov.s #{operands[0].sh4SingleLo}, #{operands[1].sh4OperandPreDec}"
        when "ci2d"
            # Int -> double via the FPUL transfer register.
            $asm.puts "lds #{operands[0].sh4Operand}, fpul"
            $asm.puts "float fpul, #{operands[1].sh4Operand}"
        when "fii2d"
            # Two 32-bit halves -> double, again through FPUL.
            $asm.puts "lds #{operands[0].sh4Operand}, fpul"
            $asm.puts "fsts fpul, #{operands[2].sh4SingleLo}"
            $asm.puts "lds #{operands[1].sh4Operand}, fpul"
            $asm.puts "fsts fpul, #{operands[2].sh4SingleHi}"
        when "fd2ii"
            # Double -> two 32-bit halves.
            $asm.puts "flds #{operands[0].sh4SingleLo}, fpul"
            $asm.puts "sts fpul, #{operands[1].sh4Operand}"
            $asm.puts "flds #{operands[0].sh4SingleHi}, fpul"
            $asm.puts "sts fpul, #{operands[2].sh4Operand}"
        when "addd", "subd", "muld", "divd"
            # fadd/fsub/fmul/fdiv.
            sh4opcode = opcode[0..-2]
            $asm.puts "f#{sh4opcode} #{sh4Operands(operands)}"
        when "bcd2i"
            # Truncate to int, then branch if the conversion was lossy
            # (round-trips to a different double) or yielded zero.
            $asm.puts "ftrc #{operands[0].sh4Operand}, fpul"
            $asm.puts "sts fpul, #{operands[1].sh4Operand}"
            $asm.puts "float fpul, #{SH4_TMP_FPRS[0].sh4Operand}"
            $asm.puts "fcmp/eq #{sh4Operands([operands[0], SH4_TMP_FPRS[0]])}"
            $asm.puts "bf #{operands[2].asmLabel}"
            $asm.puts "tst #{sh4Operands([operands[1], operands[1]])}"
            $asm.puts "bt #{operands[2].asmLabel}"
        when "bdnan"
            emitSH4BranchIfNaN(operands)
        when "bdneq"
            emitSH4DoubleCondBranch("eq", true, operands)
        when "bdgteq"
            emitSH4DoubleCondBranch("lt", true, operands)
        when "bdlt"
            emitSH4DoubleCondBranch("lt", false, operands)
        when "bdlteq"
            emitSH4DoubleCondBranch("gt", true, operands)
        when "bdgt"
            emitSH4DoubleCondBranch("gt", false, operands)
        when "baddio", "baddpo", "bsubio", "bsubpo"
            raise "#{opcode} with #{operands.size} operands is not handled yet" unless operands.size == 3
            # addv/subv set T on signed overflow.
            $asm.puts "#{opcode[1, 3]}v #{sh4Operands([operands[0], operands[1]])}"
            $asm.puts "bt #{operands[2].asmLabel}"
        when "bmulio", "bmulpo"
            # Five operands: two scratch registers prepended by
            # sh4LowerSimpleBranchOps, then src, srcDst, branch target.
            raise "Invalid operands number (#{operands.size})" unless operands.size == 5
            $asm.puts "dmuls.l #{sh4Operands([operands[2], operands[3]])}"
            $asm.puts "sts macl, #{operands[3].sh4Operand}"
            $asm.puts "sts mach, #{operands[0].sh4Operand}"
            # Overflow iff the high word differs from the sign extension of
            # the low word: compare MACH against 0 or -1.
            $asm.puts "cmp/pz #{operands[3].sh4Operand}"
            $asm.puts "movt #{operands[1].sh4Operand}"
            $asm.puts "dt #{operands[1].sh4Operand}"
            $asm.puts "cmp/eq #{sh4Operands([operands[0], operands[1]])}"
            $asm.puts "bf #{operands[4].asmLabel}"
        when "btiz", "btpz", "btbz", "btinz", "btpnz", "btbnz"
            if operands.size == 3
                $asm.puts "tst #{sh4Operands([operands[0], operands[1]])}"
            else
                if operands[0].sh4Operand == "r0"
                    # tst r0, r0 would work too, but cmp/eq #0, r0 is the
                    # compact r0-only immediate form.
                    $asm.puts "cmp/eq #0, r0"
                else
                    $asm.puts "tst #{sh4Operands([operands[0], operands[0]])}"
                end
            end
            emitSH4BranchIfT(operands[-1], (opcode[-2, 2] == "nz"))
        when "cieq", "cpeq", "cbeq"
            emitSH4CompareSet("eq", false, operands)
        when "cineq", "cpneq", "cbneq"
            emitSH4CompareSet("eq", true, operands)
        when "cib", "cpb", "cbb"
            # below == !(higher-or-same), unsigned.
            emitSH4CompareSet("hs", true, operands)
        when "bieq", "bpeq", "bbeq"
            emitSH4CondBranch("eq", false, operands)
        when "bineq", "bpneq", "bbneq"
            emitSH4CondBranch("eq", true, operands)
        when "bib", "bpb", "bbb"
            emitSH4CondBranch("hs", true, operands)
        when "bia", "bpa", "bba"
            emitSH4CondBranch("hi", false, operands)
        when "biaeq", "bpaeq"
            emitSH4CondBranch("hs", false, operands)
        when "bigteq", "bpgteq", "bbgteq"
            emitSH4CondBranch("ge", false, operands)
        when "bilt", "bplt", "bblt"
            # signed less-than == !(greater-or-equal).
            emitSH4CondBranch("ge", true, operands)
        when "bigt", "bpgt", "bbgt"
            emitSH4CondBranch("gt", false, operands)
        when "bilteq", "bplteq", "bblteq"
            emitSH4CondBranch("gt", true, operands)
        when "bs"
            # Branch if sign bit set: cmp/pz sets T for >= 0, bf branches on
            # T clear, i.e. on negative.
            $asm.puts "cmp/pz #{operands[0].sh4Operand}"
            $asm.puts "bf #{operands[1].asmLabel}"
        when "call"
            if operands[0].is_a? LocalLabelReference
                $asm.puts "bsr #{operands[0].asmLabel}"
                $asm.puts "nop"
            elsif operands[0].is_a? RegisterID or operands[0].is_a? SpecialRegister
                emitSH4Branch("jsr", operands[0])
            else
                raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
            end
        when "callf"
            # Far call: compute the return address with mova, stash it in pr,
            # load the target from the inline literal and jump.  r0 is saved
            # in operands[0] across the mova (which clobbers r0) and restored
            # in the delay slot.
            $asm.puts ".balign 4"
            $asm.puts "mov r0, #{operands[0].sh4Operand}"
            $asm.puts "mova @(14, PC), r0"
            $asm.puts "lds r0, pr"
            $asm.puts "mov.l @(6, PC), #{operands[1].sh4Operand}"
            $asm.puts "jmp @#{operands[1].sh4Operand}"
            $asm.puts "mov #{operands[0].sh4Operand}, r0"
            $asm.puts ".long #{operands[2].asmLabel}"
        when "jmp"
            if operands[0].is_a? LocalLabelReference
                $asm.puts "bra #{operands[0].asmLabel}"
                $asm.puts "nop"
            elsif operands[0].is_a? RegisterID or operands[0].is_a? SpecialRegister
                emitSH4Branch("jmp", operands[0])
            else
                raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
            end
        when "jmpf"
            emitSH4Load32AndJump(operands[1].asmLabel, operands[0])
        when "ret"
            $asm.puts "rts"
            $asm.puts "nop"
        when "loadb"
            # mov.b sign-extends; zero-extend afterwards.
            $asm.puts "mov.b #{sh4Operands(operands)}"
            $asm.puts "extu.b #{sh4Operands([operands[1], operands[1]])}"
        when "loadh"
            # mov.w sign-extends; zero-extend afterwards.
            $asm.puts "mov.w #{sh4Operands(operands)}"
            $asm.puts "extu.w #{sh4Operands([operands[1], operands[1]])}"
        when "loadi", "loadis", "loadp", "storei", "storep"
            $asm.puts "mov.l #{sh4Operands(operands)}"
        when "move"
            if operands[0].is_a? LabelReference
                emitSH4Load32(operands[0].asmLabel, operands[1])
            elsif operands[0].is_a? Immediate
                emitSH4LoadImm(operands)
            else
                $asm.puts "mov #{sh4Operands(operands)}"
            end
        when "leap"
            if operands[0].is_a? BaseIndex
                # dest = base + (index << scaleShift) + offset.
                biop = operands[0]
                if biop.scale > 0
                    $asm.puts "mov #{sh4Operands([biop.index, operands[1]])}"
                    if biop.scaleShift > 0
                        emitSH4ShiftImm(biop.scaleShift, operands[1], "l")
                    end
                    $asm.puts "add #{sh4Operands([biop.base, operands[1]])}"
                else
                    $asm.puts "mov #{sh4Operands([biop.base, operands[1]])}"
                end
                if biop.offset.value != 0
                    $asm.puts "add #{sh4Operands([biop.offset, operands[1]])}"
                end
            elsif operands[0].is_a? Address
                # dest = base + offset, skipping no-op moves/adds.
                if operands[0].base != operands[1]
                    $asm.puts "mov #{sh4Operands([operands[0].base, operands[1]])}"
                end
                if operands[0].offset.value != 0
                    $asm.puts "add #{sh4Operands([operands[0].offset, operands[1]])}"
                end
            else
                raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
            end
        when "ldspr"
            $asm.puts "lds #{sh4Operands(operands)}, pr"
        when "stspr"
            $asm.puts "sts pr, #{sh4Operands(operands)}"
        when "break"
            # This special opcode always generates an illegal instruction exception.
            $asm.puts ".word 0xfffd"
        else
            raise "Unhandled opcode #{opcode} at #{codeOriginString}"
        end
    end
end