/*
 * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package org.graalvm.compiler.lir.aarch64;

import static jdk.vm.ci.aarch64.AArch64.zr;
import static jdk.vm.ci.code.ValueUtil.asRegister;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.ARITHMETIC;
import static org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.LOGICAL;
import static org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.NONE;
import static org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.SHIFT;

import org.graalvm.compiler.asm.aarch64.AArch64Assembler;
import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;

import jdk.vm.ci.code.Register;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
public enum AArch64ArithmeticOp {
    // TODO At least add and sub *can* be used with SP, so this should be supported
    NEG,
    NOT,
    ADD(ARITHMETIC),
    ADDS(ARITHMETIC),
    SUB(ARITHMETIC),
    SUBS(ARITHMETIC),
    MUL,
    MULVS, // multiply that also sets the condition flags on signed overflow
    DIV,
    SMULH, // signed multiply returning the high half of the full-width product
    UMULH, // unsigned multiply returning the high half of the full-width product
    REM,
    UDIV,
    UREM,
    AND(LOGICAL),
    ANDS(LOGICAL),
    OR(LOGICAL),
    XOR(LOGICAL),
    SHL(SHIFT),
    LSHR(SHIFT),
    ASHR(SHIFT),
    ABS,

    FADD,
    FSUB,
    FMUL,
    FDIV,
    FREM,
    FNEG,
    FABS,
    SQRT;
    /**
     * Specifies which constants can be used directly by the given instruction, without first
     * having to be loaded into a register.
     */
    public enum ARMv8ConstantCategory {
        NONE,
        LOGICAL,
        ARITHMETIC,
        SHIFT
    }
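
    // Background on the categories (general AArch64 encoding facts, not derived from this file):
    // ARITHMETIC immediates are unsigned 12-bit values, optionally shifted left by 12 bits;
    // LOGICAL immediates are "bitmask immediates" (replicated runs of set bits, which can encode
    // neither all-zeros nor all-ones); SHIFT immediates are shift amounts within the operand width.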

    public final ARMv8ConstantCategory category;

    AArch64ArithmeticOp(ARMv8ConstantCategory category) {
        this.category = category;
    }

    AArch64ArithmeticOp() {
        this(NONE);
    }

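    /**
     * Unary operation with a single register input ({@code result = op(x)}).
     */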
    public static class UnaryOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<UnaryOp> TYPE = LIRInstructionClass.create(UnaryOp.class);

        @Opcode private final AArch64ArithmeticOp opcode;
        @Def({REG}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue x;

        public UnaryOp(AArch64ArithmeticOp opcode, AllocatableValue result, AllocatableValue x) {
            super(TYPE);
            this.opcode = opcode;
            this.result = result;
            this.x = x;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            Register dst = asRegister(result);
            Register src = asRegister(x);
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            switch (opcode) {
                case NEG:
                    masm.sub(size, dst, zr, src);
                    break;
                case FNEG:
                    masm.fneg(size, dst, src);
                    break;
                case NOT:
                    masm.not(size, dst, src);
                    break;
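                // The absolute value is synthesized as a compare against zero followed by a
                // conditional negate.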
                case ABS:
                    masm.cmp(size, src, 0);
                    masm.csneg(size, dst, src, ConditionFlag.LT);
                    break;
                case FABS:
                    masm.fabs(size, dst, src);
                    break;
                case SQRT:
                    masm.fsqrt(size, dst, src);
                    break;
                default:
                    throw GraalError.shouldNotReachHere("op=" + opcode.name());
            }
        }
    }

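    /**
     * Binary operation whose second operand is a constant that is encoded directly into the
     * instruction; the constant must fit the op's {@link ARMv8ConstantCategory}.
     */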
    public static class BinaryConstOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<BinaryConstOp> TYPE = LIRInstructionClass.create(BinaryConstOp.class);

        @Opcode private final AArch64ArithmeticOp op;
        @Def({REG}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue a;
        private final JavaConstant b;

        public BinaryConstOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue a, JavaConstant b) {
            super(TYPE);
            this.op = op;
            this.result = result;
            this.a = a;
            this.b = b;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            assert op.category != NONE;
            Register dst = asRegister(result);
            Register src = asRegister(a);
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            switch (op) {
                case ADD:
                    // Use asLong() rather than asInt() throughout: asInt() fails for long
                    // constants, even when the value easily fits in an int.
                    assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
                    masm.add(size, dst, src, (int) b.asLong());
                    break;
                case SUB:
                    assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
                    masm.sub(size, dst, src, (int) b.asLong());
                    break;
                case ADDS:
                    assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
                    masm.adds(size, dst, src, (int) b.asLong());
                    break;
                case SUBS:
                    assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
                    masm.subs(size, dst, src, (int) b.asLong());
                    break;
                case AND:
                    // XXX Should this be handled somewhere else?
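                    // The all-ones pattern is not a valid AArch64 bitmask immediate, so a 32-bit
                    // mask of 0xFFFF_FFFF degenerates to a plain register move.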
                    if (size == 32 && b.asLong() == 0xFFFF_FFFFL) {
                        masm.mov(size, dst, src);
                    } else {
                        masm.and(size, dst, src, b.asLong());
                    }
                    break;
                case ANDS:
                    masm.ands(size, dst, src, b.asLong());
                    break;
                case OR:
                    masm.or(size, dst, src, b.asLong());
                    break;
                case XOR:
                    masm.eor(size, dst, src, b.asLong());
                    break;
                case SHL:
                    masm.shl(size, dst, src, b.asLong());
                    break;
                case LSHR:
                    masm.lshr(size, dst, src, b.asLong());
                    break;
                case ASHR:
                    masm.ashr(size, dst, src, b.asLong());
                    break;
                default:
                    throw GraalError.shouldNotReachHere("op=" + op.name());
            }
        }
    }

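    /**
     * Binary operation with two register inputs ({@code result = op(a, b)}).
     */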
    public static class BinaryOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<BinaryOp> TYPE = LIRInstructionClass.create(BinaryOp.class);

        @Opcode private final AArch64ArithmeticOp op;
        @Def({REG}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue a;
        @Use({REG}) protected AllocatableValue b;

        public BinaryOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue a, AllocatableValue b) {
            super(TYPE);
            this.op = op;
            this.result = result;
            this.a = a;
            this.b = b;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            Register dst = asRegister(result);
            Register src1 = asRegister(a);
            Register src2 = asRegister(b);
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            switch (op) {
                case ADD:
                    masm.add(size, dst, src1, src2);
                    break;
                case ADDS:
                    masm.adds(size, dst, src1, src2);
                    break;
                case SUB:
                    masm.sub(size, dst, src1, src2);
                    break;
                case SUBS:
                    masm.subs(size, dst, src1, src2);
                    break;
                case MUL:
                    masm.mul(size, dst, src1, src2);
                    break;
                case UMULH:
                    masm.umulh(size, dst, src1, src2);
                    break;
                case SMULH:
                    masm.smulh(size, dst, src1, src2);
                    break;
                case DIV:
                    masm.sdiv(size, dst, src1, src2);
                    break;
                case UDIV:
                    masm.udiv(size, dst, src1, src2);
                    break;
                case AND:
                    masm.and(size, dst, src1, src2);
                    break;
                case ANDS:
                    masm.ands(size, dst, src1, src2);
                    break;
                case OR:
                    masm.or(size, dst, src1, src2);
                    break;
                case XOR:
                    masm.eor(size, dst, src1, src2);
                    break;
                case SHL:
                    masm.shl(size, dst, src1, src2);
                    break;
                case LSHR:
                    masm.lshr(size, dst, src1, src2);
                    break;
                case ASHR:
                    masm.ashr(size, dst, src1, src2);
                    break;
                case FADD:
                    masm.fadd(size, dst, src1, src2);
                    break;
                case FSUB:
                    masm.fsub(size, dst, src1, src2);
                    break;
                case FMUL:
                    masm.fmul(size, dst, src1, src2);
                    break;
                case FDIV:
                    masm.fdiv(size, dst, src1, src2);
                    break;
                case MULVS:
                    masm.mulvs(size, dst, src1, src2);
                    break;
                default:
                    throw GraalError.shouldNotReachHere("op=" + op.name());
            }
        }
    }

    /**
     * Class used for instructions that have to reuse one of their arguments. At the moment this
     * only applies to the remainder instructions, since a remainder has to be computed as
     * {@code rem = n - TruncatingDivision(n, d) * d}.
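     *
     * For example (an illustrative sketch of one possible expansion, not necessarily the exact
     * code the macro assembler emits), the signed integer case can become:
     *
     * <pre>
     *   sdiv tmp, n, d      // tmp = n / d, truncating towards zero
     *   msub dst, tmp, d, n // dst = n - tmp * d
     * </pre>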
     *
     * TODO (das) Replace the remainder nodes in the LIR.
     */
    public static class BinaryCompositeOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<BinaryCompositeOp> TYPE = LIRInstructionClass.create(BinaryCompositeOp.class);
        @Opcode private final AArch64ArithmeticOp op;
        @Def({REG}) protected AllocatableValue result;
        @Alive({REG}) protected AllocatableValue a;
        @Alive({REG}) protected AllocatableValue b;

        public BinaryCompositeOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue a, AllocatableValue b) {
            super(TYPE);
            this.op = op;
            this.result = result;
            this.a = a;
            this.b = b;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            Register dst = asRegister(result);
            Register src1 = asRegister(a);
            Register src2 = asRegister(b);
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            switch (op) {
                case REM:
                    masm.rem(size, dst, src1, src2);
                    break;
                case UREM:
                    masm.urem(size, dst, src1, src2);
                    break;
                case FREM:
                    masm.frem(size, dst, src1, src2);
                    break;
                default:
                    throw GraalError.shouldNotReachHere("op=" + op.name());
            }
        }
    }

    public static class AddSubShiftOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<AddSubShiftOp> TYPE = LIRInstructionClass.create(AddSubShiftOp.class);

        @Opcode private final AArch64ArithmeticOp op;
        @Def(REG) protected AllocatableValue result;
        @Use(REG) protected AllocatableValue src1;
        @Use(REG) protected AllocatableValue src2;
        private final AArch64MacroAssembler.ShiftType shiftType;
        private final int shiftAmt;

        /**
         * Computes {@code result = src1 <op> (src2 <shiftType> shiftAmt)}.
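         *
         * For example, with {@code op == ADD}, {@code shiftType == LSL} and {@code shiftAmt == 3},
         * this emits the single fused instruction {@code add dst, src1, src2, lsl #3} rather than
         * a separate shift followed by an add (illustrative; the assembler picks the encoding).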
         */
        public AddSubShiftOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue src1, AllocatableValue src2, AArch64MacroAssembler.ShiftType shiftType, int shiftAmt) {
            super(TYPE);
            assert op == ADD || op == SUB;
            this.op = op;
            this.result = result;
            this.src1 = src1;
            this.src2 = src2;
            this.shiftType = shiftType;
            this.shiftAmt = shiftAmt;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            switch (op) {
                case ADD:
                    masm.add(size, asRegister(result), asRegister(src1), asRegister(src2), shiftType, shiftAmt);
                    break;
                case SUB:
                    masm.sub(size, asRegister(result), asRegister(src1), asRegister(src2), shiftType, shiftAmt);
                    break;
                default:
                    throw GraalError.shouldNotReachHere("op=" + op.name());
            }
        }
    }

    public static class ExtendedAddShiftOp extends AArch64LIRInstruction {
        private static final LIRInstructionClass<ExtendedAddShiftOp> TYPE = LIRInstructionClass.create(ExtendedAddShiftOp.class);
        @Def(REG) protected AllocatableValue result;
        @Use(REG) protected AllocatableValue src1;
        @Use(REG) protected AllocatableValue src2;
        private final AArch64Assembler.ExtendType extendType;
        private final int shiftAmt;

        /**
         * Computes {@code result = src1 + (extendType(src2) << shiftAmt)}.
         *
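         * For example, {@code add x0, x1, w2, sxtw #3} computes
         * {@code x0 = x1 + signExtend(w2) * 8}, the usual pattern for indexing a {@code long[]}
         * with an {@code int} index (an illustrative instance, not taken from this code).
         *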
         * @param extendType defines how src2 is extended to the same size as src1.
         * @param shiftAmt must be in range 0 to 4.
         */
        public ExtendedAddShiftOp(AllocatableValue result, AllocatableValue src1, AllocatableValue src2, AArch64Assembler.ExtendType extendType, int shiftAmt) {
            super(TYPE);
            this.result = result;
            this.src1 = src1;
            this.src2 = src2;
            this.extendType = extendType;
            this.shiftAmt = shiftAmt;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
            masm.add(size, asRegister(result), asRegister(src1), asRegister(src2), extendType, shiftAmt);
        }
    }

}