/*
 * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package org.graalvm.compiler.core.aarch64;

import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;

import java.util.function.Function;

import org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode;
import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.common.spi.LIRKindTool;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.StandardOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.aarch64.AArch64AddressValue;
import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp;
import org.graalvm.compiler.lir.aarch64.AArch64Compare;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.CondMoveOp;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.StrategySwitchOp;
import org.graalvm.compiler.lir.aarch64.AArch64Move;
import org.graalvm.compiler.lir.aarch64.AArch64Move.CompareAndSwapOp;
import org.graalvm.compiler.lir.aarch64.AArch64Move.MembarOp;
import org.graalvm.compiler.lir.aarch64.AArch64PauseOp;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
import org.graalvm.compiler.lir.gen.LIRGenerator;
import org.graalvm.compiler.phases.util.Providers;

import jdk.vm.ci.aarch64.AArch64Kind;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.PrimitiveConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;

public abstract class AArch64LIRGenerator extends LIRGenerator {

    public AArch64LIRGenerator(LIRKindTool lirKindTool, AArch64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
        super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
    }

    /**
     * Checks whether the supplied constant can be used without loading it into a register for store
     * operations, i.e., on the right-hand side of a memory access.
     *
     * @param c The constant to check.
     * @return True if the constant can be used directly, false if the constant needs to be in a
     *         register.
     */
    protected static final boolean canStoreConstant(JavaConstant c) {
        // Our own code never calls this since we can't make a definite statement about whether or
        // not we can inline a constant without knowing what kind of operation we execute. Let's be
        // optimistic here and fix up mistakes later.
        return true;
    }

    /**
     * AArch64 cannot use anything smaller than a word in any instruction other than load and store.
     */
    @Override
    public <K extends ValueKind<K>> K toRegisterKind(K kind) {
        switch ((AArch64Kind) kind.getPlatformKind()) {
            case BYTE:
            case WORD:
                return kind.changeType(AArch64Kind.DWORD);
            default:
                return kind;
        }
    }
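
    // Editorial illustration (not part of the original code): sub-word kinds are
    // widened to the 32-bit DWORD kind before register use, wider kinds pass through:
    //
    //   toRegisterKind(LIRKind.value(AArch64Kind.BYTE)).getPlatformKind()  == AArch64Kind.DWORD
    //   toRegisterKind(LIRKind.value(AArch64Kind.QWORD)).getPlatformKind() == AArch64Kind.QWORD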

    @Override
    public void emitNullCheck(Value address, LIRFrameState state) {
        append(new AArch64Move.NullCheckOp(asAddressValue(address), state));
    }

    @Override
    public Variable emitAddress(AllocatableValue stackslot) {
        Variable result = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new AArch64Move.StackLoadAddressOp(result, stackslot));
        return result;
    }

    public AArch64AddressValue asAddressValue(Value address) {
        if (address instanceof AArch64AddressValue) {
            return (AArch64AddressValue) address;
        } else {
            return new AArch64AddressValue(address.getValueKind(), asAllocatable(address), Value.ILLEGAL, 0, false, AddressingMode.BASE_REGISTER_ONLY);
        }
    }

    @Override
    public Variable emitCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        Variable result = newVariable(trueValue.getValueKind());
        Variable scratch = newVariable(LIRKind.value(AArch64Kind.WORD));
        append(new CompareAndSwapOp(result, loadNonCompareConst(expectedValue), loadReg(newValue), asAllocatable(address), scratch));
        return result;
    }
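
    // Minimal sketch (editorial, assuming a pre-LSE load-/store-exclusive expansion;
    // the authoritative sequence is whatever AArch64Move.CompareAndSwapOp emits):
    //
    //   retry: ldaxr  result, [address]            // load-acquire exclusive
    //          cmp    result, expectedValue
    //          b.ne   done                         // mismatch: result holds the old value
    //          stlxr  scratch, newValue, [address] // store-release exclusive
    //          cbnz   scratch, retry               // lost exclusivity: retry
    //   done: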

    @Override
    public void emitMembar(int barriers) {
        int necessaryBarriers = target().arch.requiredBarriers(barriers);
        if (target().isMP && necessaryBarriers != 0) {
            append(new MembarOp(necessaryBarriers));
        }
    }

    @Override
    public void emitJump(LabelRef label) {
        assert label != null;
        append(new StandardOp.JumpOp(label));
    }

    @Override
    public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpKind, double overflowProbability) {
        append(new AArch64ControlFlow.BranchOp(ConditionFlag.VS, overflow, noOverflow, overflowProbability));
    }

    /**
     * Branches to trueDestination if (left & right) == 0, else to falseDestination.
     *
     * @param left Integer kind. Non null.
     * @param right Integer kind. Non null.
     * @param trueDestination destination if (left & right) == 0. Non null.
     * @param falseDestination destination if (left & right) != 0. Non null.
     * @param trueSuccessorProbability historic probability that the comparison is true.
     */
    @Override
    public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueSuccessorProbability) {
        assert ((AArch64Kind) left.getPlatformKind()).isInteger() && left.getPlatformKind() == right.getPlatformKind();
        ((AArch64ArithmeticLIRGenerator) getArithmetic()).emitBinary(LIRKind.combine(left, right), AArch64ArithmeticOp.ANDS, true, left, right);
        append(new AArch64ControlFlow.BranchOp(ConditionFlag.EQ, trueDestination, falseDestination, trueSuccessorProbability));
    }
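
    // Schematically (editorial sketch, illustrative register names), the test
    // branch above becomes:
    //
    //   ands w2, w0, w1      // w2 = left & right; flags set, result unused
    //   b.eq trueDestination // Z set <=> (left & right) == 0
    //
    // with falseDestination reached by fall-through or an unconditional branch.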

    /**
     * Conditionally moves trueValue into a new variable if the comparison (taking unorderedIsTrue
     * into account) holds, else falseValue.
     *
     * @param left Arbitrary value. Has to have same type as right. Non null.
     * @param right Arbitrary value. Has to have same type as left. Non null.
     * @param cond condition that decides whether to move trueValue or falseValue into result. Non
     *            null.
     * @param unorderedIsTrue defines whether floating-point comparisons consider unordered true or
     *            not. Ignored for integer comparisons.
     * @param trueValue arbitrary value of the same type as falseValue. Non null.
     * @param falseValue arbitrary value of the same type as trueValue. Non null.
     * @return value containing trueValue if the condition (plus unorderedIsTrue for floats) holds,
     *         else falseValue. Non null.
     */
    @Override
    public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
        boolean mirrored = emitCompare(cmpKind, left, right, cond, unorderedIsTrue);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        boolean finalUnorderedIsTrue = mirrored ? !unorderedIsTrue : unorderedIsTrue;
        ConditionFlag cmpCondition = toConditionFlag(((AArch64Kind) cmpKind).isInteger(), finalCondition, finalUnorderedIsTrue);
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, cmpCondition, loadReg(trueValue), loadReg(falseValue)));
        return result;
    }
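
    // Typical lowering (editorial example for a signed integer '<'): the compare
    // sets the flags and CondMoveOp selects between the two loaded values:
    //
    //   cmp  x1, x2
    //   csel x0, xTrue, xFalse, lt   // fcsel for floating-point result kinds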

    @Override
    public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueDestination, LabelRef falseDestination,
                    double trueDestinationProbability) {
        boolean mirrored = emitCompare(cmpKind, left, right, cond, unorderedIsTrue);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        boolean finalUnorderedIsTrue = mirrored ? !unorderedIsTrue : unorderedIsTrue;
        ConditionFlag cmpCondition = toConditionFlag(((AArch64Kind) cmpKind).isInteger(), finalCondition, finalUnorderedIsTrue);
        append(new BranchOp(cmpCondition, trueDestination, falseDestination, trueDestinationProbability));
    }

    private static ConditionFlag toConditionFlag(boolean isInt, Condition cond, boolean unorderedIsTrue) {
        return isInt ? toIntConditionFlag(cond) : toFloatConditionFlag(cond, unorderedIsTrue);
    }

    /**
     * Takes a Condition and an unorderedIsTrue flag and returns the correct AArch64-specific
     * ConditionFlag. Note: This is only correct if the emitCompare code for floats has correctly
     * handled the case of 'EQ && unorderedIsTrue', respectively 'NE && !unorderedIsTrue'!
     */
    private static ConditionFlag toFloatConditionFlag(Condition cond, boolean unorderedIsTrue) {
        switch (cond) {
            case LT:
                return unorderedIsTrue ? ConditionFlag.LT : ConditionFlag.LO;
            case LE:
                return unorderedIsTrue ? ConditionFlag.LE : ConditionFlag.LS;
            case GE:
                return unorderedIsTrue ? ConditionFlag.PL : ConditionFlag.GE;
            case GT:
                return unorderedIsTrue ? ConditionFlag.HI : ConditionFlag.GT;
            case EQ:
                return ConditionFlag.EQ;
            case NE:
                return ConditionFlag.NE;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
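
    // Background for the mapping above: fcmp sets NZCV = 0011 for an unordered
    // result. LT (N != V) is therefore true for unordered inputs while LO (C == 0)
    // is false, which is why LT/LO encode the unorderedIsTrue and !unorderedIsTrue
    // variants of '<'; the same reasoning yields the LE/LS, PL/GE and HI/GT pairs.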

    /**
     * Takes a Condition and returns the correct AArch64-specific ConditionFlag.
     */
    private static ConditionFlag toIntConditionFlag(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.EQ;
            case NE:
                return ConditionFlag.NE;
            case LT:
                return ConditionFlag.LT;
            case LE:
                return ConditionFlag.LE;
            case GT:
                return ConditionFlag.GT;
            case GE:
                return ConditionFlag.GE;
            case AE:
                return ConditionFlag.HS;
            case BE:
                return ConditionFlag.LS;
            case AT:
                return ConditionFlag.HI;
            case BT:
                return ConditionFlag.LO;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * This method emits the compare instruction, and may reorder the operands. It returns true if
     * it did so.
     *
     * @param a the left operand of the comparison. Has to have same type as b. Non null.
     * @param b the right operand of the comparison. Has to have same type as a. Non null.
     * @return true if mirrored (i.e. "b cmp a" instead of "a cmp b" was done).
     */
    protected boolean emitCompare(PlatformKind cmpKind, Value a, Value b, Condition condition, boolean unorderedIsTrue) {
        Value left;
        Value right;
        boolean mirrored;
        AArch64Kind kind = (AArch64Kind) cmpKind;
        if (kind.isInteger()) {
            if (LIRValueUtil.isVariable(b)) {
                left = load(b);
                right = loadNonConst(a);
                mirrored = true;
            } else {
                left = load(a);
                right = loadNonConst(b);
                mirrored = false;
            }
            append(new AArch64Compare.CompareOp(left, loadNonCompareConst(right)));
        } else if (kind.isSIMD()) {
            if (AArch64Compare.FloatCompareOp.isFloatCmpConstant(a, condition, unorderedIsTrue)) {
                left = load(b);
                right = a;
                mirrored = true;
            } else if (AArch64Compare.FloatCompareOp.isFloatCmpConstant(b, condition, unorderedIsTrue)) {
                left = load(a);
                right = b;
                mirrored = false;
            } else {
                left = load(a);
                right = loadReg(b);
                mirrored = false;
            }
            append(new AArch64Compare.FloatCompareOp(left, asAllocatable(right), condition, unorderedIsTrue));
        } else {
            throw GraalError.shouldNotReachHere();
        }
        return mirrored;
    }
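
    // Worked example of the mirroring contract (editorial): for 'a < b' with a a
    // compare constant and b a variable, the integer path emits 'cmp b, #a' and
    // returns true; the caller then mirrors '<' to '>', which is sound because
    // a < b holds exactly when b > a.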

    /**
     * If value is a constant that cannot be used directly with a gpCompare instruction, loads it
     * into a register and returns the register; otherwise returns the constant value unchanged.
     */
    protected Value loadNonCompareConst(Value value) {
        if (!isCompareConstant(value)) {
            return loadReg(value);
        }
        return value;
    }

    /**
     * Checks whether value can be used directly with a gpCompare instruction. This is <b>not</b>
     * the same as {@link AArch64ArithmeticLIRGenerator#isArithmeticConstant(JavaConstant)}, because
     * 0.0 is a valid compare constant for floats, while there are no arithmetic constants for
     * floats.
     *
     * @param value any type. Non null.
     * @return true if value can be used directly in comparison instruction, false otherwise.
     */
    public boolean isCompareConstant(Value value) {
        if (isJavaConstant(value)) {
            JavaConstant constant = asJavaConstant(value);
            if (constant instanceof PrimitiveConstant) {
                final long longValue = constant.asLong();
                long maskedValue;
                switch (constant.getJavaKind()) {
                    case Boolean:
                    case Byte:
                        maskedValue = longValue & 0xFF;
                        break;
                    case Char:
                    case Short:
                        maskedValue = longValue & 0xFFFF;
                        break;
                    case Int:
                        // Mask with a long literal: the int literal 0xFFFF_FFFF would
                        // sign-extend to -1L and mask nothing.
                        maskedValue = longValue & 0xFFFF_FFFFL;
                        break;
                    case Long:
                        maskedValue = longValue;
                        break;
                    default:
                        throw GraalError.shouldNotReachHere();
                }
                return AArch64MacroAssembler.isArithmeticImmediate(maskedValue);
            } else {
                return constant.isDefaultForKind();
            }
        }
        return false;
    }
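
    // A few concrete cases (editorial examples, assuming the usual AArch64
    // arithmetic-immediate encoding of a 12-bit value optionally shifted left by 12):
    //
    //   JavaConstant.forInt(4095)     -> true  (fits in 12 bits)
    //   JavaConstant.forInt(0x123000) -> true  (12-bit value shifted by 12)
    //   JavaConstant.forInt(0x12345)  -> false (must be loaded into a register)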

    /**
     * Moves trueValue into result if (left & right) == 0, else falseValue.
     *
     * @param left Integer kind. Non null.
     * @param right Integer kind. Non null.
     * @param trueValue Integer kind. Non null.
     * @param falseValue Integer kind. Non null.
     * @return virtual register containing trueValue if (left & right) == 0, else falseValue.
     */
    @Override
    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
        assert ((AArch64Kind) left.getPlatformKind()).isInteger() && ((AArch64Kind) right.getPlatformKind()).isInteger();
        assert ((AArch64Kind) trueValue.getPlatformKind()).isInteger() && ((AArch64Kind) falseValue.getPlatformKind()).isInteger();
        ((AArch64ArithmeticLIRGenerator) getArithmetic()).emitBinary(trueValue.getValueKind(), AArch64ArithmeticOp.ANDS, true, left, right);
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, ConditionFlag.EQ, load(trueValue), load(falseValue)));
        return result;
    }

    @Override
    public void emitStrategySwitch(SwitchStrategy strategy, Variable key, LabelRef[] keyTargets, LabelRef defaultTarget) {
        append(createStrategySwitchOp(strategy, keyTargets, defaultTarget, key, newVariable(key.getValueKind()), AArch64LIRGenerator::toIntConditionFlag));
    }

    protected StrategySwitchOp createStrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Variable key, AllocatableValue scratchValue,
                    Function<Condition, ConditionFlag> converter) {
        return new StrategySwitchOp(strategy, keyTargets, defaultTarget, key, scratchValue, converter);
    }

    @Override
    protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) {
        // Make a copy of key since the TableSwitchOp destroys its input.
        Variable tmp = emitMove(key);
        Variable scratch = newVariable(LIRKind.value(AArch64Kind.WORD));
        append(new AArch64ControlFlow.TableSwitchOp(lowKey, defaultTarget, targets, tmp, scratch));
    }

    @Override
    public Variable emitByteSwap(Value operand) {
        // TODO (das) Do not generate until we support vector instructions
        throw GraalError.unimplemented("Do not generate until we support vector instructions");
    }

    @Override
    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length) {
        // TODO (das) Do not generate until we support vector instructions
        throw GraalError.unimplemented("Do not generate until we support vector instructions");
    }

    @Override
    protected JavaConstant zapValueForKind(PlatformKind kind) {
        long dead = 0xDEADDEADDEADDEADL;
        switch ((AArch64Kind) kind) {
            case BYTE:
                return JavaConstant.forByte((byte) dead);
            case WORD:
                return JavaConstant.forShort((short) dead);
            case DWORD:
                return JavaConstant.forInt((int) dead);
            case QWORD:
                return JavaConstant.forLong(dead);
            case SINGLE:
                return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
            case DOUBLE:
                return JavaConstant.forDouble(Double.longBitsToDouble(dead));
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
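
    // For instance (editorial note): zapping a DWORD slot yields
    // JavaConstant.forInt(0xDEADDEAD); SINGLE and DOUBLE reuse the same raw bits,
    // so stale values stand out uniformly in crash dumps and register maps.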

    /**
     * Loads value into a virtual register. Contrary to {@link #load(Value)} this handles
     * RegisterValues (i.e. values corresponding to fixed physical registers) correctly, by not
     * creating an unnecessary move into a virtual register.
     *
     * This avoids generating code such as
     *
     * <pre>
     * mov x0, x19   // x19 is the fixed thread register
     * ldr x0, [x0]
     * </pre>
     *
     * instead of simply
     *
     * <pre>
     * ldr x0, [x19]
     * </pre>
     */
    protected AllocatableValue loadReg(Value val) {
        if (!(val instanceof Variable || val instanceof RegisterValue)) {
            return emitMove(val);
        }
        return (AllocatableValue) val;
    }

    @Override
    public void emitPause() {
        append(new AArch64PauseOp());
    }
}