/*
 * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "Operations.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

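// Emits the shared slow case used by the link and virtual call thunks when the callee
// is not a JSFunction: borrow the caller's scope chain, fill in ReturnPC and a null
// CodeBlock the way a JS call would, call out to the C++ "not a JS function" handler,
// then restore the caller's frame and return.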
static JSInterfaceJIT::Call generateSlowCaseFor(VM* vm, JSInterfaceJIT& jit)
{
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT2, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT2, JSStack::ScopeChain);

    // Also initialize ReturnPC and CodeBlock, like a JS function would.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callNotJSFunction = jit.call();
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::callFrameRegister);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.ret();

    return callNotJSFunction;
}

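// Builds the trampoline used for not-yet-linked calls: if the callee is a JSFunction,
// finish initializing the call frame and call the lazy-link stub, then jump to the code
// pointer it returns in regT0. Non-function callees take the generic slow case above.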
static MacroAssemblerCodeRef linkForGenerator(VM* vm, FunctionPtr lazyLink, FunctionPtr notJSFunction, const char* name)
{
    JSInterfaceJIT jit;

    JSInterfaceJIT::JumpList slowCase;

#if USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
#else // USE(JSVALUE64)
    slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
#endif // USE(JSVALUE64)

    // Finish canonical initialization before JS function call.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Also initialize ReturnPC for use by lazy linking and exceptions.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callLazyLink = jit.call();
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.jump(JSInterfaceJIT::regT0);

    slowCase.link(&jit);
    JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    patchBuffer.link(callLazyLink, lazyLink);
    patchBuffer.link(callNotJSFunction, notJSFunction);

    return FINALIZE_CODE(patchBuffer, ("link %s trampoline", name));
}

MacroAssemblerCodeRef linkCallGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkCall), FunctionPtr(cti_op_call_NotJSFunction), "call");
}

MacroAssemblerCodeRef linkConstructGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkConstruct), FunctionPtr(cti_op_construct_NotJSConstruct), "construct");
}

MacroAssemblerCodeRef linkClosureCallGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkClosureCall), FunctionPtr(cti_op_call_NotJSFunction), "closure call");
}

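// Builds the trampoline for virtual (unlinked) calls: verify the callee is a JSFunction,
// finish initializing the call frame, compile the callee via the supplied stub if it has
// no code for this specialization yet (numParameters still negative), then jump to the
// callee's arity-check entry point.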
static MacroAssemblerCodeRef virtualForGenerator(VM* vm, FunctionPtr compile, FunctionPtr notJSFunction, const char* name, CodeSpecializationKind kind)
{
    JSInterfaceJIT jit;

    JSInterfaceJIT::JumpList slowCase;

#if USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
#else // USE(JSVALUE64)
    slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
#endif // USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));

    // Finish canonical initialization before JS function call.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump hasCodeBlock1 = jit.branch32(JSInterfaceJIT::GreaterThanOrEqual, JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfNumParametersFor(kind)), JSInterfaceJIT::TrustedImm32(0));
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callCompile = jit.call();
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    hasCodeBlock1.link(&jit);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfJITCodeWithArityCheckFor(kind)), JSInterfaceJIT::regT0);
    jit.jump(JSInterfaceJIT::regT0);

    slowCase.link(&jit);
    JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    patchBuffer.link(callCompile, compile);
    patchBuffer.link(callNotJSFunction, notJSFunction);

    return FINALIZE_CODE(patchBuffer, ("virtual %s trampoline", name));
}

MacroAssemblerCodeRef virtualCallGenerator(VM* vm)
{
    return virtualForGenerator(vm, FunctionPtr(cti_op_call_jitCompile), FunctionPtr(cti_op_call_NotJSFunction), "call", CodeForCall);
}

MacroAssemblerCodeRef virtualConstructGenerator(VM* vm)
{
    return virtualForGenerator(vm, FunctionPtr(cti_op_construct_jitCompile), FunctionPtr(cti_op_construct_NotJSConstruct), "construct", CodeForConstruct);
}

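// Fast path stub for loading "length" from a string: returns the JSString's length as a
// boxed int32, bailing to cti_op_get_by_id_string_fail when the base value is not a
// string cell or the length cannot be represented as an int32 immediate.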
MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM* vm)
{
    JSInterfaceJIT jit;

#if USE(JSVALUE64)
    // Check that regT0 is a string.
    JSInterfaceJIT::Jump failureCases1 = jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0);
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::Address(
            JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! - get the length from the JSString.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT0);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::LessThan, JSInterfaceJIT::regT0, JSInterfaceJIT::TrustedImm32(0));

    // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign extend here.
    jit.emitFastArithIntToImmNoCheck(JSInterfaceJIT::regT0, JSInterfaceJIT::regT0);

#else // USE(JSVALUE64)
    // regT0 holds the payload, regT1 holds the tag.

    JSInterfaceJIT::Jump failureCases1 = jit.branch32(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1,
        JSInterfaceJIT::TrustedImm32(JSValue::CellTag));
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! - get the length from the JSString.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT2);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::Above, JSInterfaceJIT::regT2, JSInterfaceJIT::TrustedImm32(INT_MAX));
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::Int32Tag), JSInterfaceJIT::regT1);
#endif // USE(JSVALUE64)

    jit.ret();

    JSInterfaceJIT::Call failureCases1Call = jit.makeTailRecursiveCall(failureCases1);
    JSInterfaceJIT::Call failureCases2Call = jit.makeTailRecursiveCall(failureCases2);
    JSInterfaceJIT::Call failureCases3Call = jit.makeTailRecursiveCall(failureCases3);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);

    patchBuffer.link(failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));

    return FINALIZE_CODE(patchBuffer, ("string length trampoline"));
}

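// Builds the thunk that calls a host (native C++) function: store a null CodeBlock and
// the current frame into vm->topCallFrame, marshal ExecState* into the first argument
// register per the architecture's calling convention, call the NativeExecutable's
// function pointer, then check vm->exception and divert to the throw trampoline if an
// exception was set.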
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit;

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load the caller frame's scope chain into this call frame so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    // Call the function.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
    // Load the caller frame's scope chain into this call frame so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses and align the stack.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM)
    // Load the caller frame's scope chain into this call frame so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention:      f(r0 == regT0, r1 == regT1, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARMRegisters::r0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(ARMRegisters::r1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(SH4)
    // Load the caller frame's scope chain into this call frame so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(r0 == regT4, r1 == regT5, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT4);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT5);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT5, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSInterfaceJIT::regT0);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(MIPS)
    // Load the caller frame's scope chain into this call frame so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention:      f(a0, a1, a2, a3);
    // Host function signature: f(ExecState*);

    // Allocate 16 bytes of stack space (8-byte aligned): the unused home area for the four argument registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    // Set up arg0.
    jit.move(JSInterfaceJIT::callFrameRegister, MIPSRegisters::a0);

    // Call
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
    jit.loadPtr(JSInterfaceJIT::Address(MIPSRegisters::a2, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    breakpoint();
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(&(vm->exception), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    // Grab the return address.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);

    jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
    jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.poke(JSInterfaceJIT::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    // Set the return address.
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), JSInterfaceJIT::regT1);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT1);

    jit.ret();

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

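// Shared helper for the charAt/charCodeAt thunks: load the 'this' string, fail to the
// slow path if it has no value pointer yet (a rope) or if the index argument is out of
// range (a single unsigned compare also rejects negative indices), then load the 8-bit
// or 16-bit character into regT0.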
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

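// Converts a character code in 'src' to the corresponding single-character JSString in
// 'dst' by indexing the VM's small-strings table. Fails to the slow path for codes of
// 0x100 and above, or when the cached string cell has not been created yet.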
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "sqrt");
}


#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
extern "C" {

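// Math.round() semantics as a double-to-double helper, referenced from the assembly
// wrappers below: take ceil(d) and subtract 1 when that overshoots by more than 0.5,
// which rounds halfway cases up. For example, jsRound(2.4) == 2, jsRound(2.5) == 3,
// and jsRound(-2.5) == -2.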
double jsRound(double) REFERENCED_FROM_ASM;
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

}

#if CPU(X86_64) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $8, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $8, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;

MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
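        // Fast path for nonnegative doubles: floor(x) == truncate(x) when x >= 0. An exact
        // zero is returned as a double so that -0 is preserved; negative values and NaN fall
        // through to the floor() wrapper call below.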
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "ceil");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
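        // Fast path for nonnegative doubles: round(x) == truncate(x + 0.5) when x >= 0. An
        // exact zero is returned as a double so that -0 is preserved; negative values and NaN
        // fall through to the jsRound() wrapper call below.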
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "log");
}

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
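    // Branchless integer abs: regT1 = regT0 >> 31 is all ones for negative inputs and zero
    // otherwise, so (regT0 + regT1) ^ regT1 == |regT0|. The one value this cannot handle,
    // INT_MIN, is detected afterwards and fails over to the slow path, since its absolute
    // value is not representable as an int32.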
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "abs");
}

MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

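    // Square-and-multiply loop for a nonnegative int32 exponent: fpRegT1 accumulates the
    // result (starting at 1.0), fpRegT0 holds the base and is squared each iteration, and
    // the exponent in regT0 is shifted right; whenever its low bit is set, the accumulator
    // is multiplied by the current base.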
    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "pow");
}

MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

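    // If an argument is a double rather than an int32, try to truncate it in place; a value
    // that cannot be truncated is treated as zero before rejoining the fast path above.
    // Without truncation support, non-int32 arguments fail over to the native call.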
    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
        jit.jump(doneLoadingArg0);
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
        jit.jump(doneLoadingArg1);
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "imul");
}

}

#endif // ENABLE(JIT)