// solaris_x86_64.il revision 3171:da4be62fb889
//
// Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

	// The argument size of each inline directive is ignored by the compiler
	// and is set to the number of arguments as documentation.
	// Get the raw thread ID from %fs:0.
	// NOTE(review): an earlier comment said %gs:0, but the instruction
	// reads %fs:0 — %fs holds the thread pointer on Solaris amd64
	// (%gs is the 32-bit convention). The instruction is authoritative.
	.inline _raw_thread_id,0
	movq     %fs:0, %rax
	.end

	// Get current sp (marked .volatile so the compiler does not
	// reorder or elide the template)
	.inline _get_current_sp,0
	.volatile
	movq     %rsp, %rax
	.end

	// Get current fp
	.inline _get_current_fp,0
	.volatile
	movq     %rbp, %rax
	.end

	// Support for os::rdtsc()
	// rdtsc delivers the 64-bit timestamp split across %edx:%eax;
	// shift the high half up and merge so the full value returns in %rax.
	.inline _raw_rdtsc,0
	rdtsc
	salq     $32, %rdx
	orq      %rdx, %rax
	.end

	// Support for jint Atomic::add(jint add_value, volatile jint* dest)
	// In: %edi = add_value, %rsi = dest.  Out: %eax = new value of *dest.
	.inline _Atomic_add,2
	movl     %edi, %eax      // save add_value for return
	lock
	xaddl    %edi, (%rsi)    // *dest += add_value; %edi <- old *dest
	addl     %edi, %eax      // old value + add_value == new value
	.end

	// Support for jlong Atomic::add(jlong add_value, volatile jlong* dest)
	// 64-bit variant of _Atomic_add; same register protocol, q-suffixed ops.
	.inline _Atomic_add_long,2
	movq     %rdi, %rax      // save add_value for return
	lock
	xaddq    %rdi, (%rsi)    // *dest += add_value; %rdi <- old *dest
	addq     %rdi, %rax      // old value + add_value == new value
	.end

	// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
	// xchg with a memory operand is implicitly locked; no lock prefix needed.
	// Returns the previous value of *dest in %eax.
	.inline _Atomic_xchg,2
	xchgl    (%rsi), %edi
	movl     %edi, %eax
	.end

	// Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest).
	// 64-bit variant of _Atomic_xchg: implicitly locked exchange,
	// returns the previous value of *dest in %rax.
	.inline _Atomic_xchg_long,2
	xchgq    (%rsi), %rdi
	movq     %rdi, %rax
	.end

	// Support for jint Atomic::cmpxchg(jint exchange_value,
	//                                  volatile jint *dest,
	//                                  jint compare_value)
	// cmpxchg compares %eax with *dest; returns the value observed at
	// *dest in %eax (== compare_value iff the swap succeeded).
	.inline _Atomic_cmpxchg,3
	movl     %edx, %eax      // compare_value into %eax, as cmpxchg requires
	lock
	cmpxchgl %edi, (%rsi)    // if (*dest == %eax) *dest = exchange_value
	.end

	// Support for jlong Atomic::cmpxchg(jlong exchange_value,
	//                                   volatile jlong* dest,
	//                                   jlong compare_value)
	.inline _Atomic_cmpxchg_long,3
	movq     %rdx, %rax      // compare_value into %rax, as cmpxchg requires
	lock
	cmpxchgq %rdi, (%rsi)    // if (*dest == %rax) *dest = exchange_value
	.end

	// Support for OrderAccess::acquire()
	// A plain load from the stack; presumably serves as a compiler-level
	// ordering point (x86 loads are not reordered with other loads).
	.inline _OrderAccess_acquire,0
	movl     0(%rsp), %eax
	.end

	// Support for OrderAccess::fence()
	// Locked read-modify-write of the stack top: the classic full-fence
	// idiom, typically cheaper than mfence.
	.inline _OrderAccess_fence,0
	lock
	addl     $0, (%rsp)
	.end

	// Support for u2 Bytes::swap_u2(u2 x)
	// Byte-swap a 16-bit value: rotating a word by 8 swaps its two bytes.
	.inline _raw_swap_u2,1
	movw     %di, %ax
	rorw     $8, %ax
	.end

	// Support for u4 Bytes::swap_u4(u4 x)
	.inline _raw_swap_u4,1
	movl     %edi, %eax
	bswapl   %eax
	.end

	// Support for u8 Bytes::swap_u8(u8 x)
	.inline _raw_swap_u8,1
	movq     %rdi, %rax
	bswapq   %rax
	.end

	// Support for void Prefetch::read
	// In: %rdi = base address, %rsi = byte offset (interval).
	.inline _Prefetch_read,2
	prefetcht0 (%rdi, %rsi, 1)
	.end

	// Support for void Prefetch::write
	// We use prefetcht0 because em64t doesn't support prefetchw.
	// prefetchw is a 3dnow instruction.
	.inline _Prefetch_write,2
	prefetcht0 (%rdi, %rsi, 1)
	.end