1/* 2 * Copyright (c) 2003-2009 Apple, Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 29#include <sys/appleapiopts.h> 30#include <machine/cpu_capabilities.h> 31#include <machine/commpage.h> 32#include <mach/i386/syscall_sw.h> 33 34/* Temporary definitions. Replace by #including the correct file when available. 
*/ 35 36#define PTHRW_EBIT 0x01 37#define PTHRW_LBIT 0x02 38#define PTHRW_YBIT 0x04 39#define PTHRW_WBIT 0x08 40#define PTHRW_UBIT 0x10 41#define PTHRW_RETRYBIT 0x20 42#define PTHRW_TRYLKBIT 0x40 43 44#define PTHRW_INC 0x100 45#define PTHRW_BIT_MASK 0x000000ff; 46 47#define PTHRW_COUNT_SHIFT 8 48#define PTHRW_COUNT_MASK 0xffffff00 49#define PTHRW_MAX_READERS 0xffffff00 50 51#define KSYN_MLWAIT 301 /* mutex lock wait syscall */ 52 53#define PTHRW_STATUS_ACQUIRED 0 54#define PTHRW_STATUS_SYSCALL 1 55#define PTHRW_STATUS_ERROR 2 56 57#define PTHRW_LVAL 0 58#define PTHRW_UVAL 4 59 60 61 62/* PREEMPTION FREE ZONE (PFZ) 63 * 64 * A portion of the commpage is speacial-cased by the kernel to be "preemption free", 65 * ie as if we had disabled interrupts in user mode. This facilitates writing 66 * "nearly-lockless" code, for example code that must be serialized by a spinlock but 67 * which we do not want to preempt while the spinlock is held. 68 * 69 * The PFZ is implemented by collecting all the "preemption-free" code into a single 70 * contiguous region of the commpage. Register %ebx is used as a flag register; 71 * before entering the PFZ, %ebx is cleared. If some event occurs that would normally 72 * result in a premption while in the PFZ, the kernel sets %ebx nonzero instead of 73 * preempting. Then, when the routine leaves the PFZ we check %ebx and 74 * if nonzero execute a special "pfz_exit" syscall to take the delayed preemption. 75 * 76 * PFZ code must bound the amount of time spent in the PFZ, in order to control 77 * latency. Backward branches are dangerous and must not be used in a way that 78 * could inadvertently create a long-running loop. 79 * 80 * Because we need to avoid being preempted between changing the mutex stateword 81 * and entering the kernel to relinquish, some low-level pthread mutex manipulations 82 * are located in the PFZ. 
 */

/* Work around 10062261 with a dummy non-local symbol */
pthreads_dummy_symbol:

/* Internal routine to handle pthread mutex lock operation.  This is in the PFZ.
 *	%edi == ptr to LVAL/UVAL pair
 *	%esi == ptr to argument list on stack
 *	%ebx == preemption pending flag (kernel sets nonzero if we should preempt)
 *
 * Returns in %eax:
 *	PTHRW_STATUS_ACQUIRED - lock was taken here in user mode (TID stored in mutex)
 *	PTHRW_STATUS_SYSCALL  - we had to block via ksyn_mlwait; the kernel's return
 *				value has been stored through the syscall_return ptr
 *				(arg at 24(%esi))
 */
COMMPAGE_FUNCTION_START(pfz_mutex_lock, 32, 4)
	pushl	%ebp			// set up frame for backtrace
	movl	%esp,%ebp
1:
	movl	16(%esi),%ecx		// get mask (ie, PTHRW_EBIT etc)
2:
	movl	PTHRW_LVAL(%edi),%eax	// get mutex LVAL
	testl	%eax,%ecx		// is mutex available?
	jnz	5f			// no

	/* lock is available (if we act fast) */
	lea	PTHRW_INC(%eax),%edx	// copy original lval and bump sequence count
	orl	$PTHRW_EBIT, %edx	// set EBIT
	lock
	cmpxchgl %edx,PTHRW_LVAL(%edi)	// try to acquire lock for real
	jz	4f			// got it

	/* cmpxchg failed: somebody else changed LVAL under us */
3:
	testl	%ebx,%ebx		// kernel trying to preempt us?
	jz	2b			// no, so loop and try again
	COMMPAGE_CALL(_COMM_PAGE_PREEMPT,_COMM_PAGE_PFZ_MUTEX_LOCK,pfz_mutex_lock)
	jmp	1b			// loop to try again

	/* we acquired the mutex */
4:
	movl	20(%esi),%eax		// get ptr to TID field of mutex
	movl	8(%esi),%ecx		// get 64-bit mtid
	movl	12(%esi),%edx
	movl	%ecx,0(%eax)		// store my TID in mutex structure
	movl	%edx,4(%eax)
	movl	$PTHRW_STATUS_ACQUIRED,%eax
	popl	%ebp
	ret

	/* cannot acquire mutex, so update seq count, set "W", and block in kernel */
	/* this is where we cannot tolerate preemption or being killed */
5:
	lea	PTHRW_INC(%eax),%edx	// copy original lval and bump sequence count
	orl	$PTHRW_WBIT, %edx	// set WBIT
	lock
	cmpxchgl %edx,PTHRW_LVAL(%edi)	// try to update lock status atomically
	jnz	3b			// failed
	movl	20(%esi),%eax		// get ptr to TID field of mutex
	pushl	4(%esi)			// arg 5: flags from arg list
	pushl	4(%eax)			// arg 4: tid field from mutex (64-bit, two pushes)
	pushl	0(%eax)
	pushl	PTHRW_UVAL(%edi)	// arg 3: uval field from mutex
	pushl	%edx			// arg 2: new value of mutex lval field
	pushl	%edi			// arg 1: ptr to LVAL/UVAL pair in mutex
	call	6f			// make ksyn_mlwait call
	jc	6f			// immediately reissue syscall if error (CF set)
	movl	24(%esi),%edx		// get ptr to syscall_return arg
	movl	%eax,(%edx)		// save syscall return value
	movl	$PTHRW_STATUS_SYSCALL,%eax // we had to make syscall
	addl	$28,%esp		// pop off syscall args (24 bytes) and return address
	popl	%ebp			// pop off frame ptr
	ret

	/* subroutine to make a ksyn_mlwait syscall
	 * NOTE(review): %edx/%ecx here follow the sysenter convention — the kernel
	 * resumes at the address in %edx with the stack ptr from %ecx, ie at our
	 * caller's "jc" check with the args still on the stack.
	 */
6:
	movl	(%esp),%edx		// get return address but leave on stack
	movl	%esp,%ecx		// save stack ptr here
	movl	$KSYN_MLWAIT,%eax	// get syscall code
	orl	$0x00180000,%eax	// copy 24 bytes of arguments in trampoline
	xorl	%ebx,%ebx		// clear preemption flag
	sysenter
COMMPAGE_DESCRIPTOR(pfz_mutex_lock,_COMM_PAGE_PFZ_MUTEX_LOCK)



/************************* x86_64 versions follow **************************/



/* Internal routine to handle pthread mutex lock operation.  This is in the PFZ.
 *	%rdi = lvalp (ptr to LVAL/UVAL pair)
 *	%esi = flags
 *	%rdx = mtid (64-bit thread id to store on acquisition)
 *	%ecx = mask (ie, PTHRW_EBIT etc)
 *	%r8  = tidp (ptr to TID field of mutex)
 *	%r9  = &syscall_return
 *	%ebx = preemption pending flag (kernel sets nonzero if we should preempt)
 *
 * Returns in %eax:
 *	PTHRW_STATUS_ACQUIRED - lock was taken here in user mode (mtid stored via %r8)
 *	PTHRW_STATUS_SYSCALL  - we had to block via ksyn_mlwait; the kernel's return
 *				value has been stored through %r9
 */
COMMPAGE_FUNCTION_START(pfz_mutex_lock_64, 64, 4)
	pushq	%rbp			// set up frame for backtrace
	movq	%rsp,%rbp
1:
	movl	PTHRW_LVAL(%rdi),%eax	// get old lval from mutex
2:
	testl	%eax,%ecx		// can we acquire the lock?
	jnz	5f			// no

	/* lock is available (if we act fast) */
	lea	PTHRW_INC(%rax),%r11	// copy original lval and bump sequence count
	orl	$PTHRW_EBIT, %r11d	// set EBIT
	lock
	cmpxchgl %r11d,PTHRW_LVAL(%rdi)	// try to acquire lock
	jz	4f			// got it

	/* cmpxchg failed: somebody else changed LVAL under us */
3:
	testl	%ebx,%ebx		// kernel trying to preempt us?
	jz	2b			// no, so loop and try again
	COMMPAGE_CALL(_COMM_PAGE_PREEMPT,_COMM_PAGE_PFZ_MUTEX_LOCK,pfz_mutex_lock_64)
	jmp	1b			// loop to try again

	/* we acquired the mutex */
4:
	movq	%rdx,(%r8)		// store mtid in mutex structure
	movl	$PTHRW_STATUS_ACQUIRED,%eax
	popq	%rbp
	ret

	/* cannot acquire mutex, so update seq count and block in kernel */
	/* this is where we cannot tolerate preemption or being killed */
5:
	lea	PTHRW_INC(%rax),%r11	// copy original lval and bump sequence count
	orl	$PTHRW_WBIT, %r11d	// set WBIT
	lock
	cmpxchgl %r11d,PTHRW_LVAL(%rdi)	// try to update lock status atomically
	jnz	3b			// failed
	movq	(%r8),%r10		// arg 4: tid field from mutex [NB: passed in R10]
	movl	%esi,%r8d		// arg 5: flags from arg list
	movl	PTHRW_UVAL(%rdi),%edx	// arg 3: uval field from mutex
	movl	%r11d,%esi		// arg 2: new value of mutex lval field
					// arg 1: LVAL/UVAL ptr already in %rdi
6:
	movl	$(SYSCALL_CONSTRUCT_UNIX(KSYN_MLWAIT)),%eax
	pushq	%rdx			// some syscalls destroy %rdx so save it
	xorl	%ebx,%ebx		// clear preemption flag
	syscall
	popq	%rdx			// restore in case we need to re-execute syscall
	jc	6b			// immediately re-execute syscall if error (CF set)
	movl	%eax,(%r9)		// store kernel return value
	movl	$PTHRW_STATUS_SYSCALL,%eax // we made syscall
	popq	%rbp
	ret
COMMPAGE_DESCRIPTOR(pfz_mutex_lock_64,_COMM_PAGE_PFZ_MUTEX_LOCK)