Searched refs:shift (Results 1 - 21 of 21) sorted by relevance

/xnu-2782.1.97/osfmk/i386/vmx/
vmx_cpu.h:73 #define VMX_CAP(msr, shift, mask) (rdmsr64(msr) & ((mask) << (shift)))
vmx_cpu.c:380 #define CHK(msr, shift, mask) if (!VMX_CAP(msr, shift, mask)) return FALSE;
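
The VMX_CAP/CHK pair above probes VMX capability bits: read the 64-bit capability MSR and AND it against a mask shifted into position, bailing out of the feature check on the first missing bit. A minimal user-space sketch of the same pattern; rdmsr64() is kernel-only, so a stub stands in, and the IA32_VMX_BASIC bit-55 check ("TRUE controls supported") is an illustrative choice:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stub: in the kernel, rdmsr64() performs a real MSR read. */
    static uint64_t rdmsr64_stub(uint32_t msr) { (void)msr; return 1ULL << 55; }

    /* Same shape as VMX_CAP: isolate the (mask << shift) bits of the MSR value. */
    #define VMX_CAP(msr, shift, mask) (rdmsr64_stub(msr) & ((uint64_t)(mask) << (shift)))

    /* Same shape as CHK in vmx_cpu.c: fail the probe on the first missing bit. */
    #define CHK(msr, shift, mask) if (!VMX_CAP(msr, shift, mask)) return false;

    static bool vmx_caps_present(void)
    {
        CHK(0x480 /* IA32_VMX_BASIC */, 55, 1ULL)   /* "TRUE" ctl MSRs supported */
        return true;
    }
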
/xnu-2782.1.97/bsd/sys/
quota.h:217 #define dqhash1(id, shift, mask) \
218 ((((id) * 2654435761U) >> (shift)) & (mask))
229 * Compute the hash shift value.
237 int shift; local
239 for (shift = 32; size > 1; size >>= 1, --shift)
241 return (shift);
262 int qf_shift; /* primary hash shift */
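
dqhash1() above is multiplicative (Fibonacci) hashing: multiply the id by the 32-bit golden-ratio constant 2654435761 and keep the top bits, and the loop at quota.h line 239 sizes that shift for a power-of-two table. Put together as a compilable sketch (the helper name dqhashshift and the usage line are assumptions based on the snippet):

    #include <stdint.h>

    /* Multiplicative hash: the high bits of id * 2654435761 are well mixed. */
    #define dqhash1(id, shift, mask) \
            ((((id) * 2654435761U) >> (shift)) & (mask))

    /* For a power-of-two table size, compute the right shift that leaves
     * exactly log2(size) high bits (the loop from quota.h line 239). */
    static int
    dqhashshift(uint32_t size)
    {
        int shift;

        for (shift = 32; size > 1; size >>= 1, --shift)
            continue;
        return (shift);
    }

    /* Usage: a 64-slot table gives shift 26, so dqhash1(id, 26, 64 - 1)
     * yields an index in 0..63. */
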
/xnu-2782.1.97/libkern/gen/
OSAtomicOperations.c:112 int shift = (UInt32) *(((UInt8 *) &shiftValues) + alignment); local
117 mask <<= shift; local
120 oldValue = (oldValue & ~mask) | (oldValue8 << shift);
121 newValue = (oldValue & ~mask) | (newValue8 << shift);
220 UInt32 shift = (UInt32) *(((UInt8 *) &shiftValues) + alignment); local
225 mask <<= shift; local
228 oldValue = (oldValue & ~mask) | (oldValue16 << shift);
229 newValue = (oldValue & ~mask) | (newValue16 << shift);
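
The OSAtomicOperations.c lines above emulate an 8-bit (and, in the second hit, a 16-bit) compare-and-swap on top of a 32-bit one: locate the byte inside its naturally aligned 32-bit word, build a mask at that byte's shift, splice the old and new byte values into the containing word, and CAS the whole word. A self-contained little-endian sketch; xnu derives the shift from a per-endianness shiftValues table rather than computing it inline:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool
    cas8_via_cas32(uint8_t oldValue8, uint8_t newValue8, volatile uint8_t *addr8)
    {
        uintptr_t a = (uintptr_t)addr8;
        volatile _Atomic uint32_t *addr32 =
            (volatile _Atomic uint32_t *)(a & ~(uintptr_t)3);  /* align down */
        unsigned shift = (unsigned)(a & 3) * 8;  /* byte position, little-endian */
        uint32_t mask  = (uint32_t)0xFF << shift;

        uint32_t word = atomic_load(addr32);
        /* Splice the expected and desired bytes into the surrounding word. */
        uint32_t oldValue = (word & ~mask) | ((uint32_t)oldValue8 << shift);
        uint32_t newValue = (word & ~mask) | ((uint32_t)newValue8 << shift);
        return atomic_compare_exchange_strong(addr32, &oldValue, newValue);
    }

The kernel routine wraps this in a retry loop, since the word-sized CAS can fail because a neighboring byte changed, not the target byte.
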
/xnu-2782.1.97/osfmk/i386/
rtclock.c:119 * with this ratio expressed as a 32-bit scale and shift
123 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
132 commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
145 _pal_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
208 _pal_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
326 uint32_t shift = 0; local
331 shift++;
337 rntp->shift = shift;
358 if (rntp->shift !
[all...]
pal_native.h:75 uint32_t shift; /* shift is nonzero only on "slow" machines, */ member in struct:pal_rtc_nanotime
pal_routines.h:173 uint32_t shift,
pmCPU.h:91 uint32_t shift; /* tsc -> nanosec shift/div */ member in struct:pm_rtc_nanotime
pmCPU.c:859 rtc_nanotime->shift = pal_rtc_nanotime_info.shift;
genassym.c:486 offsetof(pal_rtc_nanotime_t, shift));
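
The rtclock.c hits above are xnu's TSC-to-nanoseconds fixed-point setup: nanoseconds per TSC tick is stored as a 32.32 "scale", and on TSCs slower than roughly 1 GHz the frequency is doubled (recording a "shift") until the scale fits in 32 bits; the pal_native.h comment confirms shift is nonzero only on such slow machines. A sketch of both halves, with an illustrative threshold value; the kernel does the wide multiply in assembly, so __uint128_t (a gcc/clang extension) stands in here:

    #include <stdint.h>

    #define NSEC_PER_SEC        1000000000ULL
    #define SLOW_TSC_THRESHOLD  1000000000ULL   /* illustrative: ~1 GHz */

    static uint32_t nt_scale;   /* 32.32 fixed-point ns per (shifted) tick */
    static uint32_t nt_shift;   /* nonzero only on "slow" machines */

    static void
    set_timescale(uint64_t tscFreq)
    {
        uint32_t shift = 0;

        /* Double the frequency until the scale fits in 32 bits. */
        while (tscFreq <= SLOW_TSC_THRESHOLD) {
            shift++;
            tscFreq <<= 1;
        }
        nt_scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / tscFreq);
        nt_shift = shift;
    }

    /* ns = ns_base + (((tsc - tsc_base) << shift) * scale) >> 32 */
    static uint64_t
    tsc_to_ns(uint64_t tsc, uint64_t tsc_base, uint64_t ns_base)
    {
        uint64_t delta = (tsc - tsc_base) << nt_shift;
        return ns_base + (uint64_t)(((__uint128_t)delta * nt_scale) >> 32);
    }

This {tsc_base, ns_base, scale, shift} tuple is exactly what commpage_set_nanotime() in the next group publishes to user space.
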
/xnu-2782.1.97/osfmk/i386/commpage/
commpage.h:143 extern void commpage_set_nanotime(uint64_t tsc_base, uint64_t ns_base, uint32_t scale, uint32_t shift);
commpage.c:525 uint32_t shift )
539 if ((shift != 0) && ((_cpu_capabilities & kSlow)==0) )
558 p32->nt_shift = shift;
559 p64->nt_shift = shift;
/xnu-2782.1.97/iokit/Kernel/
IOLib.cpp:1046 register int shift; local
1049 for (shift = 1; shift < intsize; shift++) {
1051 return (IOAlignment)(intsize - shift);
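
The IOLib.cpp loop converts a size to an IOAlignment (a log2 value) by shifting the size left until its top bit appears; intsize - shift is then the index of the most significant set bit, i.e. floor(log2(size)). A sketch of the loop as shown, with a stand-in typedef and the no-op 'register' dropped:

    #include <stdint.h>

    typedef int32_t IOAlignment;   /* stand-in for IOKit's IOAlignment */

    static IOAlignment
    size_to_alignment(unsigned int size)
    {
        int shift;
        const int intsize = sizeof(unsigned int) * 8;

        /* Walk the value left until bit 31 is set; intsize - shift is
         * then the index of the most significant set bit. */
        for (shift = 1; shift < intsize; shift++) {
            if (size & 0x80000000U)
                return (IOAlignment)(intsize - shift);
            size <<= 1;
        }
        return 0;
    }

    /* Equivalent on gcc/clang: size ? 31 - __builtin_clz(size) : 0 */
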
/xnu-2782.1.97/bsd/dev/i386/
sysctl.c:753 SYSCTL_UINT(_machdep_tsc_nanotime, OID_AUTO, shift,
755 (uint32_t *)&pal_rtc_nanotime_info.shift, 0, "");
fasttrap_isa.c:220 int shift = function_entry ? 1 : 0; local
246 stack = regs64->isf.rsp + sizeof(uint64_t) * (argno - 6 + shift);
253 value = dtrace_fuword32((user_addr_t)(unsigned long)&stack[argno + shift]);
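
The fasttrap_isa.c shift covers x86-64 argument fetching: arguments 0-5 live in registers, argument n >= 6 lives on the user stack, and at a function-entry probe the return address still occupies the top stack slot, so every stack index is shifted up by one. A sketch of the 64-bit path; read_user_u64() is a stand-in for dtrace_fuword64():

    #include <stdint.h>

    /* Stand-in for dtrace_fuword64(): a fault-safe read of user memory. */
    extern uint64_t read_user_u64(uint64_t uaddr);

    static uint64_t
    fetch_arg64(uint64_t rsp, int argno, int function_entry)
    {
        int shift = function_entry ? 1 : 0;   /* skip the return address */

        /* Only valid for argno >= 6; lower arguments come from registers. */
        uint64_t slot = rsp + sizeof(uint64_t) * (uint64_t)(argno - 6 + shift);
        return read_user_u64(slot);
    }
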
/xnu-2782.1.97/osfmk/default_pager/
dp_backing_store.c:107 * 0 means no shift to pages, so == 1 page/cluster. 1 would mean
245 static inline void ps_vnode_trim_more(struct ps_vnode_trim_data *data, struct vs_map *map, unsigned int shift, dp_size_t length);
495 * Set up default page shift, but only if not already
531 * Keep cluster size in bit shift because it's quicker
1162 ("device=0x%x,offset=0x%x,count=0x%x,record_size=0x%x,shift=%d,total_size=0x%x\n",
1411 unsigned int shift,
1434 ASSERT(ps->ps_clshift >= shift);
1526 (ps->ps_clshift >= shift)) {
1601 * Must pass cluster shift to find the most appropriate segment.
4356 ("device=0x%x,offset=0x%x,count=0x%x,record_size=0x%x,shift
1410 ps_select_segment( unsigned int shift, int *psindex) argument
4451 ps_vnode_trim_more(struct ps_vnode_trim_data *data, struct vs_map *map, unsigned int shift, dp_size_t length) argument
[all...]
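
dp_backing_store.c keeps the pages-per-cluster figure as a bit shift (ps_clshift): 0 means one page per cluster, 1 means two, and so on, so cluster/page conversions become single shifts instead of multiplies or divides, and ps_select_segment() can prefer paging segments whose ps_clshift covers the requested shift. Illustrative helpers, not xnu API:

    #include <stdint.h>

    /* Cluster index -> first page index: multiply by 2^clshift. */
    static inline uint64_t
    cluster_to_page(uint64_t cluster, unsigned int clshift)
    {
        return cluster << clshift;
    }

    /* Page index -> containing cluster index: divide by 2^clshift. */
    static inline uint64_t
    page_to_cluster(uint64_t page, unsigned int clshift)
    {
        return page >> clshift;
    }
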
/xnu-2782.1.97/bsd/vfs/
vfs_subr.c:3549 int shift; local
3552 * Work out how far we have to shift the block count down to make it fit.
3553 * Note that it's possible to have to shift so far that the resulting
3560 for (shift = 0; shift < 32; shift++) {
3561 if ((sp->f_blocks >> shift) <= INT_MAX)
3563 if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX)
3567 sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
3568 sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
[all...]
vfs_syscalls.c:10295 int shift; local
10298 * Work out how far we have to shift the block count down to make it fit.
10299 * Note that it's possible to have to shift so far that the resulting
10306 for (shift = 0; shift < 32; shift++) {
10307 if ((sfsp->f_blocks >> shift) <= INT_MAX)
10309 if ((sfsp->f_bsize << (shift + 1)) > INT_MAX)
10313 sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sfsp->f_blocks, shift);
10314 sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sfsp->f_bfree, shift);
[all...]
/xnu-2782.1.97/osfmk/kern/
sched_prim.c:663 uint32_t shift; local
693 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
695 sched_fixed_shift = shift;
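
The sched_prim.c loop calibrates sched_fixed_shift: halve one tick's worth of absolute time until it drops to BASEPRI_DEFAULT, so converting accumulated CPU usage into a timeshare priority adjustment becomes a single right shift. A sketch under that reading of the snippet (BASEPRI_DEFAULT is 31 in xnu):

    #include <stdint.h>

    #define BASEPRI_DEFAULT 31   /* xnu's default timeshare base priority */

    static uint32_t
    compute_fixed_shift(uint64_t abstime_per_tick)
    {
        uint32_t shift;
        uint64_t abstime = abstime_per_tick;

        /* Smallest shift that maps a tick's abstime onto the priority range. */
        for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
            abstime >>= 1;
        return shift;
    }
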
/xnu-2782.1.97/bsd/net/pktsched/
pktsched_qfq.c:1620 qfq_round_down(u_int64_t ts, u_int32_t shift) argument
1622 return (ts & ~((1ULL << shift) - 1));
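
qfq_round_down() is shown whole: it rounds a QFQ virtual timestamp down to a multiple of 2^shift by clearing the low bits. The same function standalone, with a worked value:

    #include <stdint.h>

    static uint64_t
    qfq_round_down(uint64_t ts, uint32_t shift)
    {
        /* Clear the low 'shift' bits: ts rounded down to a multiple of 2^shift. */
        return (ts & ~((1ULL << shift) - 1));
    }

    /* qfq_round_down(0x12345, 8) == 0x12300 */
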
/xnu-2782.1.97/bsd/dev/dtrace/
dtrace.c:1268 * Shift the 128-bit value in a by b. If b is positive, shift left.
1269 * If b is negative, shift right.
5522 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; local
5560 while (shift >= 0) {
5561 mask = (uintptr_t)0xf << shift;
5563 if (val >= ((uintptr_t)1 << shift))
5564 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
5565 shift -= 4;
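
The dtrace.c loop at 5560 formats a pointer as hex one nibble at a time: shift starts at the top nibble (60 on a 64-bit kernel, since NBBY is 8), the val >= (1 << shift) test suppresses leading zeros, and each pass extracts four bits. Reconstructed as a standalone helper; the zero-value fallback at the end is an addition, not in the snippet:

    #include <stddef.h>
    #include <stdint.h>

    #define NBBY 8   /* bits per byte, as in <sys/param.h> */

    /* c must hold at least 2 * sizeof(uintptr_t) characters. */
    static size_t
    format_hex(uintptr_t val, char *c)
    {
        int shift = (int)(sizeof (uintptr_t) * NBBY) - 4;
        size_t i = 0;
        uintptr_t mask;

        while (shift >= 0) {
            mask = (uintptr_t)0xf << shift;
            /* True for every nibble at or below the leading nonzero one,
             * so leading zeros are skipped. */
            if (val >= ((uintptr_t)1 << shift))
                c[i++] = "0123456789abcdef"[(val & mask) >> shift];
            shift -= 4;
        }
        if (i == 0)
            c[i++] = '0';   /* addition: make val == 0 print something */
        return i;
    }
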

Completed in 136 milliseconds