/*	$NetBSD: subr_copy.c,v 1.19 2023/05/22 14:07:24 riastradh Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
 *	The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.19 2023/05/22 14:07:24 riastradh Exp $");

#define	__UFETCHSTORE_PRIVATE
#define	__UCAS_PRIVATE

#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

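/*
 * uio_setup_sysspace:
 *
 *	Initialize a uio for a transfer to or from kernel space by
 *	pointing its vmspace at the kernel vmspace.
 */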
void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = vmspace_kernel();
}

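/*
 * uiomove:
 *
 *	Copy up to n bytes between the kernel buffer buf and the space
 *	described by uio, in the direction given by uio->uio_rw,
 *	advancing the uio as the copy proceeds.
 */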
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;

	ASSERT_SLEEPABLE();

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
	while (n > 0 && uio->uio_resid) {
		KASSERT(uio->uio_iovcnt > 0);
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			KASSERT(uio->uio_iovcnt > 1);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			preempt_point();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base,
			    cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp,
			    cnt);
		}
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return (error);
}
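
/*
 * Illustrative sketch of a typical caller (not part of this file): a
 * character device read routine hands uiomove() a kernel buffer plus the
 * uio describing the user's request, and uiomove() copies at most
 * uio_resid bytes.  The mydev_read name and the softc fields (sc_buf,
 * sc_buflen) below are hypothetical.
 *
 *	static int
 *	mydev_read(dev_t dev, struct uio *uio, int flags)
 *	{
 *		struct mydev_softc *sc = device_lookup_private(&mydev_cd,
 *		    minor(dev));
 *		size_t off;
 *
 *		if (uio->uio_offset < 0 ||
 *		    (size_t)uio->uio_offset >= sc->sc_buflen)
 *			return 0;
 *		off = (size_t)uio->uio_offset;
 *		return uiomove((char *)sc->sc_buf + off,
 *		    sc->sc_buflen - off, uio);
 *	}
 */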

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.
 */
int
uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
{
	size_t offset;

	if (uio->uio_offset < 0 || /* uio->uio_resid < 0 || */
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (offset >= buflen)
		return (0);
	return (uiomove((char *)buf + offset, buflen - offset, uio));
}

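/*
 * uiopeek:
 *
 *	Copy data like uiomove(), but without advancing the uio or its
 *	iovecs, so the same bytes can be transferred again later.
 */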
int
uiopeek(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;
	size_t resid = uio->uio_resid;
	int iovcnt = uio->uio_iovcnt;
	char *base;
	size_t len;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);

	if (n == 0 || resid == 0)
		return 0;
	iov = uio->uio_iov;
	base = iov->iov_base;
	len = iov->iov_len;

	while (n > 0 && resid > 0) {
		KASSERT(iovcnt > 0);
		cnt = len;
		if (cnt == 0) {
			KASSERT(iovcnt > 1);
			iov++;
			iovcnt--;
			base = iov->iov_base;
			len = iov->iov_len;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			preempt_point();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, base, cnt);
		} else {
			error = copyin_vmspace(vm, base, cp, cnt);
		}
		if (error) {
			break;
		}
		base += cnt;
		len -= cnt;
		resid -= cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return error;
}

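/*
 * uioskip:
 *
 *	Advance the uio by n bytes without copying any data; n must not
 *	exceed the residual count.
 */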
void
uioskip(size_t n, struct uio *uio)
{
	struct iovec *iov;
	size_t cnt;

	KASSERTMSG(n <= uio->uio_resid, "n=%zu resid=%zu", n, uio->uio_resid);

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
	while (n > 0 && uio->uio_resid) {
		KASSERT(uio->uio_iovcnt > 0);
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			KASSERT(uio->uio_iovcnt > 1);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}
}

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;

	if (uio->uio_resid <= 0)
		panic("ureadc: non-positive resid");
again:
	if (uio->uio_iovcnt <= 0)
		panic("ureadc: non-positive iovcnt");
	iov = uio->uio_iov;
	if (iov->iov_len <= 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		int error;
		if ((error = ustore_char(iov->iov_base, c)) != 0)
			return (error);
	} else {
		*(char *)iov->iov_base = c;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

/*
 * Like copyin(), but operates on an arbitrary vmspace.
 */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(uaddr, kaddr, len);
	}
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyin(uaddr, kaddr, len);
	}

	iov.iov_base = kaddr;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}

/*
 * Like copyout(), but operates on an arbitrary vmspace.
 */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(kaddr, uaddr, len);
	}
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyout(kaddr, uaddr, len);
	}

	iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}

/*
 * Like copyin(), but operates on an arbitrary process.
 */
int
copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error) {
		return error;
	}
	error = copyin_vmspace(vm, uaddr, kaddr, len);
	uvmspace_free(vm);

	return error;
}

/*
 * Like copyout(), but operates on an arbitrary process.
 */
int
copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error) {
		return error;
	}
	error = copyout_vmspace(vm, kaddr, uaddr, len);
	uvmspace_free(vm);

	return error;
}

/*
 * Like copyin(), but operates on an arbitrary pid.
 */
int
copyin_pid(pid_t pid, const void *uaddr, void *kaddr, size_t len)
{
	struct proc *p;
	struct vmspace *vm;
	int error;

	mutex_enter(&proc_lock);
	p = proc_find(pid);
	if (p == NULL) {
		mutex_exit(&proc_lock);
		return ESRCH;
	}
	mutex_enter(p->p_lock);
	error = proc_vmspace_getref(p, &vm);
	mutex_exit(p->p_lock);
	mutex_exit(&proc_lock);

	if (error == 0) {
		error = copyin_vmspace(vm, uaddr, kaddr, len);
		uvmspace_free(vm);
	}
	return error;
}

/*
 * Like copyin(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyin(src, dst, len);
}

/*
 * Like copyout(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyout(src, dst, len);
}

/*
 * User-space CAS / fetch / store
 */

#ifdef __NO_STRICT_ALIGNMENT
#define	CHECK_ALIGNMENT()	__nothing
#else /* ! __NO_STRICT_ALIGNMENT */
static bool
ufetchstore_aligned(uintptr_t uaddr, size_t size)
{
	return (uaddr & (size - 1)) == 0;
}

#define	CHECK_ALIGNMENT()						\
do {									\
	if (!ufetchstore_aligned((uintptr_t)uaddr, sizeof(*uaddr)))	\
		return EFAULT;						\
} while (/*CONSTCOND*/0)
#endif /* __NO_STRICT_ALIGNMENT */

/*
 * __HAVE_UCAS_FULL platforms provide _ucas_32() and _ucas_64() themselves.
 * _RUMPKERNEL also provides its own _ucas_32() and _ucas_64().
 *
 * In all other cases, we provide generic implementations that work on
 * all platforms.
 */

#if !defined(__HAVE_UCAS_FULL) && !defined(_RUMPKERNEL)
#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/ipi.h>

static int ucas_critical_splcookie;
static volatile u_int ucas_critical_pausing_cpus;
static u_int ucas_critical_ipi;
static ONCE_DECL(ucas_critical_init_once);

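/*
 * ucas_critical_cpu_gate:
 *
 *	IPI handler run on every other CPU while a generic ucas is in
 *	progress: report that this CPU has stopped, then spin until
 *	ucas_critical_exit() reopens the gate.
 */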
static void
ucas_critical_cpu_gate(void *arg __unused)
{
	int count = SPINLOCK_BACKOFF_MIN;

	KASSERT(atomic_load_relaxed(&ucas_critical_pausing_cpus) > 0);

	/*
	 * Notify ucas_critical_wait that we have stopped.  Using
	 * store-release ensures all our memory operations up to the
	 * IPI happen before the ucas -- no buffered stores on our end
	 * can clobber it later on, for instance.
	 *
	 * Matches atomic_load_acquire in ucas_critical_wait -- turns
	 * the following atomic_dec_uint into a store-release.
	 */
	membar_release();
	atomic_dec_uint(&ucas_critical_pausing_cpus);

	/*
	 * Wait for ucas_critical_exit to reopen the gate and let us
	 * proceed.  Using a load-acquire ensures the ucas happens
	 * before any of our memory operations when we return from the
	 * IPI and proceed -- we won't observe any stale cached value
	 * that the ucas overwrote, for instance.
	 *
	 * Matches atomic_store_release in ucas_critical_exit.
	 */
	while (atomic_load_acquire(&ucas_critical_pausing_cpus) != (u_int)-1) {
		SPINLOCK_BACKOFF(count);
	}
}

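/*
 * ucas_critical_init:
 *
 *	Register the gate IPI; run once, the first time a generic ucas
 *	needs to pause the other CPUs.
 */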
static int
ucas_critical_init(void)
{

	ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
	return 0;
}

static void
ucas_critical_wait(void)
{
	int count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Wait for all CPUs to stop at the gate.  Using a load-acquire
	 * ensures all memory operations before they stop at the gate
	 * happen before the ucas -- no buffered stores in other CPUs
	 * can clobber it later on, for instance.
	 *
	 * Matches membar_release/atomic_dec_uint (store-release) in
	 * ucas_critical_cpu_gate.
	 */
	while (atomic_load_acquire(&ucas_critical_pausing_cpus) > 0) {
		SPINLOCK_BACKOFF(count);
	}
}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

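/*
 * ucas_critical_enter:
 *
 *	Begin a generic ucas: either park every other CPU behind the IPI
 *	gate (MP kernels without __HAVE_UCAS_MP) or simply disable
 *	preemption on this CPU.
 */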
static inline void
ucas_critical_enter(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		RUN_ONCE(&ucas_critical_init_once, ucas_critical_init);

		/*
		 * Acquire the mutex first, then go to splhigh() and
		 * broadcast the IPI to lock all of the other CPUs
		 * behind the gate.
		 *
		 * N.B. Going to splhigh() implicitly disables preemption,
		 * so there's no need to do it explicitly.
		 */
		mutex_enter(&cpu_lock);
		ucas_critical_splcookie = splhigh();
		ucas_critical_pausing_cpus = ncpu - 1;
		ipi_trigger_broadcast(ucas_critical_ipi, true);
		ucas_critical_wait();
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_DISABLE(l);
}

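/*
 * ucas_critical_exit:
 *
 *	End a generic ucas: reopen the gate for the paused CPUs, or
 *	re-enable preemption, undoing ucas_critical_enter().
 */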
static inline void
ucas_critical_exit(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		/*
		 * Open the gate and notify all CPUs in
		 * ucas_critical_cpu_gate that they can now proceed.
		 * Using a store-release ensures the ucas happens
		 * before any memory operations they issue after the
		 * IPI -- they won't observe any stale cache of the
		 * target word, for instance.
		 *
		 * Matches atomic_load_acquire in ucas_critical_cpu_gate.
		 */
		atomic_store_release(&ucas_critical_pausing_cpus, (u_int)-1);
		splx(ucas_critical_splcookie);
		mutex_exit(&cpu_lock);
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_ENABLE(l);
}

int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	lwp_t * const l = curlwp;
	uint32_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_32(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_32(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}

#ifdef _LP64
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	lwp_t * const l = curlwp;
	uint64_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_64(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_64(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
#endif /* _LP64 */
#endif /* ! __HAVE_UCAS_FULL && ! _RUMPKERNEL */

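/*
 * ucas_32, ucas_64:
 *
 *	Compare-and-swap a 32-bit (or, on _LP64, 64-bit) word in user
 *	space, returning the previously fetched value in *ret.  Must be
 *	called from a sleepable context on a naturally aligned address.
 */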
int
ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_32_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_32(uaddr, old, new, ret);
}

#ifdef _LP64
int
ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_64_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_64(uaddr, old, new, ret);
}
#endif /* _LP64 */

__strong_alias(ucas_int,ucas_32);
#ifdef _LP64
__strong_alias(ucas_ptr,ucas_64);
#else
__strong_alias(ucas_ptr,ucas_32);
#endif /* _LP64 */

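/*
 * ufetch_8, ufetch_16, ufetch_32, ufetch_64:
 *
 *	Fetch a naturally aligned 8-, 16-, 32-, or (on _LP64) 64-bit
 *	value from user space into *valp.  Must be called from a
 *	sleepable context.
 */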
int
ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_8(uaddr, valp);
}

int
ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_16(uaddr, valp);
}

int
ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_32(uaddr, valp);
}

#ifdef _LP64
int
ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_64(uaddr, valp);
}
#endif /* _LP64 */

__strong_alias(ufetch_char,ufetch_8);
__strong_alias(ufetch_short,ufetch_16);
__strong_alias(ufetch_int,ufetch_32);
#ifdef _LP64
__strong_alias(ufetch_long,ufetch_64);
__strong_alias(ufetch_ptr,ufetch_64);
#else
__strong_alias(ufetch_long,ufetch_32);
__strong_alias(ufetch_ptr,ufetch_32);
#endif /* _LP64 */

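/*
 * ustore_8, ustore_16, ustore_32, ustore_64:
 *
 *	Store a naturally aligned 8-, 16-, 32-, or (on _LP64) 64-bit
 *	value val to user space.  Must be called from a sleepable
 *	context.
 */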
int
ustore_8(uint8_t *uaddr, uint8_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_8(uaddr, val);
}

int
ustore_16(uint16_t *uaddr, uint16_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_16(uaddr, val);
}

int
ustore_32(uint32_t *uaddr, uint32_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_32(uaddr, val);
}

#ifdef _LP64
int
ustore_64(uint64_t *uaddr, uint64_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_64(uaddr, val);
}
#endif /* _LP64 */

__strong_alias(ustore_char,ustore_8);
__strong_alias(ustore_short,ustore_16);
__strong_alias(ustore_int,ustore_32);
#ifdef _LP64
__strong_alias(ustore_long,ustore_64);
__strong_alias(ustore_ptr,ustore_64);
#else
__strong_alias(ustore_long,ustore_32);
__strong_alias(ustore_ptr,ustore_32);
#endif /* _LP64 */