Lines matching refs: to

14  * Architectures that support memory tagging (assigning tags to memory regions,
15 * embedding these tags into addresses that point to these memory regions, and
17 * redefine this macro to strip tags from pointers.
19 * Passing down mm_struct makes it possible to define untagging rules on a per-process
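For reference, a minimal sketch of the generic fallback this comment sits next to: on architectures without memory tagging, untagging is a no-op that returns the address unchanged. The exact argument list varies across kernel versions (some variants also take the mm_struct mentioned above), so treat this as illustrative only.

#ifndef untagged_addr
#define untagged_addr(addr) (addr)      /* no tags to strip on this arch */
#endif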
36 * Architectures should provide two primitives (raw_copy_{to,from}_user())
37 * and get rid of their private instances of copy_{to,from}_user() and
38 * __copy_{to,from}_user{,_inatomic}().
40 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
41 * return the amount left to copy. They should assume that access_ok() has
45 * Both of these functions should attempt to copy size bytes starting at from
46 * into the area starting at to. They must not fetch or store anything
50 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
51 * at to must become equal to the bytes fetched from the corresponding area
52 * starting at from. All data past to + size - N must be left unmodified.
55 * fetched, it is permitted to copy less than had been fetched; the only
58 * have to squeeze as much as possible - it is allowed, but not necessary.
60 * For raw_copy_from_user(), to always points to kernel memory and no faults
64 * Both can be inlined - it's up to each architecture whether it wants to bother
65 * with that. They should not be used directly; they are used to implement
66 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user_inatomic())
68 * copy_{to,from}_user() might or might not be inlined. If you want them
75 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
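As an illustrative sketch of the contract described above (not any real architecture's code): on a hypothetical configuration where user pointers are directly dereferenceable and cannot fault, as on some NOMMU setups, the two primitives can degenerate to memcpy(). A real implementation must handle faults and return the number of bytes left uncopied.

#include <linux/string.h>       /* memcpy() */

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        memcpy(to, (__force const void *)from, n);
        return 0;                       /* nothing left to copy */
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        memcpy((__force void *)to, from, n);
        return 0;                       /* nothing left to copy */
}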
81 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
85 instrument_copy_from_user_before(to, from, n);
86 check_object_size(to, n, false);
87 res = raw_copy_from_user(to, from, n);
88 instrument_copy_from_user_after(to, from, n, res);
93 __copy_from_user(void *to, const void __user *from, unsigned long n)
98 instrument_copy_from_user_before(to, from, n);
101 check_object_size(to, n, false);
102 res = raw_copy_from_user(to, from, n);
103 instrument_copy_from_user_after(to, from, n, res);
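A usage sketch with a hypothetical helper: __copy_from_user() assumes access_ok() has already succeeded, so a caller that bypasses the checked copy_from_user() wrapper must do that validation itself.

#include <linux/errno.h>
#include <linux/uaccess.h>

static int fetch_user_buf(void *kbuf, const void __user *ubuf, unsigned long len)
{
        if (!access_ok(ubuf, len))
                return -EFAULT;         /* range not plausibly in user space */
        if (__copy_from_user(kbuf, ubuf, len))
                return -EFAULT;         /* faulted partway through the copy */
        return 0;
}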
109 * @to: Destination address, in user space.
111 * @n: Number of bytes to copy.
115 * Copy data from kernel space to user space. Caller must check
121 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
125 instrument_copy_to_user(to, from, n);
127 return raw_copy_to_user(to, from, n);
131 __copy_to_user(void __user *to, const void *from, unsigned long n)
136 instrument_copy_to_user(to, from, n);
138 return raw_copy_to_user(to, from, n);
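The mirror-image direction, again as a hypothetical sketch: __copy_to_user() likewise trusts that the destination range was validated beforehand, and a non-zero return means some bytes could not be stored.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int store_user_u32(u32 __user *uptr, u32 val)
{
        if (!access_ok(uptr, sizeof(*uptr)))
                return -EFAULT;
        /* returns the number of bytes it could NOT store */
        return __copy_to_user(uptr, &val, sizeof(val)) ? -EFAULT : 0;
}

For a single scalar like this, put_user() is the more usual interface; the helper above only illustrates the return-value convention.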
143 _copy_from_user(void *to, const void __user *from, unsigned long n)
148 instrument_copy_from_user_before(to, from, n);
149 res = raw_copy_from_user(to, from, n);
150 instrument_copy_from_user_after(to, from, n, res);
153 memset(to + (n - res), 0, res);
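The memset() shown at line 153 is the zero-padding step: whatever could not be fetched from userspace is cleared in the kernel buffer, so callers never see stale kernel memory in the tail. A sketch of what that means for a caller (struct and helper are hypothetical):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_args {                      /* hypothetical argument block */
        __u64 flags;
        __u64 addr;
};

static int fetch_demo_args(struct demo_args *ka, const void __user *uarg)
{
        /* Even on a partial fault, the uncopied tail of *ka has been
         * zeroed by _copy_from_user(), never left as stale stack data. */
        if (copy_from_user(ka, uarg, sizeof(*ka)))
                return -EFAULT;
        return 0;
}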
163 _copy_to_user(void __user *to, const void *from, unsigned long n)
168 if (access_ok(to, n)) {
169 instrument_copy_to_user(to, from, n);
170 n = raw_copy_to_user(to, from, n);
180 copy_from_user(void *to, const void __user *from, unsigned long n)
182 if (check_copy_size(to, n, false))
183 n = _copy_from_user(to, from, n);
188 copy_to_user(void __user *to, const void *from, unsigned long n)
191 n = _copy_to_user(to, from, n);
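Putting the checked wrappers together, a sketch of the usual calling convention (the ioctl-style handler and struct are hypothetical): both functions return the number of bytes that could not be copied, so any non-zero result is normally turned into -EFAULT.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_req {                       /* hypothetical uAPI struct */
        __u32 in;
        __u32 out;
};

static long demo_ioctl(void __user *argp)
{
        struct demo_req req;

        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;

        req.out = req.in * 2;           /* whatever work the handler does */

        if (copy_to_user(argp, &req, sizeof(req)))
                return -EFAULT;
        return 0;
}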
220 * not take any locks and go straight to the fixup table.
229 * make sure to have issued the store before a pagefault
238 * make sure to issue those last loads/stores before enabling
258 * stick to pagefault_disabled().
259 * Please NEVER use preempt_disable() to disable the fault handler. With
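A sketch of the pattern these comments describe (the helper is hypothetical; in-tree code would normally reach for copy_from_user_nofault()): with the pagefault handler disabled, a faulting access goes straight to the fixup table instead of sleeping, so the copy either succeeds immediately or fails and the caller retries from a context that may fault.

#include <linux/uaccess.h>

static unsigned long peek_user_nofault(void *dst, const void __user *src,
                                       unsigned long len)
{
        unsigned long left = len;

        if (!access_ok(src, len))
                return left;

        pagefault_disable();            /* faults now fail fast, never sleep */
        left = __copy_from_user_inatomic(dst, src, len);
        pagefault_enable();

        return left;                    /* non-zero: retry from a sleepable context */
}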
290 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
293 return __copy_from_user_inatomic(to, from, n);
308 * Copies a struct from userspace to kernel space, in a way that guarantees
310 * struct extensions are made such that all new fields are *appended* to the
334 * There are three cases to consider:
336 * * If @usize < @ksize, then the userspace has passed an old struct to a
338 * are to be zero-filled.
339 * * If @usize > @ksize, then the userspace has passed a new struct to an
340 * older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
341 * are checked to ensure they are zeroed, otherwise -E2BIG is returned.
345 * * -EFAULT: access to userspace failed.
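A sketch of the extensible-syscall pattern this kernel-doc describes (struct and handler below are hypothetical): the kernel passes its own sizeof as @ksize and whatever size userspace claimed as @usize, and copy_struct_from_user() reconciles the two according to the rules above.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_attr {                      /* hypothetical extensible uAPI struct */
        __u64 flags;
        __u64 value;
        /* new fields must only ever be appended here */
};

static long demo_set_attr(const void __user *uattr, size_t usize)
{
        struct demo_attr attr;
        int err;

        err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
        if (err)
                return err;             /* -E2BIG or -EFAULT per the rules above */

        /* Fields an older userspace did not know about are now zero. */
        return 0;
}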
408 * get_kernel_nofault(): safely attempt to read from a location
410 * @ptr: address to read from
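A usage sketch with a hypothetical helper: get_kernel_nofault() lets debugging or tracing code read a kernel address that might not be mapped, returning an error instead of oopsing.

#include <linux/errno.h>
#include <linux/uaccess.h>

static int peek_kernel_long(const unsigned long *maybe_bad, unsigned long *out)
{
        unsigned long val;

        if (get_kernel_nofault(val, maybe_bad))
                return -EFAULT;         /* address was not readable */

        *out = val;
        return 0;
}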