/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  include/linux/userfaultfd_k.h
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 */

#ifndef _LINUX_USERFAULTFD_K_H
#define _LINUX_USERFAULTFD_K_H

#ifdef CONFIG_USERFAULTFD

#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */

#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/hugetlb_inline.h>

/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (UFFD_SHARED_FCNTL_FLAGS)
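
/*
 * Userspace-side sketch (illustrative only, not part of this header, assuming
 * <unistd.h> and <sys/syscall.h> on the user side): because UFFD_CLOEXEC and
 * UFFD_NONBLOCK alias the O_* values, the familiar fcntl flags can be passed
 * straight to the syscall:
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *
 *	if (uffd < 0)
 *		return -1;
 *
 * The kernel can then validate the argument by masking against
 * UFFD_SHARED_FCNTL_FLAGS plus whatever uffd-specific bits it accepts.
 */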

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
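/*
 * Illustrative sketch of the ordering above (assumed caller shape, not a
 * verbatim excerpt of fs/userfaultfd.c): a path that walks both fault
 * waitqueues nests the locks top-down, with IRQs disabled at the outermost
 * level:
 *
 *	spin_lock_irq(&ctx->fd_wqh.lock);
 *	spin_lock(&ctx->fault_pending_wqh.lock);
 *	spin_lock(&ctx->fault_wqh.lock);
 *	... refile or wake waiters ...
 *	spin_unlock(&ctx->fault_wqh.lock);
 *	spin_unlock(&ctx->fault_pending_wqh.lock);
 *	spin_unlock_irq(&ctx->fd_wqh.lock);
 *
 * Nesting them the other way around, or leaving IRQs enabled, reintroduces
 * the deadlock described above.
 */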
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* released */
	bool released;
	/*
	 * Prevents userfaultfd operations (fill/move/wp) from happening while
	 * some non-cooperative event(s) are taking place. Increments are done
	 * in write mode, whereas userfaultfd operations, which include reading
	 * mmap_changing, are done in read mode (see the illustrative sketch
	 * below this struct).
	 */
	struct rw_semaphore map_changing_lock;
	/* memory mappings are changing because of non-cooperative event */
	atomic_t mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};
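
/*
 * Illustrative sketch only (assumed caller shape; do_the_copy() is a
 * hypothetical helper): a non-cooperative event flags the mappings as
 * changing under the write side of map_changing_lock, while an operation
 * such as UFFDIO_COPY samples mmap_changing under the read side and bails
 * out with -EAGAIN if it is set:
 *
 *	down_write(&ctx->map_changing_lock);
 *	atomic_inc(&ctx->mmap_changing);
 *	up_write(&ctx->map_changing_lock);
 *
 *	down_read(&ctx->map_changing_lock);
 *	if (atomic_read(&ctx->mmap_changing))
 *		err = -EAGAIN;
 *	else
 *		err = do_the_copy();
 *	up_read(&ctx->map_changing_lock);
 */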

extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);

/* A combined operation mode + behavior flags. */
typedef unsigned int __bitwise uffd_flags_t;

/* Mutually exclusive modes of operation. */
enum mfill_atomic_mode {
	MFILL_ATOMIC_COPY,
	MFILL_ATOMIC_ZEROPAGE,
	MFILL_ATOMIC_CONTINUE,
	MFILL_ATOMIC_POISON,
	NR_MFILL_ATOMIC_MODES,
};

#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
#define MFILL_ATOMIC_BIT(nr) BIT(MFILL_ATOMIC_MODE_BITS + (nr))
#define MFILL_ATOMIC_FLAG(nr) ((__force uffd_flags_t) MFILL_ATOMIC_BIT(nr))
#define MFILL_ATOMIC_MODE_MASK ((__force uffd_flags_t) (MFILL_ATOMIC_BIT(0) - 1))

static inline bool uffd_flags_mode_is(uffd_flags_t flags, enum mfill_atomic_mode expected)
{
	return (flags & MFILL_ATOMIC_MODE_MASK) == ((__force uffd_flags_t) expected);
}

static inline uffd_flags_t uffd_flags_set_mode(uffd_flags_t flags, enum mfill_atomic_mode mode)
{
	flags &= ~MFILL_ATOMIC_MODE_MASK;
	return flags | ((__force uffd_flags_t) mode);
}

/* Flags controlling behavior. These behavior changes are mode-independent. */
#define MFILL_ATOMIC_WP MFILL_ATOMIC_FLAG(0)
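
/*
 * Illustrative sketch only: with four modes, MFILL_ATOMIC_MODE_BITS is 2, so
 * the mode occupies the low two bits of uffd_flags_t and behavior flags such
 * as MFILL_ATOMIC_WP start at bit 2.  A caller building flags for a
 * write-protected copy might do ("wp_mode" is a hypothetical condition):
 *
 *	uffd_flags_t flags = uffd_flags_set_mode(0, MFILL_ATOMIC_COPY);
 *
 *	if (wp_mode)
 *		flags |= MFILL_ATOMIC_WP;
 *	WARN_ON(!uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY));
 */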

extern int mfill_atomic_install_pte(pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr, struct page *page,
				    bool newly_allocated, uffd_flags_t flags);

extern ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
				 unsigned long src_start, unsigned long len,
				 uffd_flags_t flags);
extern ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
				     unsigned long dst_start,
				     unsigned long len);
extern ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long dst_start,
				     unsigned long len, uffd_flags_t flags);
extern ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
				   unsigned long len, uffd_flags_t flags);
extern int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
			       unsigned long len, bool enable_wp);
extern long uffd_wp_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long len, bool enable_wp);
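
/*
 * Illustrative sketch only (assumed dispatch shape, not a verbatim excerpt of
 * the ioctl code): UFFDIO_COPY is expected to end up in mfill_atomic_copy(),
 * with UFFDIO_COPY_MODE_WP translated into the MFILL_ATOMIC_WP behavior flag:
 *
 *	uffd_flags_t flags = 0;
 *
 *	if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP)
 *		flags |= MFILL_ATOMIC_WP;
 *	ret = mfill_atomic_copy(ctx, uffdio_copy.dst, uffdio_copy.src,
 *				uffdio_copy.len, flags);
 */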

/* move_pages */
void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
		   unsigned long src_start, unsigned long len, __u64 flags);
int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
			struct vm_area_struct *dst_vma,
			struct vm_area_struct *src_vma,
			unsigned long dst_addr, unsigned long src_addr);

/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

/*
 * Never enable huge pmd sharing on certain uffd registered vmas:
 *
 * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
 *
 * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
 *   VMAs which share huge pmds. (If you have two mappings to the same
 *   underlying pages, and fault in the non-UFFD-registered one with a write,
 *   with huge pmd sharing this would *also* set up the second UFFD-registered
 *   mapping, and we'd not get minor faults.)
 */
static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

/*
 * Don't do fault-around for either WP or MINOR registered uffd ranges.  For
 * a MINOR registered range, fault-around would be a total disaster: ptes
 * could be installed without notifications.  For WP it should mostly be fine
 * as long as fault-around checks for pte_none() before the installation, but
 * to be super safe we just forbid it.
 */
static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}
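
/*
 * Illustrative sketch only (hypothetical helper names): generic fault code is
 * expected to consult the helper above before taking the fault-around fast
 * path:
 *
 *	if (uffd_disable_fault_around(vmf->vma))
 *		return handle_single_page_fault(vmf);
 *	return do_fault_around(vmf);
 */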

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MISSING;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_WP;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MINOR;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return userfaultfd_wp(vma) && pte_uffd_wp(pte);
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return vma->vm_flags & __VM_UFFD_FLAGS;
}

static inline bool vma_can_userfault(struct vm_area_struct *vma,
				     unsigned long vm_flags,
				     bool wp_async)
{
	vm_flags &= __VM_UFFD_FLAGS;

	if ((vm_flags & VM_UFFD_MINOR) &&
	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
		return false;

	/*
	 * If wp async is enabled, and WP is the only mode enabled, allow any
	 * memory type.
	 */
	if (wp_async && (vm_flags == VM_UFFD_WP))
		return true;

#ifndef CONFIG_PTE_MARKER_UFFD_WP
	/*
	 * If the user requested uffd-wp but pte markers are not enabled for
	 * uffd-wp, then only anonymous memory is supported; shmem & hugetlbfs
	 * are not.
	 */
	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
		return false;
#endif

	/* By default, allow any of anon|shmem|hugetlb */
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
	    vma_is_shmem(vma);
}
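
/*
 * Illustrative sketch only (assumed shape of a registration-time check, not a
 * verbatim excerpt): UFFDIO_REGISTER is expected to walk the requested range
 * and reject any VMA that the requested modes cannot be armed on:
 *
 *	for_each_vma_range(vmi, vma, end) {
 *		if (!vma_can_userfault(vma, vm_flags, wp_async))
 *			return -EINVAL;
 *	}
 */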

extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);

extern void mremap_userfaultfd_prep(struct vm_area_struct *,
				    struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
					unsigned long from, unsigned long to,
					unsigned long len);

extern bool userfaultfd_remove(struct vm_area_struct *vma,
			       unsigned long start,
			       unsigned long end);

extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
				       struct list_head *uf);
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
extern bool userfaultfd_wp_async(struct vm_area_struct *vma);

#else /* CONFIG_USERFAULTFD */

/* mm helpers */
static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
				unsigned long reason)
{
	return VM_FAULT_SIGBUS;
}

static inline long uffd_wp_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long len,
				 bool enable_wp)
{
	return 0;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return false;
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return false;
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return false;
}

static inline int dup_userfaultfd(struct vm_area_struct *vma,
				  struct list_head *l)
{
	return 0;
}

static inline void dup_userfaultfd_complete(struct list_head *l)
{
}

static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
					   struct vm_userfaultfd_ctx *ctx)
{
}

static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
					       unsigned long from,
					       unsigned long to,
					       unsigned long len)
{
}

static inline bool userfaultfd_remove(struct vm_area_struct *vma,
				      unsigned long start,
				      unsigned long end)
{
	return true;
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end,
					 struct list_head *uf)
{
	return 0;
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
{
	return false;
}

#endif /* CONFIG_USERFAULTFD */

static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
{
	/* Only wr-protect mode uses pte markers */
	if (!userfaultfd_wp(vma))
		return false;

	/* File-based uffd-wp always needs markers */
	if (!vma_is_anonymous(vma))
		return true;

	/*
	 * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED is
	 * enabled (to apply markers on zero pages).
	 */
	return userfaultfd_wp_unpopulated(vma);
}
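
/*
 * Illustrative sketch only (assumed caller shape): when zapping ptes in a VMA
 * for which this returns true, the teardown path is expected to leave a
 * PTE_MARKER_UFFD_WP marker behind instead of a none pte, so the wr-protect
 * state survives the zap:
 *
 *	if (userfaultfd_wp_use_markers(vma) && pte_uffd_wp(pteval))
 *		set_pte_at(mm, addr, pte,
 *			   make_pte_marker(PTE_MARKER_UFFD_WP));
 */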

static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	return is_pte_marker_entry(entry) &&
	    (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
#else
	return false;
#endif
}

static inline bool pte_marker_uffd_wp(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;

	entry = pte_to_swp_entry(pte);

	return pte_marker_entry_uffd_wp(entry);
#else
	return false;
#endif
}

/*
 * Returns true if this is a swap pte and was uffd-wp wr-protected in either
 * form (pte marker or a normal swap pte), false otherwise.
 */
static inline bool pte_swp_uffd_wp_any(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	if (!is_swap_pte(pte))
		return false;

	if (pte_swp_uffd_wp(pte))
		return true;

	if (pte_marker_uffd_wp(pte))
		return true;
#endif
	return false;
}
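
/*
 * Illustrative sketch only (assumed caller shape): code that must preserve
 * the uffd-wp bit across a pte change can combine the present and
 * non-present checks like this:
 *
 *	bool wp;
 *
 *	if (pte_present(pte))
 *		wp = pte_uffd_wp(pte);
 *	else
 *		wp = pte_swp_uffd_wp_any(pte);
 */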

#endif /* _LINUX_USERFAULTFD_K_H */