/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>
#include <linux/percpu_counter.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

#define INIT_PASID	0

struct address_space;
struct mem_cgroup;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
 * That requires that freelist & counters in struct slab be adjacent and
 * double-word aligned. Because struct slab currently just reinterprets the
 * bits of struct page, we align all struct pages to double-word boundaries,
 * and ensure that 'freelist' is aligned within struct slab.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment	__aligned(sizeof(unsigned long))
#endif
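
/*
 * Illustrative sketch of the reuse rules above (not an in-tree user;
 * 'my_private_state' is a hypothetical pointer-sized cookie):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	set_page_private(page, (unsigned long)my_private_state);
 *	...
 *	set_page_private(page, 0);
 *	__free_pages(page, 0);
 *
 * A user that overlays page->mapping instead must restore it to NULL, and
 * a user of the mapcount union must call page_mapcount_reset(), before the
 * page is freed.
 */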

struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	/*
	 * Five words (20/40 bytes) are available in this union.
	 * WARNING: bit 0 of the first word is used for PageTail(). That
	 * means the other users of this union MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct {	/* Page cache and anonymous pages */
			/**
			 * @lru: Pageout list, eg. active_list protected by
			 * lruvec->lru_lock.  Sometimes used as a generic list
			 * by the page owner.
			 */
			union {
				struct list_head lru;

				/* Or, for the Unevictable "LRU list" slot */
				struct {
					/* Always even, to negate PageTail */
					void *__filler;
					/* Count page's or folio's mlocks */
					unsigned int mlock_count;
				};

				/* Or, free page */
				struct list_head buddy_list;
				struct list_head pcp_list;
			};
			/* See page-flags.h for PAGE_MAPPING_FLAGS */
			struct address_space *mapping;
			union {
				pgoff_t index;		/* Our offset within mapping. */
				unsigned long share;	/* share count for fsdax */
			};
			/**
			 * @private: Mapping-private opaque data.
			 * Usually used for buffer_heads if PagePrivate.
			 * Used for swp_entry_t if PageSwapCache.
			 * Indicates order in the buddy system if PageBuddy.
			 */
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
			/**
			 * @pp_magic: magic value to avoid recycling non
			 * page_pool allocated pages.
			 */
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long _pp_mapping_pad;
			unsigned long dma_addr;
			atomic_long_t pp_ref_count;
		};
		struct {	/* Tail pages of compound page */
			unsigned long compound_head;	/* Bit zero is set */
		};
		struct {	/* ZONE_DEVICE pages */
			/** @pgmap: Points to the hosting device page map. */
			struct dev_pagemap *pgmap;
			void *zone_device_data;
			/*
			 * ZONE_DEVICE private pages are counted as being
			 * mapped so the next 3 words hold the mapping, index,
			 * and private fields from the source anonymous or
			 * page cache page while the page is migrated to device
			 * private memory.
			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
			 * use the mapping, index, and private fields when
			 * pmem backed DAX files are mapped.
			 */
		};

		/** @rcu_head: You can use this to free a page by RCU. */
		struct rcu_head rcu_head;
	};

	union {		/* This union is 4 bytes in size. */
		/*
		 * If the page can be mapped to userspace, encodes the number
		 * of times this page is referenced by a page table.
		 */
		atomic_t _mapcount;

		/*
		 * If the page is neither PageSlab nor mappable to userspace,
		 * the value stored here may help determine what this page
		 * is used for.  See page-flags.h for a list of page types
		 * which are currently stored here.
		 */
		unsigned int page_type;
	};

	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
	atomic_t _refcount;

#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif

#ifdef CONFIG_KMSAN
	/*
	 * KMSAN metadata for this page:
	 *  - shadow page: every bit indicates whether the corresponding
	 *    bit of the original page is initialized (0) or not (1);
	 *  - origin page: every 4 bytes contain an id of the stack trace
	 *    where the uninitialized value was created.
	 */
	struct page *kmsan_shadow;
	struct page *kmsan_origin;
#endif
} _struct_page_alignment;

/*
 * struct encoded_page - a nonexistent type marking this pointer
 *
 * An 'encoded_page' pointer is a pointer to a regular 'struct page', but
 * with the low bits of the pointer indicating extra context-dependent
 * information. Only used in mmu_gather handling, and this acts as a type
 * system check on that use.
 *
 * We only really have two guaranteed bits in general, although you could
 * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 * for more.
 *
 * Use the supplied helper functions to encode/decode the pointer and bits.
 */
struct encoded_page;

#define ENCODED_PAGE_BITS			3ul

/* Perform rmap removal after we have flushed the TLB. */
#define ENCODED_PAGE_BIT_DELAY_RMAP		1ul

/*
 * If this bit is set, the next item in the encoded_page array is the
 * "nr_pages" argument: the number of consecutive pages starting from this
 * page that all belong to the same folio, i.e. the number of folio
 * references that must be dropped. If this bit is not set, "nr_pages" is
 * implicitly 1.
 */
#define ENCODED_PAGE_BIT_NR_PAGES_NEXT		2ul

static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
{
	BUILD_BUG_ON(flags > ENCODED_PAGE_BITS);
	return (struct encoded_page *)(flags | (unsigned long)page);
}

static inline unsigned long encoded_page_flags(struct encoded_page *page)
{
	return ENCODED_PAGE_BITS & (unsigned long)page;
}

static inline struct page *encoded_page_ptr(struct encoded_page *page)
{
	return (struct page *)(~ENCODED_PAGE_BITS & (unsigned long)page);
}

static __always_inline struct encoded_page *encode_nr_pages(unsigned long nr)
{
	VM_WARN_ON_ONCE((nr << 2) >> 2 != nr);
	return (struct encoded_page *)(nr << 2);
}

static __always_inline unsigned long encoded_nr_pages(struct encoded_page *page)
{
	return ((unsigned long)page) >> 2;
}
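
/*
 * Usage sketch for the encoded_page helpers above (illustrative only; the
 * real user is the mmu_gather batching code, which stores these encoded
 * pointers in its page arrays):
 *
 *	batch[i++] = encode_page(page, ENCODED_PAGE_BIT_NR_PAGES_NEXT);
 *	batch[i++] = encode_nr_pages(nr);
 *	...
 *	page = encoded_page_ptr(batch[j]);
 *	flags = encoded_page_flags(batch[j]);
 *	if (flags & ENCODED_PAGE_BIT_NR_PAGES_NEXT)
 *		nr = encoded_nr_pages(batch[++j]);
 */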

/*
 * A swap entry has to fit into a "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;

/**
 * struct folio - Represents a contiguous set of bytes.
 * @flags: Identical to the page flags.
 * @lru: Least Recently Used list; tracks how recently this folio was used.
 * @mlock_count: Number of times this folio has been pinned by mlock().
 * @mapping: The file this page belongs to, or refers to the anon_vma for
 *    anonymous memory.
 * @index: Offset within the file, in units of pages.  For anonymous memory,
 *    this is the index from the beginning of the mmap.
 * @private: Filesystem per-folio data (see folio_attach_private()).
 * @swap: Used for swp_entry_t if folio_test_swapcache().
 * @_mapcount: Do not access this member directly.  Use folio_mapcount() to
 *    find out how many times this folio is mapped by userspace.
 * @_refcount: Do not access this member directly.  Use folio_ref_count()
 *    to find how many references there are to this folio.
 * @memcg_data: Memory Control Group data.
 * @virtual: Virtual address in the kernel direct map.
 * @_last_cpupid: IDs of last CPU and last process that accessed the folio.
 * @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
 * @_nr_pages_mapped: Do not use directly, call folio_mapcount().
 * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
 * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
 * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
 * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
 * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
 * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
 * @_deferred_list: Folios to be split under memory pressure.
 *
 * A folio is a physically, virtually and logically contiguous set
 * of bytes.  It is a power-of-two in size, and it is aligned to that
 * same power-of-two.  It is at least as large as %PAGE_SIZE.  If it is
 * in the page cache, it is at a file offset which is a multiple of that
 * power-of-two.  It may be mapped into userspace at an address which is
 * at an arbitrary page offset, but its kernel virtual address is aligned
 * to its size.
 */
struct folio {
	/* private: don't document the anon union */
	union {
		struct {
	/* public: */
			unsigned long flags;
			union {
				struct list_head lru;
	/* private: avoid cluttering the output */
				struct {
					void *__filler;
	/* public: */
					unsigned int mlock_count;
	/* private: */
				};
	/* public: */
			};
			struct address_space *mapping;
			pgoff_t index;
			union {
				void *private;
				swp_entry_t swap;
			};
			atomic_t _mapcount;
			atomic_t _refcount;
#ifdef CONFIG_MEMCG
			unsigned long memcg_data;
#endif
#if defined(WANT_PAGE_VIRTUAL)
			void *virtual;
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
			int _last_cpupid;
#endif
	/* private: the union with struct page is transitional */
		};
		struct page page;
	};
	union {
		struct {
			unsigned long _flags_1;
			unsigned long _head_1;
			unsigned long _folio_avail;
	/* public: */
			atomic_t _entire_mapcount;
			atomic_t _nr_pages_mapped;
			atomic_t _pincount;
#ifdef CONFIG_64BIT
			unsigned int _folio_nr_pages;
#endif
	/* private: the union with struct page is transitional */
		};
		struct page __page_1;
	};
	union {
		struct {
			unsigned long _flags_2;
			unsigned long _head_2;
	/* public: */
			void *_hugetlb_subpool;
			void *_hugetlb_cgroup;
			void *_hugetlb_cgroup_rsvd;
			void *_hugetlb_hwpoison;
	/* private: the union with struct page is transitional */
		};
		struct {
			unsigned long _flags_2a;
			unsigned long _head_2a;
	/* public: */
			struct list_head _deferred_list;
	/* private: the union with struct page is transitional */
		};
		struct page __page_2;
	};
};

#define FOLIO_MATCH(pg, fl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
FOLIO_MATCH(flags, flags);
FOLIO_MATCH(lru, lru);
FOLIO_MATCH(mapping, mapping);
FOLIO_MATCH(compound_head, lru);
FOLIO_MATCH(index, index);
FOLIO_MATCH(private, private);
FOLIO_MATCH(_mapcount, _mapcount);
FOLIO_MATCH(_refcount, _refcount);
#ifdef CONFIG_MEMCG
FOLIO_MATCH(memcg_data, memcg_data);
#endif
#if defined(WANT_PAGE_VIRTUAL)
FOLIO_MATCH(virtual, virtual);
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
FOLIO_MATCH(_last_cpupid, _last_cpupid);
#endif
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl)						\
	static_assert(offsetof(struct folio, fl) ==			\
			offsetof(struct page, pg) + sizeof(struct page))
FOLIO_MATCH(flags, _flags_1);
FOLIO_MATCH(compound_head, _head_1);
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl)						\
	static_assert(offsetof(struct folio, fl) ==			\
			offsetof(struct page, pg) + 2 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_2);
FOLIO_MATCH(compound_head, _head_2);
FOLIO_MATCH(flags, _flags_2a);
FOLIO_MATCH(compound_head, _head_2a);
#undef FOLIO_MATCH
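
/*
 * The asserts above are what make the transitional casts between struct page
 * and struct folio safe: for a head page, the folio fields alias the page
 * fields at the same offsets.  A sketch of what that buys us (the conversion
 * helpers themselves, e.g. page_folio() and folio_page(), live elsewhere):
 *
 *	struct folio *folio = page_folio(page);
 *
 *	folio->index aliases the head page's ->index
 *	folio->_refcount aliases the head page's ->_refcount
 *	&folio->page is the head page itself
 */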

/**
 * struct ptdesc -    Memory descriptor for page tables.
 * @__page_flags:     Same as page flags. Powerpc only.
 * @pt_rcu_head:      For freeing page table pages.
 * @pt_list:          List of used page tables. Used for s390 and x86.
 * @_pt_pad_1:        Padding that aliases with page's compound head.
 * @pmd_huge_pte:     Protected by ptdesc->ptl, used for THPs.
 * @__page_mapping:   Aliases with page->mapping. Unused for page tables.
 * @pt_index:         Used for s390 gmap.
 * @pt_mm:            Used for x86 pgds.
 * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
 * @_pt_pad_2:        Padding to ensure proper alignment.
 * @ptl:              Lock for the page table.
 * @__page_type:      Same as page->page_type. Unused for page tables.
 * @__page_refcount:  Same as page refcount.
 * @pt_memcg_data:    Memcg data. Tracked for page tables here.
 *
 * This struct overlays struct page for now. Do not modify without a good
 * understanding of the issues.
 */
struct ptdesc {
	unsigned long __page_flags;

	union {
		struct rcu_head pt_rcu_head;
		struct list_head pt_list;
		struct {
			unsigned long _pt_pad_1;
			pgtable_t pmd_huge_pte;
		};
	};
	unsigned long __page_mapping;

	union {
		pgoff_t pt_index;
		struct mm_struct *pt_mm;
		atomic_t pt_frag_refcount;
	};

	union {
		unsigned long _pt_pad_2;
#if ALLOC_SPLIT_PTLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
	};
	unsigned int __page_type;
	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long pt_memcg_data;
#endif
};

#define TABLE_MATCH(pg, pt)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
TABLE_MATCH(flags, __page_flags);
TABLE_MATCH(compound_head, pt_list);
TABLE_MATCH(compound_head, _pt_pad_1);
TABLE_MATCH(mapping, __page_mapping);
TABLE_MATCH(index, pt_index);
TABLE_MATCH(rcu_head, pt_rcu_head);
TABLE_MATCH(page_type, __page_type);
TABLE_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
TABLE_MATCH(memcg_data, pt_memcg_data);
#endif
#undef TABLE_MATCH
static_assert(sizeof(struct ptdesc) <= sizeof(struct page));

#define ptdesc_page(pt)			(_Generic((pt),			\
	const struct ptdesc *:		(const struct page *)(pt),	\
	struct ptdesc *:		(struct page *)(pt)))

#define ptdesc_folio(pt)		(_Generic((pt),			\
	const struct ptdesc *:		(const struct folio *)(pt),	\
	struct ptdesc *:		(struct folio *)(pt)))

#define page_ptdesc(p)			(_Generic((p),			\
	const struct page *:		(const struct ptdesc *)(p),	\
	struct page *:			(struct ptdesc *)(p)))
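
/*
 * Sketch of the conversion macros above (illustrative; 'pgd_list' stands in
 * for whatever per-arch bookkeeping a page-table page is put on):
 *
 *	struct ptdesc *ptdesc = page_ptdesc(page);
 *
 *	ptdesc->pt_mm = mm;
 *	list_add(&ptdesc->pt_list, &pgd_list);
 *	...
 *	page = ptdesc_page(ptdesc);
 *
 * The casts are free: struct ptdesc merely reinterprets the same struct page.
 */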

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

/*
 * page_private can be used on tail pages.  However, PagePrivate is only
 * checked by the VM on the head page.  So page_private on the tail pages
 * should be used for data that's ancillary to the head page (eg attaching
 * buffer heads to tail pages after attaching buffer heads to the head page)
 */
#define page_private(page)		((page)->private)

static inline void set_page_private(struct page *page, unsigned long private)
{
	page->private = private;
}

static inline void *folio_get_private(struct folio *folio)
{
	return folio->private;
}
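
/*
 * Example use of the private-data accessors above (illustrative; filesystems
 * normally go through folio_attach_private()/folio_detach_private(), which
 * also manage the private flag and a folio reference):
 *
 *	set_page_private(page, (unsigned long)bh);
 *	bh = (struct buffer_head *)page_private(page);
 *
 *	fs_state = folio_get_private(folio);
 */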

struct page_frag_cache {
	void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty cache line
	 * containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;
};
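
/*
 * Typical use of a page_frag_cache (illustrative sketch; the entry points
 * page_frag_alloc() and page_frag_free() are declared in linux/gfp.h):
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, size, GFP_ATOMIC);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	page_frag_free(buf);
 *
 * Thanks to pagecnt_bias, only the final free of a given backing page has to
 * touch page->_refcount.
 */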

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). The caller
 * should either keep holding the lock while using the returned pointer or
 * raise the anon_vma_name refcount before releasing the lock.
 */
struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
struct anon_vma_name *anon_vma_name_alloc(const char *name);
void anon_vma_name_free(struct kref *kref);
#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}
#endif
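
/*
 * Sketch of the locking rule documented above (illustrative; the get/put
 * helpers, e.g. anon_vma_name_get()/anon_vma_name_put(), live in
 * linux/mm_inline.h):
 *
 *	mmap_read_lock(mm);
 *	anon_name = anon_vma_name(vma);
 *	if (anon_name)
 *		anon_vma_name_get(anon_name);
 *	mmap_read_unlock(mm);
 *	...
 *	anon_vma_name_put(anon_name);
 */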

struct vma_lock {
	struct rw_semaphore lock;
};

struct vma_numab_state {
	/*
	 * Initialised as time in 'jiffies' after which VMA
	 * should be scanned.  Delays first scan of new VMA by at
	 * least sysctl_numa_balancing_scan_delay:
	 */
	unsigned long next_scan;

	/*
	 * Time in jiffies when pids_active[] is reset to
	 * detect phase change behaviour:
	 */
	unsigned long pids_active_reset;

	/*
	 * Approximate tracking of PIDs that trapped a NUMA hinting
	 * fault. May produce false positives due to hash collisions.
	 *
	 *   [0] Previous PID tracking
	 *   [1] Current PID tracking
	 *
	 * Window moves after pids_active_reset has expired, approximately
	 * every VMA_PID_RESET_PERIOD jiffies:
	 */
	unsigned long pids_active[2];

	/* MM scan sequence ID when scan first started after VMA creation */
	int start_scan_seq;

	/*
	 * MM scan sequence ID when the VMA was last completely scanned.
	 * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
	 */
	int prev_scan_seq;
};

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
#ifdef CONFIG_PER_VMA_LOCK
		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
#endif
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;          /* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_lock->lock (in write mode)
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_lock->lock (in read or write mode)
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	int vm_lock_seq;
	struct vma_lock *vm_lock;

	/* Flag to indicate areas detached from the mm->mm_mt tree */
	bool detached;
#endif

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

#ifdef CONFIG_NUMA
#define vma_policy(vma) ((vma)->vm_policy)
#else
#define vma_policy(vma) NULL
#endif

#ifdef CONFIG_SCHED_MM_CID
struct mm_cid {
	u64 time;
	int cid;
};
#endif

struct kioctx_table;
struct iommu_mm_data;
struct mm_struct {
	struct {
		/*
		 * Fields which are often written to are placed in a separate
		 * cache line.
		 */
		struct {
			/**
			 * @mm_count: The number of references to &struct
			 * mm_struct (@mm_users count as 1).
			 *
			 * Use mmgrab()/mmdrop() to modify. When this drops to
			 * 0, the &struct mm_struct is freed.
			 */
			atomic_t mm_count;
		} ____cacheline_aligned_in_smp;

		struct maple_tree mm_mt;
#ifdef CONFIG_MMU
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		unsigned long mmap_base;	/* base of mmap area */
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		unsigned long task_size;	/* size of task vm space */
		pgd_t * pgd;

#ifdef CONFIG_MEMBARRIER
		/**
		 * @membarrier_state: Flags controlling membarrier behavior.
		 *
		 * This field is close to @pgd to hopefully fit in the same
		 * cache-line, which needs to be touched by switch_mm().
		 */
		atomic_t membarrier_state;
#endif

		/**
		 * @mm_users: The number of users including userspace.
		 *
		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
		 * drops to 0 (i.e. when the task exits and there are no other
		 * temporary reference holders), we also release a reference on
		 * @mm_count (which may then free the &struct mm_struct if
		 * @mm_count also drops to 0).
		 */
		atomic_t mm_users;

#ifdef CONFIG_SCHED_MM_CID
		/**
		 * @pcpu_cid: Per-cpu current cid.
		 *
		 * Keep track of the currently allocated mm_cid for each cpu.
		 * The per-cpu mm_cid values are serialized by their respective
		 * runqueue locks.
		 */
		struct mm_cid __percpu *pcpu_cid;
		/*
		 * @mm_cid_next_scan: When the next mm_cid scan is due
		 * (in jiffies).
		 */
		unsigned long mm_cid_next_scan;
#endif
#ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* size of all page tables */
#endif
		int map_count;			/* number of VMAs */

		spinlock_t page_table_lock; /* Protects page tables and some
					     * counters
					     */
		/*
		 * With some kernel configs, the current mmap_lock offset
		 * inside 'mm_struct' is 0x120. That is optimal: the lock's
		 * two hot fields, 'count' and 'owner', land in two different
		 * cachelines, so when mmap_lock is highly contended and both
		 * fields are accessed frequently, the current layout reduces
		 * cache bouncing.
		 *
		 * So please be careful with adding new fields before
		 * mmap_lock, which can easily push the 2 fields into one
		 * cacheline.
		 */
		struct rw_semaphore mmap_lock;

		struct list_head mmlist; /* List of maybe swapped mm's.	These
					  * are globally strung together off
					  * init_mm.mmlist, and are protected
					  * by mmlist_lock
					  */
#ifdef CONFIG_PER_VMA_LOCK
		/*
		 * This field has lock-like semantics, meaning it is sometimes
		 * accessed with ACQUIRE/RELEASE semantics.
		 * Roughly speaking, incrementing the sequence number is
		 * equivalent to releasing locks on VMAs; reading the sequence
		 * number can be part of taking a read lock on a VMA.
		 *
		 * Can be modified under write mmap_lock using RELEASE
		 * semantics.
		 * Can be read with no other protection when holding write
		 * mmap_lock.
		 * Can be read with ACQUIRE semantics if not holding write
		 * mmap_lock.
		 */
		int mm_lock_seq;
#endif

		unsigned long hiwater_rss; /* High-watermark of RSS usage */
		unsigned long hiwater_vm;  /* High-water virtual memory usage */

		unsigned long total_vm;	   /* Total pages mapped */
		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
		atomic64_t    pinned_vm;   /* Refcount permanently increased */
		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		unsigned long stack_vm;	   /* VM_STACK */
		unsigned long def_flags;

		/**
		 * @write_protect_seq: Locked when any thread is write
		 * protecting pages mapped by this mm to enforce a later COW,
		 * for instance during page table copying for fork().
		 */
		seqcount_t write_protect_seq;

		spinlock_t arg_lock; /* protect the below fields */

		unsigned long start_code, end_code, start_data, end_data;
		unsigned long start_brk, brk, start_stack;
		unsigned long arg_start, arg_end, env_start, env_end;

		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

		struct percpu_counter rss_stat[NR_MM_COUNTERS];

		struct linux_binfmt *binfmt;

		/* Architecture-specific MM context */
		mm_context_t context;

		unsigned long flags; /* Must use atomic bitops to access */

#ifdef CONFIG_AIO
		spinlock_t			ioctx_lock;
		struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
		/*
		 * "owner" points to a task that is regarded as the canonical
		 * user/owner of this mm. All of the following must be true in
		 * order for it to be changed:
		 *
		 * current == mm->owner
		 * current->mm != mm
		 * new_owner->mm == mm
		 * new_owner->alloc_lock is held
		 */
		struct task_struct __rcu *owner;
#endif
		struct user_namespace *user_ns;

		/* store ref to file /proc/<pid>/exe symlink points to */
		struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
		struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
		/*
		 * numa_next_scan is the next time that PTEs will be remapped
		 * PROT_NONE to trigger NUMA hinting faults; such faults gather
		 * statistics and migrate pages to new nodes if necessary.
		 */
		unsigned long numa_next_scan;

		/* Restart point for scanning and remapping PTEs. */
		unsigned long numa_scan_offset;

		/* numa_scan_seq prevents two threads remapping PTEs. */
		int numa_scan_seq;
#endif
		/*
		 * An operation with batched TLB flushing is going on. Anything
		 * that can move process memory needs to flush the TLB when
		 * moving a PROT_NONE mapped page.
		 */
		atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
		/* See flush_tlb_batched_pending() */
		atomic_t tlb_flush_batched;
#endif
		struct uprobes_state uprobes_state;
#ifdef CONFIG_PREEMPT_RT
		struct rcu_head delayed_drop;
#endif
#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_t hugetlb_usage;
#endif
		struct work_struct async_put_work;

#ifdef CONFIG_IOMMU_MM_DATA
		struct iommu_mm_data *iommu_mm;
#endif
#ifdef CONFIG_KSM
		/*
		 * Number of pages of this process involved in KSM merging
		 * (not including ksm_zero_pages).
		 */
		unsigned long ksm_merging_pages;
		/*
		 * Number of pages checked for KSM merging, including merged
		 * and not merged.
		 */
		unsigned long ksm_rmap_items;
		/*
		 * Number of empty pages merged with the kernel zero page
		 * when KSM use_zero_pages is enabled.
		 */
		unsigned long ksm_zero_pages;
#endif /* CONFIG_KSM */
#ifdef CONFIG_LRU_GEN_WALKS_MMU
		struct {
			/* this mm_struct is on lru_gen_mm_list */
			struct list_head list;
			/*
			 * Set when switching to this mm_struct, as a hint of
			 * whether it has been used since the last time per-node
			 * page table walkers cleared the corresponding bits.
			 */
			unsigned long bitmap;
#ifdef CONFIG_MEMCG
			/* points to the memcg of "owner" above */
			struct mem_cgroup *memcg;
#endif
		} lru_gen;
#endif /* CONFIG_LRU_GEN_WALKS_MMU */
	} __randomize_layout;

	/*
	 * The mm_cpumask needs to be at the end of mm_struct, because it
	 * is dynamically sized based on nr_cpu_ids.
	 */
	unsigned long cpu_bitmap[];
};

#define MM_MT_FLAGS	(MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
			 MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
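
/*
 * Sketch of the two-level reference scheme documented in @mm_users and
 * @mm_count above (illustrative; the helpers live in linux/sched/mm.h):
 *
 *	if (mmget_not_zero(mm)) {
 *		mm_users is pinned: the address space cannot be torn down
 *		...access user memory...
 *		mmput(mm);
 *	}
 *
 *	mmgrab(mm);
 *	mm_count is pinned: the struct itself stays allocated, but the
 *	address space may already have been torn down
 *	...
 *	mmdrop(mm);
 */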

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
	unsigned long cpu_bitmap = (unsigned long)mm;

	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return (struct cpumask *)&mm->cpu_bitmap;
}
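
/*
 * Example use of mm_cpumask() (illustrative; 'arch_flush_fn' is a placeholder
 * for an architecture's TLB-flush callback, not a real function):
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(next));		(on context switch)
 *	...
 *	on_each_cpu_mask(mm_cpumask(mm), arch_flush_fn, mm, true);
 */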

#ifdef CONFIG_LRU_GEN

struct lru_gen_mm_list {
	/* mm_struct list for page table walkers */
	struct list_head fifo;
	/* protects the list above */
	spinlock_t lock;
};

#endif /* CONFIG_LRU_GEN */

#ifdef CONFIG_LRU_GEN_WALKS_MMU

void lru_gen_add_mm(struct mm_struct *mm);
void lru_gen_del_mm(struct mm_struct *mm);
void lru_gen_migrate_mm(struct mm_struct *mm);

static inline void lru_gen_init_mm(struct mm_struct *mm)
{
	INIT_LIST_HEAD(&mm->lru_gen.list);
	mm->lru_gen.bitmap = 0;
#ifdef CONFIG_MEMCG
	mm->lru_gen.memcg = NULL;
#endif
}

static inline void lru_gen_use_mm(struct mm_struct *mm)
{
	/*
	 * When the bitmap is set, page reclaim knows this mm_struct has been
	 * used since the last time it cleared the bitmap. So it might be worth
	 * walking the page tables of this mm_struct to clear the accessed bit.
	 */
	WRITE_ONCE(mm->lru_gen.bitmap, -1);
}

#else /* !CONFIG_LRU_GEN_WALKS_MMU */

static inline void lru_gen_add_mm(struct mm_struct *mm)
{
}

static inline void lru_gen_del_mm(struct mm_struct *mm)
{
}

static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
}

static inline void lru_gen_init_mm(struct mm_struct *mm)
{
}

static inline void lru_gen_use_mm(struct mm_struct *mm)
{
}

#endif /* CONFIG_LRU_GEN_WALKS_MMU */

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}
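
/*
 * Walking VMAs with the iterator above (illustrative sketch; for_each_vma()
 * and the mmap_lock rules are defined in linux/mm.h and linux/mmap_lock.h):
 *
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	mmap_read_lock(mm);
 *	for_each_vma(vmi, vma)
 *		pr_debug("%lx-%lx\n", vma->vm_start, vma->vm_end);
 *	mmap_read_unlock(mm);
 */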

#ifdef CONFIG_SCHED_MM_CID

enum mm_cid_state {
	MM_CID_UNSET = -1U,		/* Unset state has lazy_put flag set. */
	MM_CID_LAZY_PUT = (1U << 31),
};

static inline bool mm_cid_is_unset(int cid)
{
	return cid == MM_CID_UNSET;
}

static inline bool mm_cid_is_lazy_put(int cid)
{
	return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
}

static inline bool mm_cid_is_valid(int cid)
{
	return !(cid & MM_CID_LAZY_PUT);
}

static inline int mm_cid_set_lazy_put(int cid)
{
	return cid | MM_CID_LAZY_PUT;
}

static inline int mm_cid_clear_lazy_put(int cid)
{
	return cid & ~MM_CID_LAZY_PUT;
}
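
/*
 * How the cid state helpers above compose (illustrative; the real transitions
 * in kernel/sched/ are done with cmpxchg on the per-cpu cid):
 *
 *	cid = MM_CID_UNSET;			mm_cid_is_unset(cid) is true
 *	cid = 3;				a plain, valid cid
 *	cid = mm_cid_set_lazy_put(cid);		mm_cid_is_lazy_put(cid) is true
 *	cid = mm_cid_clear_lazy_put(cid);	back to the plain cid (3)
 */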

/* Accessor for struct mm_struct's cidmask. */
static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
{
	unsigned long cid_bitmap = (unsigned long)mm;

	cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	/* Skip cpu_bitmap */
	cid_bitmap += cpumask_size();
	return (struct cpumask *)cid_bitmap;
}

static inline void mm_init_cid(struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i) {
		struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);

		pcpu_cid->cid = MM_CID_UNSET;
		pcpu_cid->time = 0;
	}
	cpumask_clear(mm_cidmask(mm));
}

static inline int mm_alloc_cid(struct mm_struct *mm)
{
	mm->pcpu_cid = alloc_percpu(struct mm_cid);
	if (!mm->pcpu_cid)
		return -ENOMEM;
	mm_init_cid(mm);
	return 0;
}

static inline void mm_destroy_cid(struct mm_struct *mm)
{
	free_percpu(mm->pcpu_cid);
	mm->pcpu_cid = NULL;
}

static inline unsigned int mm_cid_size(void)
{
	return cpumask_size();
}
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_init_cid(struct mm_struct *mm) { }
static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; }
static inline void mm_destroy_cid(struct mm_struct *mm) { }
static inline unsigned int mm_cid_size(void)
{
	return 0;
}
#endif /* CONFIG_SCHED_MM_CID */

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);

struct vm_fault;

/**
 * typedef vm_fault_t - Return type for page fault handlers.
 *
 * Page fault handlers return a bitmask of %VM_FAULT values.
 */
typedef __bitwise unsigned int vm_fault_t;

/**
 * enum vm_fault_reason - Page fault handlers return a bitmask of
 * these values to tell the core VM what happened when handling the
 * fault. Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 *
 * @VM_FAULT_OOM:		Out Of Memory
 * @VM_FAULT_SIGBUS:		Bad access
 * @VM_FAULT_MAJOR:		Page read from storage
 * @VM_FAULT_HWPOISON:		Hit poisoned small page
 * @VM_FAULT_HWPOISON_LARGE:	Hit poisoned large page. Index encoded
 *				in upper bits
 * @VM_FAULT_SIGSEGV:		segmentation fault
 * @VM_FAULT_NOPAGE:		->fault installed the pte, did not return a page
 * @VM_FAULT_LOCKED:		->fault locked the returned page
 * @VM_FAULT_RETRY:		->fault blocked, must retry
 * @VM_FAULT_FALLBACK:		huge page fault failed, fall back to small
 * @VM_FAULT_DONE_COW:		->fault has fully handled COW
 * @VM_FAULT_NEEDDSYNC:		->fault did not modify page tables and needs
 *				fsync() to complete (for synchronous page faults
 *				in DAX)
 * @VM_FAULT_COMPLETED:		->fault completed, meanwhile mmap lock released
 * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
 */
enum vm_fault_reason {
	VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
	VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
	VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
	VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
	VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
	VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
	VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
	VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
	VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
	VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
	VM_FAULT_COMPLETED      = (__force vm_fault_t)0x004000,
	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
};

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
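
/*
 * Example of the hstate-index encoding above (illustrative; hstate_index()
 * is the hugetlb helper from linux/hugetlb.h):
 *
 *	return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
 *	...
 *	idx = VM_FAULT_GET_HINDEX(ret);		recovers hstate_index(h)
 */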

#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |	\
			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,                 "OOM" },	\
	{ VM_FAULT_SIGBUS,              "SIGBUS" },	\
	{ VM_FAULT_MAJOR,               "MAJOR" },	\
	{ VM_FAULT_HWPOISON,            "HWPOISON" },	\
	{ VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },	\
	{ VM_FAULT_SIGSEGV,             "SIGSEGV" },	\
	{ VM_FAULT_NOPAGE,              "NOPAGE" },	\
	{ VM_FAULT_LOCKED,              "LOCKED" },	\
	{ VM_FAULT_RETRY,               "RETRY" },	\
	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" },	\
	{ VM_FAULT_COMPLETED,           "COMPLETED" }

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
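
/*
 * Sketch of installing a special mapping (illustrative; the vdso code is the
 * canonical user, via _install_special_mapping() from linux/mm.h; the names
 * below are hypothetical):
 *
 *	static struct page *demo_pages[2];
 *	static const struct vm_special_mapping demo_mapping = {
 *		.name	= "[demo]",
 *		.pages	= demo_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ|VM_MAYREAD, &demo_mapping);
 */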

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/**
 * enum fault_flag - Fault flag definitions.
 * @FAULT_FLAG_WRITE: Fault was a write fault.
 * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
 * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
 * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
 * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
 * @FAULT_FLAG_TRIED: The fault has been tried once.
 * @FAULT_FLAG_USER: The fault originated in userspace.
 * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
 * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
 * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
 * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to break COW in a
 *                      COW mapping, making sure that an exclusive anon page is
 *                      mapped after the fault.
 * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
 *                        We should only access orig_pte if this flag is set.
 * @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock.
 *
 * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
 * whether we would allow page faults to retry by specifying these two
 * fault flags correctly.  Currently there can be three legal combinations:
 *
 * (a) ALLOW_RETRY and !TRIED:  this means the page fault allows retry, and
 *                              this is the first try
 *
 * (b) ALLOW_RETRY and TRIED:   this means the page fault allows retry, and
 *                              we've already tried at least once
 *
 * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
 *
 * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
 * be used.  Note that page faults can be allowed to retry for multiple times,
 * in which case we'll have an initial fault with flags (a) then later on
 * continuous faults with flags (b).  We should always try to detect pending
 * signals before a retry to make sure the continuous page faults can still be
 * interrupted if necessary.
 *
 * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
 * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
 * applied to mappings that are not COW mappings.
 */
enum fault_flag {
	FAULT_FLAG_WRITE =		1 << 0,
	FAULT_FLAG_MKWRITE =		1 << 1,
	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
	FAULT_FLAG_RETRY_NOWAIT =	1 << 3,
	FAULT_FLAG_KILLABLE =		1 << 4,
	FAULT_FLAG_TRIED =		1 << 5,
	FAULT_FLAG_USER =		1 << 6,
	FAULT_FLAG_REMOTE =		1 << 7,
	FAULT_FLAG_INSTRUCTION =	1 << 8,
	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
	FAULT_FLAG_UNSHARE =		1 << 10,
	FAULT_FLAG_ORIG_PTE_VALID =	1 << 11,
	FAULT_FLAG_VMA_LOCK =		1 << 12,
};
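
/*
 * Sketch of how a fault handler uses the retry combinations documented above
 * (illustrative; mirrors the common pattern in arch page-fault handlers):
 *
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault & VM_FAULT_RETRY) {
 *		flags |= FAULT_FLAG_TRIED;	(combination (b) on the retry)
 *		goto retry;			(mmap_lock was dropped)
 *	}
 */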

typedef unsigned int __bitwise zap_flags_t;

/*
 * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
 * other. Here is what they mean, and how to use them:
 *
 *
 * FIXME: For pages which are part of a filesystem, mappings are subject to the
 * lifetime enforced by the filesystem and we need guarantees that longterm
 * users like RDMA and V4L2 only establish mappings which coordinate usage with
 * the filesystem.  Ideas for this coordination include revoking the longterm
 * pin, delaying writeback, bounce buffer page writeback, etc.  As FS DAX was
 * added after the problem with filesystems was found, FS DAX VMAs are
 * specifically failed.  Filesystem pages are still subject to bugs and use of
 * FOLL_LONGTERM should be avoided on those pages.
 *
 * In the CMA case: long term pins in a CMA region would unnecessarily fragment
 * that region.  And so, CMA attempts to migrate the page before pinning, when
 * FOLL_LONGTERM is specified.
 *
 * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
 * but an additional pin counting system) will be invoked. This is intended for
 * anything that gets a page reference and then touches page data (for example,
 * Direct IO). This lets the filesystem know that some non-file-system entity is
 * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
 * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
 * a call to unpin_user_page().
 *
 * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
 * and separate refcounting mechanisms, however, and that means that each has
 * its own acquire and release mechanisms:
 *
 *     FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
 *
 *     FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_page() to release.
 *
 * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
 * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
 * calls applied to them, and that's perfectly OK. This is a constraint on the
 * callers, not on the pages.)
 *
 * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
 * directly by the caller. That's in order to help avoid mismatches when
 * releasing pages: get_user_pages*() pages must be released via put_page(),
 * while pin_user_pages*() pages must be released via unpin_user_page().
 *
 * Please see Documentation/core-api/pin_user_pages.rst for more information.
 */
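
/*
 * Sketch of the two acquire/release pairings described above (illustrative):
 *
 *	FOLL_GET style:
 *		got = get_user_pages_fast(addr, 1, FOLL_WRITE, &page);
 *		...
 *		put_page(page);
 *
 *	FOLL_PIN style (FOLL_PIN itself is set internally):
 *		got = pin_user_pages_fast(addr, 1, FOLL_WRITE | FOLL_LONGTERM,
 *					  &page);
 *		...touch page data, e.g. Direct IO...
 *		unpin_user_page(page);
 */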

enum {
	/* check pte is writable */
	FOLL_WRITE = 1 << 0,
	/* do get_page on page */
	FOLL_GET = 1 << 1,
	/* give error on hole if it would be zero */
	FOLL_DUMP = 1 << 2,
	/* get_user_pages read/write w/o permission */
	FOLL_FORCE = 1 << 3,
	/*
	 * if a disk transfer is needed, start the IO and return without waiting
	 * upon it
	 */
	FOLL_NOWAIT = 1 << 4,
	/* do not fault in pages */
	FOLL_NOFAULT = 1 << 5,
	/* check page is hwpoisoned */
	FOLL_HWPOISON = 1 << 6,
	/* don't do file mappings */
	FOLL_ANON = 1 << 7,
	/*
	 * FOLL_LONGTERM indicates that the page will be held for an indefinite
	 * time period _often_ under userspace control.  This is in contrast to
	 * iov_iter_get_pages(), whose usages are transient.
	 */
	FOLL_LONGTERM = 1 << 8,
	/* split huge pmd before returning */
	FOLL_SPLIT_PMD = 1 << 9,
	/* allow returning PCI P2PDMA pages */
	FOLL_PCI_P2PDMA = 1 << 10,
	/* allow interrupts from generic signals */
	FOLL_INTERRUPTIBLE = 1 << 11,
	/*
	 * Always honor (trigger) NUMA hinting faults.
	 *
	 * FOLL_WRITE implicitly honors NUMA hinting faults because a
	 * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE
	 * apply). get_user_pages_fast_only() always implicitly honors NUMA
	 * hinting faults.
	 */
	FOLL_HONOR_NUMA_FAULT = 1 << 12,

	/* See also internal only FOLL flags in mm/internal.h */
};

#endif /* _LINUX_MM_TYPES_H */