/* mm.h revision 329971 */
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 François Tigeot
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/linux/mm.h 329971 2018-02-25 10:34:47Z hselasky $
 */
#ifndef	_LINUX_MM_H_
#define	_LINUX_MM_H_

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/pfn.h>
#include <linux/list.h>

#include <asm/pgtable.h>

/* Round "x" up to the next page boundary. */
#define	PAGE_ALIGN(x)	ALIGN(x, PAGE_SIZE)

/*
 * Make sure our LinuxKPI defined virtual memory flags don't conflict
 * with the ones defined by FreeBSD:
 */
CTASSERT((VM_PROT_ALL & -(1 << 8)) == 0);

/* The low vm_flags bits alias FreeBSD's VM_PROT_* protection bits. */
#define	VM_READ			VM_PROT_READ
#define	VM_WRITE		VM_PROT_WRITE
#define	VM_EXEC			VM_PROT_EXECUTE

/*
 * Remaining Linux-compatible vm_flags; the CTASSERT above guarantees
 * these start above FreeBSD's VM_PROT_ALL range.
 */
#define	VM_PFNINTERNAL		(1 << 8)	/* FreeBSD private flag to vm_insert_pfn() */
#define	VM_MIXEDMAP		(1 << 9)
#define	VM_NORESERVE		(1 << 10)
#define	VM_PFNMAP		(1 << 11)
#define	VM_IO			(1 << 12)
#define	VM_MAYWRITE		(1 << 13)
#define	VM_DONTCOPY		(1 << 14)
#define	VM_DONTEXPAND		(1 << 15)
#define	VM_DONTDUMP		(1 << 16)

#define	VMA_MAX_PREFAULT_RECORD	1

/* "gup_flags" bits for the get_user_pages*() functions declared below. */
#define	FOLL_WRITE		(1 << 0)
#define	FOLL_FORCE		(1 << 1)

/* Linux-compatible VM_FAULT_* result bits (see vm_operations_struct). */
#define	VM_FAULT_OOM		(1 << 0)
#define	VM_FAULT_SIGBUS		(1 << 1)
#define	VM_FAULT_MAJOR		(1 << 2)
#define	VM_FAULT_WRITE		(1 << 3)
#define	VM_FAULT_HWPOISON	(1 << 4)
#define	VM_FAULT_HWPOISON_LARGE	(1 << 5)
#define	VM_FAULT_SIGSEGV	(1 << 6)
#define	VM_FAULT_NOPAGE		(1 << 7)
#define	VM_FAULT_LOCKED		(1 << 8)
#define	VM_FAULT_RETRY		(1 << 9)
#define	VM_FAULT_FALLBACK	(1 << 10)

/* Bits for the "flags" member of struct vm_fault. */
#define	FAULT_FLAG_WRITE	(1 << 0)
#define	FAULT_FLAG_MKWRITE	(1 << 1)
#define	FAULT_FLAG_ALLOW_RETRY	(1 << 2)
#define	FAULT_FLAG_RETRY_NOWAIT	(1 << 3)
#define	FAULT_FLAG_KILLABLE	(1 << 4)
#define	FAULT_FLAG_TRIED	(1 << 5)
#define	FAULT_FLAG_USER		(1 << 6)
#define	FAULT_FLAG_REMOTE	(1 << 7)
#define	FAULT_FLAG_INSTRUCTION	(1 << 8)

/* Per-PTE callback type for apply_to_page_range() (unsupported below). */
typedef int (*pte_fn_t)(linux_pte_t *, pgtable_t, unsigned long addr, void *data);

/*
 * LinuxKPI emulation of Linux's struct vm_area_struct, describing a
 * single memory mapping.
 */
struct vm_area_struct {
	vm_offset_t vm_start;	/* first address of the mapping */
	vm_offset_t vm_end;	/* first address beyond the mapping */
	vm_offset_t vm_pgoff;	/* offset into the backing object */
	pgprot_t vm_page_prot;	/* protection/caching attributes */
	unsigned long vm_flags;	/* VM_* bits defined above */
	struct mm_struct *vm_mm;	/* owning address space */
	void   *vm_private_data;	/* driver private cookie */
	const struct vm_operations_struct *vm_ops;	/* open/close/fault hooks */
	struct linux_file *vm_file;	/* backing file, if any */

	/* internal operation */
	vm_paddr_t vm_pfn;		/* PFN for memory map */
	vm_size_t vm_len;		/* length for memory map */
	vm_pindex_t vm_pfn_first;
	int	vm_pfn_count;
	int    *vm_pfn_pcount;
	vm_object_t vm_obj;
	vm_map_t vm_cached_map;
	TAILQ_ENTRY(vm_area_struct) vm_entry;	/* list linkage */
};

/*
 * Argument block passed to the vm_operations_struct fault() handler.
 */
struct vm_fault {
	unsigned int flags;	/* FAULT_FLAG_* bits */
	pgoff_t	pgoff;		/* offset of the fault within the mapping */
	union {
		/* user-space address */
		void *virtual_address;
		unsigned long address;
	};
	struct page *page;	/* page backing the fault, when resolved */
	struct vm_area_struct *vma;	/* mapping that faulted */
};

/*
 * Driver-supplied callbacks for a vm_area_struct, modelled on Linux's
 * vm_operations_struct.  fault() presumably returns VM_FAULT_* bits.
 */
struct vm_operations_struct {
	void    (*open) (struct vm_area_struct *);
	void    (*close) (struct vm_area_struct *);
	int     (*fault) (struct vm_area_struct *, struct vm_fault *);
};

136/*
137 * Compute log2 of the power of two rounded up count of pages
138 * needed for size bytes.
139 */
140static inline int
141get_order(unsigned long size)
142{
143	int order;
144
145	size = (size - 1) >> PAGE_SHIFT;
146	order = 0;
147	while (size) {
148		order++;
149		size >>= 1;
150	}
151	return (order);
152}
153
/*
 * Linux distinguishes low (direct-mapped) from high memory; in the
 * LinuxKPI this simply resolves to page_address().
 */
static inline void *
lowmem_page_address(struct page *page)
{
	return (page_address(page));
}

160/*
161 * This only works via memory map operations.
162 */
163static inline int
164io_remap_pfn_range(struct vm_area_struct *vma,
165    unsigned long addr, unsigned long pfn, unsigned long size,
166    vm_memattr_t prot)
167{
168	vma->vm_page_prot = prot;
169	vma->vm_pfn = pfn;
170	vma->vm_len = size;
171
172	return (0);
173}
174
/*
 * Not implemented in the LinuxKPI; always fails with -ENOTSUP.
 */
static inline int
apply_to_page_range(struct mm_struct *mm, unsigned long address,
    unsigned long size, pte_fn_t fn, void *data)
{
	return (-ENOTSUP);
}

/*
 * Not implemented in the LinuxKPI; always fails with -ENOTSUP.
 */
static inline int
zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size)
{
	return (-ENOTSUP);
}

/*
 * Not implemented in the LinuxKPI (see io_remap_pfn_range() for the
 * supported memory-map path); always fails with -ENOTSUP.
 */
static inline int
remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	return (-ENOTSUP);
}

196static inline unsigned long
197vma_pages(struct vm_area_struct *vma)
198{
199	return ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
200}
201
/* Byte offset of "off" within its page. */
#define	offset_in_page(off)	((off) & (PAGE_SIZE - 1))

/*
 * Mark the given page as modified (dirty).
 */
static inline void
set_page_dirty(struct vm_page *page)
{
	vm_page_dirty(page);
}

/*
 * Like set_page_dirty(), but takes the page lock around the update
 * for callers that do not already hold it.
 */
static inline void
set_page_dirty_lock(struct vm_page *page)
{
	vm_page_lock(page);
	vm_page_dirty(page);
	vm_page_unlock(page);
}

/*
 * Record a reference to the page via vm_page_reference().
 */
static inline void
mark_page_accessed(struct vm_page *page)
{
	vm_page_reference(page);
}

/*
 * Wire the page (under the page lock) so it cannot be freed while in
 * use; released again by put_page().
 */
static inline void
get_page(struct vm_page *page)
{
	vm_page_lock(page);
	vm_page_wire(page);
	vm_page_unlock(page);
}

/*
 * Linux-style "get user pages" interface, implemented elsewhere in the
 * LinuxKPI.  "gup_flags" takes the FOLL_* bits defined above.
 */
extern long
get_user_pages(unsigned long start, unsigned long nr_pages,
    int gup_flags, struct page **,
    struct vm_area_struct **);

extern int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **);

extern long
get_user_pages_remote(struct task_struct *, struct mm_struct *,
    unsigned long start, unsigned long nr_pages,
    int gup_flags, struct page **,
    struct vm_area_struct **);

/*
 * Drop the wiring taken by get_page()/get_user_pages().  If this was
 * the last wiring and the page belongs to no VM object, free it.
 */
static inline void
put_page(struct vm_page *page)
{
	vm_page_lock(page);
	/* vm_page_unwire() returns true when the last wiring is removed. */
	if (vm_page_unwire(page, PQ_ACTIVE) && page->object == NULL)
		vm_page_free(page);
	vm_page_unlock(page);
}

/* Copy one page's contents; Linux's (to, from) argument order maps to
 * pmap_copy_page()'s (src, dst). */
#define	copy_highpage(to, from) pmap_copy_page(from, to)

/*
 * Extract the protection bits from Linux-style vm_flags; the low bits
 * alias FreeBSD's VM_PROT_* values (see the defines above).
 */
static inline pgprot_t
vm_get_page_prot(unsigned long vm_flags)
{
	return (vm_flags & VM_PROT_ALL);
}

264static inline vm_page_t
265vmalloc_to_page(const void *addr)
266{
267	vm_paddr_t paddr;
268
269	paddr = pmap_kextract((vm_offset_t)addr);
270	return (PHYS_TO_VM_PAGE(paddr));
271}
272
/* Implemented elsewhere in the LinuxKPI. */
extern int is_vmalloc_addr(const void *addr);

#endif					/* _LINUX_MM_H_ */
