/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
 *
 *	@(#)vm_unix.c	8.1 (Berkeley) 6/11/93
 */

#include "opt_compat.h"

/*
 * Traditional sbrk/grow interface to VM
 */
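
/*
 * Userland reaches this interface through the break(2) system call; the
 * C library's brk() and sbrk() wrappers are built on top of it and are
 * ultimately serviced by sys_obreak() below.
 */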

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/vm/vm_unix.c 341467 2018-12-04 15:04:48Z emaste $");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#ifndef _SYS_SYSPROTO_H_
struct obreak_args {
	char *nsize;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
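/*
 * Grow or shrink the process data segment to the address requested in
 * uap->nsize, rounded up to a page boundary.  Growth is checked against
 * RLIMIT_DATA and RLIMIT_VMEM (and RLIMIT_MEMLOCK when the map has
 * MAP_WIREFUTURE set); shrinking simply unmaps the vacated range.
 */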
int
sys_obreak(struct thread *td, struct obreak_args *uap)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	vm_map_t map = &vm->vm_map;
	vm_offset_t new, old, base;
	rlim_t datalim, lmemlim, vmemlim;
	int prot, rv;
	int error = 0;
	boolean_t do_map_wirefuture;

	datalim = lim_cur(td, RLIMIT_DATA);
	lmemlim = lim_cur(td, RLIMIT_MEMLOCK);
	vmemlim = lim_cur(td, RLIMIT_VMEM);

	do_map_wirefuture = FALSE;
	new = round_page((vm_offset_t)uap->nsize);
	vm_map_lock(map);

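	/*
	 * "base" is the page-aligned start of the data segment and "old"
	 * its current end, i.e. the current break address.
	 */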
	base = round_page((vm_offset_t) vm->vm_daddr);
	old = base + ctob(vm->vm_dsize);
	if (new > base) {
		/*
		 * Check the resource limit, but allow a process to reduce
		 * its usage, even if it remains over the limit.
		 */
		if (new - base > datalim && new > old) {
			error = ENOMEM;
			goto done;
		}
		if (new > vm_map_max(map)) {
			error = ENOMEM;
			goto done;
		}
	} else if (new < base) {
		/*
		 * This is simply an invalid value.  If someone wants to
		 * do fancy address space manipulations, mmap and munmap
		 * can do most of what the user would want.
		 */
		error = EINVAL;
		goto done;
	}
	if (new > old) {
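		/*
		 * Growing: with MAP_WIREFUTURE the new pages will be wired
		 * immediately, so the growth must also fit under
		 * RLIMIT_MEMLOCK unless the vm.old_mlock compatibility
		 * sysctl is enabled.
		 */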
		if (!old_mlock && map->flags & MAP_WIREFUTURE) {
			if (ptoa(pmap_wired_count(map->pmap)) +
			    (new - old) > lmemlim) {
				error = ENOMEM;
				goto done;
			}
		}
		if (map->size + (new - old) > vmemlim) {
			error = ENOMEM;
			goto done;
		}
#ifdef RACCT
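		/*
		 * Charge the racct resource containers before the map is
		 * grown; if any charge fails, roll the earlier ones back
		 * with racct_set_force() so the accounting stays consistent.
		 */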
		if (racct_enable) {
			PROC_LOCK(td->td_proc);
			error = racct_set(td->td_proc, RACCT_DATA, new - base);
			if (error != 0) {
				PROC_UNLOCK(td->td_proc);
				error = ENOMEM;
				goto done;
			}
			error = racct_set(td->td_proc, RACCT_VMEM,
			    map->size + (new - old));
			if (error != 0) {
				racct_set_force(td->td_proc, RACCT_DATA,
				    old - base);
				PROC_UNLOCK(td->td_proc);
				error = ENOMEM;
				goto done;
			}
			if (!old_mlock && map->flags & MAP_WIREFUTURE) {
				error = racct_set(td->td_proc, RACCT_MEMLOCK,
				    ptoa(pmap_wired_count(map->pmap)) +
				    (new - old));
				if (error != 0) {
					racct_set_force(td->td_proc, RACCT_DATA,
					    old - base);
					racct_set_force(td->td_proc, RACCT_VMEM,
					    map->size);
					PROC_UNLOCK(td->td_proc);
					error = ENOMEM;
					goto done;
				}
			}
			PROC_UNLOCK(td->td_proc);
		}
#endif
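		/*
		 * New pages are mapped read/write; 32-bit binaries on amd64
		 * may also get execute permission when i386_read_exec is
		 * enabled, mirroring historical i386 behaviour.
		 */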
		prot = VM_PROT_RW;
#ifdef COMPAT_FREEBSD32
#if defined(__amd64__)
		if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32))
			prot |= VM_PROT_EXECUTE;
#endif
#endif
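		/*
		 * Extend the data segment with anonymous, zero-filled
		 * memory covering [old, new).
		 */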
		rv = vm_map_insert(map, NULL, 0, old, new, prot, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
#ifdef RACCT
			if (racct_enable) {
				PROC_LOCK(td->td_proc);
				racct_set_force(td->td_proc,
				    RACCT_DATA, old - base);
				racct_set_force(td->td_proc,
				    RACCT_VMEM, map->size);
				if (!old_mlock && map->flags & MAP_WIREFUTURE) {
					racct_set_force(td->td_proc,
					    RACCT_MEMLOCK,
					    ptoa(pmap_wired_count(map->pmap)));
				}
				PROC_UNLOCK(td->td_proc);
			}
#endif
			error = ENOMEM;
			goto done;
		}
		vm->vm_dsize += btoc(new - old);
		/*
		 * Handle the MAP_WIREFUTURE case for legacy applications,
		 * by marking the newly mapped range of pages as wired.
		 * We are not required to perform a corresponding
		 * vm_map_unwire() before vm_map_delete() below, as
		 * it will forcibly unwire the pages in the range.
		 *
		 * XXX If the pages cannot be wired, no error is returned.
		 */
		if ((map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) {
			if (bootverbose)
				printf("obreak: MAP_WIREFUTURE set\n");
			do_map_wirefuture = TRUE;
		}
	} else if (new < old) {
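		/*
		 * Shrinking: unmap everything between the new and the old
		 * break address and update the accounting accordingly.
		 */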
		rv = vm_map_delete(map, new, old);
		if (rv != KERN_SUCCESS) {
			error = ENOMEM;
			goto done;
		}
		vm->vm_dsize -= btoc(old - new);
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(td->td_proc);
			racct_set_force(td->td_proc, RACCT_DATA, new - base);
			racct_set_force(td->td_proc, RACCT_VMEM, map->size);
			if (!old_mlock && map->flags & MAP_WIREFUTURE) {
				racct_set_force(td->td_proc, RACCT_MEMLOCK,
				    ptoa(pmap_wired_count(map->pmap)));
			}
			PROC_UNLOCK(td->td_proc);
		}
#endif
	}
done:
	vm_map_unlock(map);

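	/*
	 * Wiring is deferred until after the map lock has been released,
	 * since vm_map_wire() acquires the map lock itself.
	 */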
	if (do_map_wirefuture)
		(void) vm_map_wire(map, old, new,
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ovadvise_args {
	int anom;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
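/*
 * The historical vadvise(2) system call is obsolete and unimplemented;
 * madvise(2) provides the replacement functionality.
 */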
int
sys_ovadvise(struct thread *td, struct ovadvise_args *uap)
{
	/* START_GIANT_OPTIONAL */
	/* END_GIANT_OPTIONAL */
	return (EINVAL);
}
