/*-
 * Copyright (c) 2004 Tim J. Robbins
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 2000 Marcel Moolenaar
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/capsicum.h>
#include <sys/file.h>
#include <sys/imgact.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_mmap.h>
#include <compat/linux/linux_persona.h>
#include <compat/linux/linux_util.h>

/*
 * Default size and guard-region size used when emulating a Linux
 * MAP_GROWSDOWN thread-stack mapping (see linux_mmap_common()).
 */
#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)

#if defined(__amd64__)
static void linux_fixup_prot(struct thread *td, int *prot);
#endif

/*
 * kern_mmap_fpcheck() callback: reject mapping of a descriptor that was
 * opened without read permission.
 *
 * Returns EACCES for descriptors lacking FREAD, 0 otherwise; the other
 * parameters are unused here but required by the callback signature.
 */
static int
linux_mmap_check_fp(struct file *fp, int flags, int prot, int maxprot)
{

	/* Linux mmap() just fails for O_WRONLY files */
	if ((fp->f_flag & FREAD) == 0)
		return (EACCES);

	return (0);
}

/*
 * Common implementation of the Linux mmap(2)/mmap2(2) system calls:
 * translate Linux protection/flag bits into their FreeBSD equivalents
 * and hand the request to kern_mmap_fpcheck().
 *
 * Returns 0 on success (the mapped address is delivered via the usual
 * syscall return mechanism, cf. the td_retval[0] trace below) or an
 * errno value on failure.
 */
int
linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
    int flags, int fd, off_t pos)
{
	struct proc *p = td->td_proc;
	struct vmspace *vms = td->td_proc->p_vmspace;
	int bsd_flags, error;
	struct file *fp;

	LINUX_CTR6(mmap2, "0x%lx, %ld, %ld, 0x%08lx, %ld, 0x%lx",
	    addr, len, prot, flags, fd, pos);

	error = 0;
	bsd_flags = 0;
	fp = NULL;

	/*
	 * Linux mmap(2):
	 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
	 *
	 * NOTE(review): this XOR only rejects the neither-set case directly;
	 * the both-set case produces MAP_SHARED|MAP_PRIVATE in bsd_flags,
	 * presumably rejected by kern_mmap_fpcheck() below — confirm.
	 */
	if (!((flags & LINUX_MAP_SHARED) ^ (flags & LINUX_MAP_PRIVATE)))
		return (EINVAL);

	/* Translate the Linux flag bits to their FreeBSD counterparts. */
	if (flags & LINUX_MAP_SHARED)
		bsd_flags |= MAP_SHARED;
	if (flags & LINUX_MAP_PRIVATE)
		bsd_flags |= MAP_PRIVATE;
	if (flags & LINUX_MAP_FIXED)
		bsd_flags |= MAP_FIXED;
	if (flags & LINUX_MAP_ANON) {
		/* Enforce pos to be on page boundary, then ignore. */
		if ((pos & PAGE_MASK) != 0)
			return (EINVAL);
		pos = 0;
		bsd_flags |= MAP_ANON;
	} else
		bsd_flags |= MAP_NOSYNC;
	if (flags & LINUX_MAP_GROWSDOWN)
		bsd_flags |= MAP_STACK;

#if defined(__amd64__)
	/*
	 * According to the Linux mmap(2) man page, "MAP_32BIT flag
	 * is ignored when MAP_FIXED is set."
	 */
	if ((flags & LINUX_MAP_32BIT) && (flags & LINUX_MAP_FIXED) == 0)
		bsd_flags |= MAP_32BIT;

	/*
	 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
	 * on Linux/i386 if the binary requires executable stack.
	 * We do this only for IA32 emulation as on native i386 this does not
	 * make sense without PAE.
	 *
	 * XXX. Linux checks that the file system is not mounted with noexec.
	 */
	linux_fixup_prot(td, &prot);
#endif

	/* Linux does not check file descriptor when MAP_ANONYMOUS is set. */
	fd = (bsd_flags & MAP_ANON) ? -1 : fd;
	if (flags & LINUX_MAP_GROWSDOWN) {
		/*
		 * The Linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the initial BOS, and as len, the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, Linux threads has an implicit internal
		 * limit to stack size of STACK_SIZE.  Its just not
		 * enforced explicitly in Linux.  But, here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It then maps the top SGROWSIZ bytes,
		 * and auto grows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		if ((caddr_t)addr + len > vms->vm_maxsaddr) {
			/*
			 * Some Linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			PROC_LOCK(p);
			vms->vm_maxsaddr = (char *)p->p_sysent->sv_usrstack -
			    lim_cur_proc(p, RLIMIT_STACK);
			PROC_UNLOCK(p);
		}

		/*
		 * This gives us our maximum stack size and a new BOS.
		 * If we're using VM_STACK, then mmap will just map
		 * the top SGROWSIZ bytes, and let the stack grow down
		 * to the limit at BOS.  If we're not using VM_STACK
		 * we map the full stack, since we don't have a way
		 * to autogrow it.
		 */
		if (len <= STACK_SIZE - GUARD_SIZE) {
			addr = addr - (STACK_SIZE - GUARD_SIZE - len);
			len = STACK_SIZE - GUARD_SIZE;
		}
	}

	/*
	 * FreeBSD is free to ignore the address hint if MAP_FIXED wasn't
	 * passed.  However, some Linux applications, like the ART runtime,
	 * depend on the hint.  If the MAP_FIXED wasn't passed, but the
	 * address is not zero, try with MAP_FIXED and MAP_EXCL first,
	 * and fall back to the normal behaviour if that fails.
	 */
	if (addr != 0 && (bsd_flags & MAP_FIXED) == 0 &&
	    (bsd_flags & MAP_EXCL) == 0) {
		error = kern_mmap_fpcheck(td, addr, len, prot,
		    bsd_flags | MAP_FIXED | MAP_EXCL, fd, pos,
		    linux_mmap_check_fp);
		if (error == 0)
			goto out;
	}

	/* Either no usable hint, or the MAP_FIXED|MAP_EXCL attempt failed. */
	error = kern_mmap_fpcheck(td, addr, len, prot, bsd_flags, fd, pos,
	    linux_mmap_check_fp);
out:
	LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);

	return (error);
}

/*
 * Common implementation of the Linux mprotect(2) system call.
 *
 * Strips the Linux-only PROT_GROWSDOWN/PROT_GROWSUP bits, rejects any
 * remaining non-standard protection bits with EINVAL, and forwards the
 * request to kern_mprotect().
 */
int
linux_mprotect_common(struct thread *td, uintptr_t addr, size_t len, int prot)
{

	/* XXX Ignore PROT_GROWSDOWN and PROT_GROWSUP for now. */
	prot &= ~(LINUX_PROT_GROWSDOWN | LINUX_PROT_GROWSUP);
	if ((prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
		return (EINVAL);

#if defined(__amd64__)
	/* Apply READ_IMPLIES_EXEC, as for mmap() above. */
	linux_fixup_prot(td, &prot);
#endif
	return (kern_mprotect(td, addr, len, prot));
}

/*
 * Implement Linux madvise(MADV_DONTNEED), which has unusual semantics: for
 * anonymous memory, pages in the range are immediately discarded.
 */
static int
linux_madvise_dontneed(struct thread *td, vm_offset_t start, vm_offset_t end)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t backing_object, object;
	vm_offset_t estart, eend;
	vm_pindex_t pstart, pend;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;

	if (!vm_map_range_valid(map, start, end))
		return (EINVAL);
	start = trunc_page(start);
	end = round_page(end);

	error = 0;
	vm_map_lock_read(map);
	/*
	 * Walk every map entry overlapping [start, end).  If start falls in
	 * a hole, begin at the first entry after it.
	 */
	if (!vm_map_lookup_entry(map, start, &entry))
		entry = entry->next;
	for (; entry->start < end; entry = entry->next) {
		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
			continue;

		/* Refuse to discard wired (e.g. mlock'ed) pages. */
		if (entry->wired_count != 0) {
			error = EINVAL;
			break;
		}

		object = entry->object.vm_object;
		if (object == NULL)
			continue;
		if ((object->flags & (OBJ_UNMANAGED | OBJ_FICTITIOUS)) != 0)
			continue;

		/*
		 * Clip the entry's object page range [pstart, pend) and the
		 * virtual range [estart, eend) to the requested region.
		 */
		pstart = OFF_TO_IDX(entry->offset);
		if (start > entry->start) {
			pstart += atop(start - entry->start);
			estart = start;
		} else {
			estart = entry->start;
		}
		pend = OFF_TO_IDX(entry->offset) +
		    atop(entry->end - entry->start);
		if (entry->end > end) {
			pend -= atop(entry->end - end);
			eend = end;
		} else {
			eend = entry->end;
		}

		if ((object->type == OBJT_DEFAULT ||
		    object->type == OBJT_SWAP) && object->handle == NULL &&
		    (object->flags & (OBJ_ONEMAPPING | OBJ_NOSPLIT)) ==
		    OBJ_ONEMAPPING) {
			/*
			 * Singly-mapped anonymous memory is discarded.  This
			 * does not match Linux's semantics when the object
			 * belongs to a shadow chain of length > 1, since
			 * subsequent faults may retrieve pages from an
			 * intermediate anonymous object.  However, handling
			 * this case correctly introduces a fair bit of
			 * complexity.
			 */
			VM_OBJECT_WLOCK(object);
			/*
			 * Re-check OBJ_ONEMAPPING under the object write
			 * lock; the unlocked test above was only a hint.
			 */
			if ((object->flags & OBJ_ONEMAPPING) != 0) {
				vm_object_collapse(object);
				vm_object_page_remove(object, pstart, pend, 0);
				backing_object = object->backing_object;
				/* Warn about the shadow-chain case above. */
				if (backing_object != NULL &&
				    (backing_object->type == OBJT_DEFAULT ||
				    backing_object->type == OBJT_SWAP) &&
				    backing_object->handle == NULL &&
				    (backing_object->flags & OBJ_NOSPLIT) == 0)
					linux_msg(td,
					    "possibly incorrect MADV_DONTNEED");
				VM_OBJECT_WUNLOCK(object);
				continue;
			}
			VM_OBJECT_WUNLOCK(object);
		}

		/*
		 * Handle shared mappings.  Remove them outright instead of
		 * calling pmap_advise(), for consistency with Linux.
		 */
		pmap_remove(map->pmap, estart, eend);
		vm_object_madvise(object, pstart, pend, MADV_DONTNEED);
	}
	vm_map_unlock_read(map);

	return (error);
}

/*
 * Common implementation of the Linux madvise(2) system call: map each
 * Linux advice value onto the closest FreeBSD equivalent (kern_madvise()
 * or kern_minherit()), handle MADV_DONTNEED's discard semantics via
 * linux_madvise_dontneed(), and return EINVAL for unsupported advice.
 *
 * NOTE(review): the linux_msg() calls below pass curthread while the
 * kern_* calls use td; presumably curthread == td here — confirm.
 */
int
linux_madvise_common(struct thread *td, uintptr_t addr, size_t len, int behav)
{

	switch (behav) {
	case LINUX_MADV_NORMAL:
		return (kern_madvise(td, addr, len, MADV_NORMAL));
	case LINUX_MADV_RANDOM:
		return (kern_madvise(td, addr, len, MADV_RANDOM));
	case LINUX_MADV_SEQUENTIAL:
		return (kern_madvise(td, addr, len, MADV_SEQUENTIAL));
	case LINUX_MADV_WILLNEED:
		return (kern_madvise(td, addr, len, MADV_WILLNEED));
	case LINUX_MADV_DONTNEED:
		/* Linux DONTNEED discards pages; see above. */
		return (linux_madvise_dontneed(td, addr, addr + len));
	case LINUX_MADV_FREE:
		return (kern_madvise(td, addr, len, MADV_FREE));
	case LINUX_MADV_REMOVE:
		linux_msg(curthread, "unsupported madvise MADV_REMOVE");
		return (EINVAL);
	case LINUX_MADV_DONTFORK:
		return (kern_minherit(td, addr, len, INHERIT_NONE));
	case LINUX_MADV_DOFORK:
		return (kern_minherit(td, addr, len, INHERIT_COPY));
	case LINUX_MADV_MERGEABLE:
		linux_msg(curthread, "unsupported madvise MADV_MERGEABLE");
		return (EINVAL);
	case LINUX_MADV_UNMERGEABLE:
		/* We don't merge anyway. */
		return (0);
	case LINUX_MADV_HUGEPAGE:
		/* Ignored; on FreeBSD huge pages are always on. */
		return (0);
	case LINUX_MADV_NOHUGEPAGE:
#if 0
		/*
		 * Don't warn - Firefox uses it a lot, and in real Linux it's
		 * an optional feature.
		 */
		linux_msg(curthread, "unsupported madvise MADV_NOHUGEPAGE");
#endif
		return (EINVAL);
	case LINUX_MADV_DONTDUMP:
		return (kern_madvise(td, addr, len, MADV_NOCORE));
	case LINUX_MADV_DODUMP:
		return (kern_madvise(td, addr, len, MADV_CORE));
	case LINUX_MADV_WIPEONFORK:
		return (kern_minherit(td, addr, len, INHERIT_ZERO));
	case LINUX_MADV_KEEPONFORK:
		return (kern_minherit(td, addr, len, INHERIT_COPY));
	case LINUX_MADV_HWPOISON:
		linux_msg(curthread, "unsupported madvise MADV_HWPOISON");
		return (EINVAL);
	case LINUX_MADV_SOFT_OFFLINE:
		linux_msg(curthread, "unsupported madvise MADV_SOFT_OFFLINE");
		return (EINVAL);
	default:
		linux_msg(curthread, "unsupported madvise behav %d", behav);
		return (EINVAL);
	}
}

#if defined(__amd64__)
/*
 * Implement the Linux READ_IMPLIES_EXEC personality: for 32-bit (SV_ILP32)
 * processes whose persona has LINUX_READ_IMPLIES_EXEC set, a readable
 * mapping is silently made executable as well.
 *
 * NOTE(review): pem_find() is dereferenced without a NULL check;
 * presumably it never returns NULL for a Linux process — confirm.
 */
static void
linux_fixup_prot(struct thread *td, int *prot)
{
	struct linux_pemuldata *pem;

	if (SV_PROC_FLAG(td->td_proc, SV_ILP32) && *prot & PROT_READ) {
		pem = pem_find(td->td_proc);
		if (pem->persona & LINUX_READ_IMPLIES_EXEC)
			*prot |= PROT_EXEC;
	}

}
#endif