/* sysv_shm.c revision 76908 */
1/* $FreeBSD: head/sys/kern/sysv_shm.c 76908 2001-05-20 20:37:47Z alfred $ */ 2/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */ 3 4/* 5 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Adam Glass and Charles 18 * Hannum. 19 * 4. The names of the authors may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "opt_compat.h"
#include "opt_rlimit.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap));

static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum));

/*
 * Dispatch table for the old-style shmsys(2) multiplexer syscall; indexed
 * by its `which' argument (see shmsys() below).
 * XXX casting to (sy_call_t *) is bogus, as usual.
 */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

/*
 * Segment state flags, kept in shm_perm.mode above the ACCESSPERMS bits:
 *   SHMSEG_FREE      - slot in shmsegs[] is unused
 *   SHMSEG_REMOVED   - IPC_RMID done (or allocation in progress); segment
 *                      is destroyed once the last attachment goes away
 *   SHMSEG_ALLOCATED - slot holds a live segment
 *   SHMSEG_WANTED    - another thread is sleeping in shmget_existing()
 *                      waiting for this segment's allocation to finish
 */
#define	SHMSEG_FREE	0x0200
#define	SHMSEG_REMOVED	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED	0x1000

/*
 * shm_last_free  - hint: most recently freed index in shmsegs[], -1 if none
 * shm_nused      - number of allocated segments
 * shm_committed  - total committed pages across all segments (in clicks)
 * shmalloced     - current capacity of the shmsegs[] array
 */
static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds *shmsegs;

/* Per-segment kernel state, hung off shmid_ds.shm_internal. */
struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;	/* backing VM object (phys or swap pager) */
};

/* One attachment slot in a process's per-vmspace table (vm_shm). */
struct shmmap_state {
	vm_offset_t va;		/* user virtual address of the mapping */
	int shmid;		/* attached segment id, or -1 if slot free */
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
static void shmrealloc __P((void));
static void shminit __P((void));
static int sysvshm_modload __P((struct module *, int, void *));
static int shmunload __P((void));
static void shmexit_myhook __P((struct proc *p));
static void shmfork_myhook __P((struct proc *p1, struct proc *p2));

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)	/* max segment size, bytes */
#endif
#ifndef SHMMIN
#define	SHMMIN	1		/* min segment size, bytes */
#endif
#ifndef SHMMNI
#define	SHMMNI	192		/* max number of segments, system-wide */
#endif
#ifndef SHMSEG
#define	SHMSEG	128		/* max attachments per process */
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)	/* max pages committed, system-wide */
#endif

struct shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

/* Nonzero: back new segments with the physical-memory pager (unswappable). */
static int shm_use_phys;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RD, &shminfo.shmseg, 0, "");
143SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, ""); 144SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, ""); 145 146static int 147shm_find_segment_by_key(key) 148 key_t key; 149{ 150 int i; 151 152 for (i = 0; i < shmalloced; i++) 153 if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) && 154 shmsegs[i].shm_perm.key == key) 155 return i; 156 return -1; 157} 158 159static struct shmid_ds * 160shm_find_segment_by_shmid(shmid) 161 int shmid; 162{ 163 int segnum; 164 struct shmid_ds *shmseg; 165 166 segnum = IPCID_TO_IX(shmid); 167 if (segnum < 0 || segnum >= shmalloced) 168 return NULL; 169 shmseg = &shmsegs[segnum]; 170 if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED)) 171 != SHMSEG_ALLOCATED || 172 shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) 173 return NULL; 174 return shmseg; 175} 176 177static void 178shm_deallocate_segment(shmseg) 179 struct shmid_ds *shmseg; 180{ 181 struct shm_handle *shm_handle; 182 size_t size; 183 184 shm_handle = shmseg->shm_internal; 185 mtx_lock(&vm_mtx); 186 vm_object_deallocate(shm_handle->shm_object); 187 mtx_unlock(&vm_mtx); 188 free((caddr_t)shm_handle, M_SHM); 189 shmseg->shm_internal = NULL; 190 size = round_page(shmseg->shm_segsz); 191 shm_committed -= btoc(size); 192 shm_nused--; 193 shmseg->shm_perm.mode = SHMSEG_FREE; 194} 195 196static int 197shm_delete_mapping(p, shmmap_s) 198 struct proc *p; 199 struct shmmap_state *shmmap_s; 200{ 201 struct shmid_ds *shmseg; 202 int segnum, result; 203 size_t size; 204 205 segnum = IPCID_TO_IX(shmmap_s->shmid); 206 shmseg = &shmsegs[segnum]; 207 size = round_page(shmseg->shm_segsz); 208 mtx_lock(&vm_mtx); 209 result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, shmmap_s->va + size); 210 mtx_unlock(&vm_mtx); 211 if (result != KERN_SUCCESS) 212 return EINVAL; 213 shmmap_s->shmid = -1; 214 shmseg->shm_dtime = time_second; 215 if ((--shmseg->shm_nattch <= 0) && 216 (shmseg->shm_perm.mode & SHMSEG_REMOVED)) { 217 
shm_deallocate_segment(shmseg); 218 shm_last_free = segnum; 219 } 220 return 0; 221} 222 223#ifndef _SYS_SYSPROTO_H_ 224struct shmdt_args { 225 void *shmaddr; 226}; 227#endif 228 229int 230shmdt(p, uap) 231 struct proc *p; 232 struct shmdt_args *uap; 233{ 234 struct shmmap_state *shmmap_s; 235 int i; 236 237 if (!jail_sysvipc_allowed && jailed(p->p_ucred)) 238 return (ENOSYS); 239 240 shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 241 if (shmmap_s == NULL) 242 return EINVAL; 243 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) 244 if (shmmap_s->shmid != -1 && 245 shmmap_s->va == (vm_offset_t)uap->shmaddr) 246 break; 247 if (i == shminfo.shmseg) 248 return EINVAL; 249 return shm_delete_mapping(p, shmmap_s); 250} 251 252#ifndef _SYS_SYSPROTO_H_ 253struct shmat_args { 254 int shmid; 255 void *shmaddr; 256 int shmflg; 257}; 258#endif 259 260int 261shmat(p, uap) 262 struct proc *p; 263 struct shmat_args *uap; 264{ 265 int error, i, flags; 266 struct shmid_ds *shmseg; 267 struct shmmap_state *shmmap_s = NULL; 268 struct shm_handle *shm_handle; 269 vm_offset_t attach_va; 270 vm_prot_t prot; 271 vm_size_t size; 272 int rv; 273 274 if (!jail_sysvipc_allowed && jailed(p->p_ucred)) 275 return (ENOSYS); 276 277 shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 278 if (shmmap_s == NULL) { 279 size = shminfo.shmseg * sizeof(struct shmmap_state); 280 shmmap_s = malloc(size, M_SHM, M_WAITOK); 281 for (i = 0; i < shminfo.shmseg; i++) 282 shmmap_s[i].shmid = -1; 283 p->p_vmspace->vm_shm = (caddr_t)shmmap_s; 284 } 285 shmseg = shm_find_segment_by_shmid(uap->shmid); 286 if (shmseg == NULL) 287 return EINVAL; 288 error = ipcperm(p, &shmseg->shm_perm, 289 (uap->shmflg & SHM_RDONLY) ? 
IPC_R : IPC_R|IPC_W); 290 if (error) 291 return error; 292 for (i = 0; i < shminfo.shmseg; i++) { 293 if (shmmap_s->shmid == -1) 294 break; 295 shmmap_s++; 296 } 297 if (i >= shminfo.shmseg) 298 return EMFILE; 299 size = round_page(shmseg->shm_segsz); 300#ifdef VM_PROT_READ_IS_EXEC 301 prot = VM_PROT_READ | VM_PROT_EXECUTE; 302#else 303 prot = VM_PROT_READ; 304#endif 305 if ((uap->shmflg & SHM_RDONLY) == 0) 306 prot |= VM_PROT_WRITE; 307 flags = MAP_ANON | MAP_SHARED; 308 if (uap->shmaddr) { 309 flags |= MAP_FIXED; 310 if (uap->shmflg & SHM_RND) 311 attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1); 312 else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) 313 attach_va = (vm_offset_t)uap->shmaddr; 314 else 315 return EINVAL; 316 } else { 317 /* This is just a hint to vm_map_find() about where to put it. */ 318 attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ); 319 } 320 321 shm_handle = shmseg->shm_internal; 322 mtx_lock(&vm_mtx); 323 vm_object_reference(shm_handle->shm_object); 324 rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object, 325 0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0); 326 if (rv != KERN_SUCCESS) { 327 mtx_unlock(&vm_mtx); 328 return ENOMEM; 329 } 330 vm_map_inherit(&p->p_vmspace->vm_map, 331 attach_va, attach_va + size, VM_INHERIT_SHARE); 332 mtx_unlock(&vm_mtx); 333 334 shmmap_s->va = attach_va; 335 shmmap_s->shmid = uap->shmid; 336 shmseg->shm_lpid = p->p_pid; 337 shmseg->shm_atime = time_second; 338 shmseg->shm_nattch++; 339 p->p_retval[0] = attach_va; 340 return 0; 341} 342 343struct oshmid_ds { 344 struct ipc_perm shm_perm; /* operation perms */ 345 int shm_segsz; /* size of segment (bytes) */ 346 ushort shm_cpid; /* pid, creator */ 347 ushort shm_lpid; /* pid, last operation */ 348 short shm_nattch; /* no. 
of current attaches */ 349 time_t shm_atime; /* last attach time */ 350 time_t shm_dtime; /* last detach time */ 351 time_t shm_ctime; /* last change time */ 352 void *shm_handle; /* internal handle for shm segment */ 353}; 354 355struct oshmctl_args { 356 int shmid; 357 int cmd; 358 struct oshmid_ds *ubuf; 359}; 360 361static int 362oshmctl(p, uap) 363 struct proc *p; 364 struct oshmctl_args *uap; 365{ 366#ifdef COMPAT_43 367 int error; 368 struct shmid_ds *shmseg; 369 struct oshmid_ds outbuf; 370 371 if (!jail_sysvipc_allowed && jailed(p->p_ucred)) 372 return (ENOSYS); 373 374 shmseg = shm_find_segment_by_shmid(uap->shmid); 375 if (shmseg == NULL) 376 return EINVAL; 377 switch (uap->cmd) { 378 case IPC_STAT: 379 error = ipcperm(p, &shmseg->shm_perm, IPC_R); 380 if (error) 381 return error; 382 outbuf.shm_perm = shmseg->shm_perm; 383 outbuf.shm_segsz = shmseg->shm_segsz; 384 outbuf.shm_cpid = shmseg->shm_cpid; 385 outbuf.shm_lpid = shmseg->shm_lpid; 386 outbuf.shm_nattch = shmseg->shm_nattch; 387 outbuf.shm_atime = shmseg->shm_atime; 388 outbuf.shm_dtime = shmseg->shm_dtime; 389 outbuf.shm_ctime = shmseg->shm_ctime; 390 outbuf.shm_handle = shmseg->shm_internal; 391 error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf)); 392 if (error) 393 return error; 394 break; 395 default: 396 /* XXX casting to (sy_call_t *) is bogus, as usual. 
*/ 397 return ((sy_call_t *)shmctl)(p, uap); 398 } 399 return 0; 400#else 401 return EINVAL; 402#endif 403} 404 405#ifndef _SYS_SYSPROTO_H_ 406struct shmctl_args { 407 int shmid; 408 int cmd; 409 struct shmid_ds *buf; 410}; 411#endif 412 413int 414shmctl(p, uap) 415 struct proc *p; 416 struct shmctl_args *uap; 417{ 418 int error; 419 struct shmid_ds inbuf; 420 struct shmid_ds *shmseg; 421 422 if (!jail_sysvipc_allowed && jailed(p->p_ucred)) 423 return (ENOSYS); 424 425 shmseg = shm_find_segment_by_shmid(uap->shmid); 426 if (shmseg == NULL) 427 return EINVAL; 428 switch (uap->cmd) { 429 case IPC_STAT: 430 error = ipcperm(p, &shmseg->shm_perm, IPC_R); 431 if (error) 432 return error; 433 error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf)); 434 if (error) 435 return error; 436 break; 437 case IPC_SET: 438 error = ipcperm(p, &shmseg->shm_perm, IPC_M); 439 if (error) 440 return error; 441 error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf)); 442 if (error) 443 return error; 444 shmseg->shm_perm.uid = inbuf.shm_perm.uid; 445 shmseg->shm_perm.gid = inbuf.shm_perm.gid; 446 shmseg->shm_perm.mode = 447 (shmseg->shm_perm.mode & ~ACCESSPERMS) | 448 (inbuf.shm_perm.mode & ACCESSPERMS); 449 shmseg->shm_ctime = time_second; 450 break; 451 case IPC_RMID: 452 error = ipcperm(p, &shmseg->shm_perm, IPC_M); 453 if (error) 454 return error; 455 shmseg->shm_perm.key = IPC_PRIVATE; 456 shmseg->shm_perm.mode |= SHMSEG_REMOVED; 457 if (shmseg->shm_nattch <= 0) { 458 shm_deallocate_segment(shmseg); 459 shm_last_free = IPCID_TO_IX(uap->shmid); 460 } 461 break; 462#if 0 463 case SHM_LOCK: 464 case SHM_UNLOCK: 465#endif 466 default: 467 return EINVAL; 468 } 469 return 0; 470} 471 472#ifndef _SYS_SYSPROTO_H_ 473struct shmget_args { 474 key_t key; 475 size_t size; 476 int shmflg; 477}; 478#endif 479 480static int 481shmget_existing(p, uap, mode, segnum) 482 struct proc *p; 483 struct shmget_args *uap; 484 int mode; 485 int segnum; 486{ 487 struct shmid_ds *shmseg; 488 int error; 489 
490 shmseg = &shmsegs[segnum]; 491 if (shmseg->shm_perm.mode & SHMSEG_REMOVED) { 492 /* 493 * This segment is in the process of being allocated. Wait 494 * until it's done, and look the key up again (in case the 495 * allocation failed or it was freed). 496 */ 497 shmseg->shm_perm.mode |= SHMSEG_WANTED; 498 error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0); 499 if (error) 500 return error; 501 return EAGAIN; 502 } 503 if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) 504 return EEXIST; 505 error = ipcperm(p, &shmseg->shm_perm, mode); 506 if (error) 507 return error; 508 if (uap->size && uap->size > shmseg->shm_segsz) 509 return EINVAL; 510 p->p_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); 511 return 0; 512} 513 514static int 515shmget_allocate_segment(p, uap, mode) 516 struct proc *p; 517 struct shmget_args *uap; 518 int mode; 519{ 520 int i, segnum, shmid, size; 521 struct ucred *cred = p->p_ucred; 522 struct shmid_ds *shmseg; 523 struct shm_handle *shm_handle; 524 525 if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax) 526 return EINVAL; 527 if (shm_nused >= shminfo.shmmni) /* any shmids left? */ 528 return ENOSPC; 529 size = round_page(uap->size); 530 if (shm_committed + btoc(size) > shminfo.shmall) 531 return ENOMEM; 532 if (shm_last_free < 0) { 533 shmrealloc(); /* maybe expand the shmsegs[] array */ 534 for (i = 0; i < shmalloced; i++) 535 if (shmsegs[i].shm_perm.mode & SHMSEG_FREE) 536 break; 537 if (i == shmalloced) 538 return ENOSPC; 539 segnum = i; 540 } else { 541 segnum = shm_last_free; 542 shm_last_free = -1; 543 } 544 shmseg = &shmsegs[segnum]; 545 /* 546 * In case we sleep in malloc(), mark the segment present but deleted 547 * so that noone else tries to create the same key. 
548 */ 549 shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED; 550 shmseg->shm_perm.key = uap->key; 551 shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff; 552 shm_handle = (struct shm_handle *) 553 malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK); 554 shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); 555 556 /* 557 * We make sure that we have allocated a pager before we need 558 * to. 559 */ 560 mtx_lock(&vm_mtx); 561 if (shm_use_phys) { 562 shm_handle->shm_object = 563 vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0); 564 } else { 565 shm_handle->shm_object = 566 vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0); 567 } 568 vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING); 569 vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT); 570 mtx_unlock(&vm_mtx); 571 572 shmseg->shm_internal = shm_handle; 573 shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid; 574 shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid; 575 shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) | 576 (mode & ACCESSPERMS) | SHMSEG_ALLOCATED; 577 shmseg->shm_segsz = uap->size; 578 shmseg->shm_cpid = p->p_pid; 579 shmseg->shm_lpid = shmseg->shm_nattch = 0; 580 shmseg->shm_atime = shmseg->shm_dtime = 0; 581 shmseg->shm_ctime = time_second; 582 shm_committed += btoc(size); 583 shm_nused++; 584 if (shmseg->shm_perm.mode & SHMSEG_WANTED) { 585 /* 586 * Somebody else wanted this key while we were asleep. Wake 587 * them up now. 
588 */ 589 shmseg->shm_perm.mode &= ~SHMSEG_WANTED; 590 wakeup((caddr_t)shmseg); 591 } 592 p->p_retval[0] = shmid; 593 return 0; 594} 595 596int 597shmget(p, uap) 598 struct proc *p; 599 struct shmget_args *uap; 600{ 601 int segnum, mode, error; 602 603 if (!jail_sysvipc_allowed && jailed(p->p_ucred)) 604 return (ENOSYS); 605 606 mode = uap->shmflg & ACCESSPERMS; 607 if (uap->key != IPC_PRIVATE) { 608 again: 609 segnum = shm_find_segment_by_key(uap->key); 610 if (segnum >= 0) { 611 error = shmget_existing(p, uap, mode, segnum); 612 if (error == EAGAIN) 613 goto again; 614 return error; 615 } 616 if ((uap->shmflg & IPC_CREAT) == 0) 617 return ENOENT; 618 } 619 return shmget_allocate_segment(p, uap, mode); 620} 621 622int 623shmsys(p, uap) 624 struct proc *p; 625 /* XXX actually varargs. */ 626 struct shmsys_args /* { 627 u_int which; 628 int a2; 629 int a3; 630 int a4; 631 } */ *uap; 632{ 633 634 if (!jail_sysvipc_allowed && jailed(p->p_ucred)) 635 return (ENOSYS); 636 637 if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) 638 return EINVAL; 639 return ((*shmcalls[uap->which])(p, &uap->a2)); 640} 641 642static void 643shmfork_myhook(p1, p2) 644 struct proc *p1, *p2; 645{ 646 struct shmmap_state *shmmap_s; 647 size_t size; 648 int i; 649 650 size = shminfo.shmseg * sizeof(struct shmmap_state); 651 shmmap_s = malloc(size, M_SHM, M_WAITOK); 652 bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size); 653 p2->p_vmspace->vm_shm = (caddr_t)shmmap_s; 654 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) 655 if (shmmap_s->shmid != -1) 656 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++; 657} 658 659static void 660shmexit_myhook(p) 661 struct proc *p; 662{ 663 struct shmmap_state *shmmap_s; 664 int i; 665 666 shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 667 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) 668 if (shmmap_s->shmid != -1) 669 shm_delete_mapping(p, shmmap_s); 670 free((caddr_t)p->p_vmspace->vm_shm, M_SHM); 671 p->p_vmspace->vm_shm = 
NULL; 672} 673 674static void 675shmrealloc(void) 676{ 677 int i; 678 struct shmid_ds *newsegs; 679 680 if (shmalloced >= shminfo.shmmni) 681 return; 682 683 newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK); 684 if (newsegs == NULL) 685 return; 686 for (i = 0; i < shmalloced; i++) 687 bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0])); 688 for (; i < shminfo.shmmni; i++) { 689 shmsegs[i].shm_perm.mode = SHMSEG_FREE; 690 shmsegs[i].shm_perm.seq = 0; 691 } 692 free(shmsegs, M_SHM); 693 shmsegs = newsegs; 694 shmalloced = shminfo.shmmni; 695} 696 697static void 698shminit() 699{ 700 int i; 701 702 shmalloced = shminfo.shmmni; 703 shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK); 704 if (shmsegs == NULL) 705 panic("cannot allocate initial memory for sysvshm"); 706 for (i = 0; i < shmalloced; i++) { 707 shmsegs[i].shm_perm.mode = SHMSEG_FREE; 708 shmsegs[i].shm_perm.seq = 0; 709 } 710 shm_last_free = 0; 711 shm_nused = 0; 712 shm_committed = 0; 713 shmexit_hook = &shmexit_myhook; 714 shmfork_hook = &shmfork_myhook; 715} 716 717static int 718shmunload() 719{ 720 721 if (shm_nused > 0) 722 return (EBUSY); 723 724 free(shmsegs, M_SHM); 725 shmexit_hook = NULL; 726 shmfork_hook = NULL; 727 return (0); 728} 729 730static int 731sysvshm_modload(struct module *module, int cmd, void *arg) 732{ 733 int error = 0; 734 735 switch (cmd) { 736 case MOD_LOAD: 737 shminit(); 738 break; 739 case MOD_UNLOAD: 740 error = shmunload(); 741 break; 742 case MOD_SHUTDOWN: 743 break; 744 default: 745 error = EINVAL; 746 break; 747 } 748 return (error); 749} 750 751static moduledata_t sysvshm_mod = { 752 "sysvshm", 753 &sysvshm_modload, 754 NULL 755}; 756 757SYSCALL_MODULE_HELPER(shmsys, 4); 758SYSCALL_MODULE_HELPER(shmat, 3); 759SYSCALL_MODULE_HELPER(shmctl, 3); 760SYSCALL_MODULE_HELPER(shmdt, 1); 761SYSCALL_MODULE_HELPER(shmget, 3); 762 763DECLARE_MODULE(sysvshm, sysvshm_mod, 764 SI_SUB_SYSV_SHM, SI_ORDER_FIRST); 765MODULE_VERSION(sysvshm, 1); 766