/* $Id: sysv_shm.c,v 1.8 1995/08/30 00:33:02 bde Exp $ */
2/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
3
4/*
5 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Adam Glass and Charles
18 * Hannum.
19 * 4. The names of the authors may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/types.h>
35#include <sys/param.h>
36#include <sys/kernel.h>
37#include <sys/shm.h>
38#include <sys/proc.h>
39#include <sys/uio.h>
40#include <sys/time.h>
41#include <sys/malloc.h>
42#include <sys/mman.h>
43#include <sys/systm.h>
44#include <sys/stat.h>
45
46#include <vm/vm.h>
47#include <vm/vm_map.h>
48#include <vm/vm_map.h>
49#include <vm/vm_kern.h>
50
51static void shminit __P((caddr_t));
51static void shminit __P((void *));
52SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
53
/* Old-style (K&R) declarations of the shm syscall implementations. */
int oshmctl();
int shmat(), shmctl(), shmdt(), shmget();
/* Dispatch table indexed by shmsys(2)'s "which" argument; order is ABI. */
int (*shmcalls[])() = { shmat, oshmctl, shmdt, shmget, shmctl };
57
/*
 * Segment state flags, kept in shm_perm.mode above the ACCESSPERMS
 * permission bits.
 */
#define SHMSEG_FREE 0x0200		/* table slot is unused */
#define SHMSEG_REMOVED 0x0400		/* IPC_RMID'd; destroy on last detach */
#define SHMSEG_ALLOCATED 0x0800		/* slot holds a live segment */
#define SHMSEG_WANTED 0x1000		/* a sleeper waits for allocation to finish */
62
vm_map_t sysvshm_map;		/* kernel submap backing all segments */
int shm_last_free, shm_nused, shm_committed;	/* allocator bookkeeping */
struct shmid_ds *shmsegs;	/* segment table, shminfo.shmmni entries */

/* Kernel-internal handle: where a segment's pages live in sysvshm_map. */
struct shm_handle {
	vm_offset_t kva;
};

/* One per-process attach record; shmid == -1 marks a free slot. */
struct shmmap_state {
	vm_offset_t va;		/* user virtual address of the attach */
	int shmid;
};
75
/* Internal helpers, defined below. */
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
80
81static int
82shm_find_segment_by_key(key)
83 key_t key;
84{
85 int i;
86
87 for (i = 0; i < shminfo.shmmni; i++)
88 if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
89 shmsegs[i].shm_perm.key == key)
90 return i;
91 return -1;
92}
93
94static struct shmid_ds *
95shm_find_segment_by_shmid(shmid)
96 int shmid;
97{
98 int segnum;
99 struct shmid_ds *shmseg;
100
101 segnum = IPCID_TO_IX(shmid);
102 if (segnum < 0 || segnum >= shminfo.shmmni)
103 return NULL;
104 shmseg = &shmsegs[segnum];
105 if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
106 != SHMSEG_ALLOCATED ||
107 shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
108 return NULL;
109 return shmseg;
110}
111
112static void
113shm_deallocate_segment(shmseg)
114 struct shmid_ds *shmseg;
115{
116 struct shm_handle *shm_handle;
117 size_t size;
118
119 shm_handle = shmseg->shm_internal;
120 size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
121 (void) vm_map_remove(sysvshm_map, shm_handle->kva, shm_handle->kva + size);
122 free((caddr_t)shm_handle, M_SHM);
123 shmseg->shm_internal = NULL;
124 shm_committed -= btoc(size);
125 shm_nused--;
126 shmseg->shm_perm.mode = SHMSEG_FREE;
127}
128
129static int
130shm_delete_mapping(p, shmmap_s)
131 struct proc *p;
132 struct shmmap_state *shmmap_s;
133{
134 struct shmid_ds *shmseg;
135 int segnum, result;
136 size_t size;
137
138 segnum = IPCID_TO_IX(shmmap_s->shmid);
139 shmseg = &shmsegs[segnum];
140 size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
141 result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, shmmap_s->va + size);
142 if (result != KERN_SUCCESS)
143 return EINVAL;
144 shmmap_s->shmid = -1;
145 shmseg->shm_dtime = time.tv_sec;
146 if ((--shmseg->shm_nattch <= 0) &&
147 (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
148 shm_deallocate_segment(shmseg);
149 shm_last_free = segnum;
150 }
151 return 0;
152}
153
154struct shmdt_args {
155 void *shmaddr;
156};
157int
158shmdt(p, uap, retval)
159 struct proc *p;
160 struct shmdt_args *uap;
161 int *retval;
162{
163 struct shmmap_state *shmmap_s;
164 int i;
165
166 shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
167 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
168 if (shmmap_s->shmid != -1 &&
169 shmmap_s->va == (vm_offset_t)uap->shmaddr)
170 break;
171 if (i == shminfo.shmseg)
172 return EINVAL;
173 return shm_delete_mapping(p, shmmap_s);
174}
175
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
/*
 * shmat(2): map the segment named by uap->shmid into the calling
 * process's address space, at uap->shmaddr (rounded down if SHM_RND)
 * or at a kernel-chosen address when shmaddr is NULL.  On success the
 * attach address is returned through *retval.
 */
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	int *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;

	/* Lazily allocate this process's attach table on first shmat(). */
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	/* Read permission is always required; write too unless SHM_RDONLY. */
	error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	/* Find a free slot in the attach table. */
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = (shmseg->shm_segsz + CLOFSET) & ~CLOFSET;
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;	/* misaligned and SHM_RND not set */
	} else {
		/* This is just a hint to vm_mmap() about where to put it. */
		attach_va = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &attach_va, size, prot,
	    VM_PROT_DEFAULT, flags, (caddr_t) uap->shmid, 0);
	if (error)
		return error;
	/* Record the attach and update segment accounting. */
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}
246
/* Pre-4.4BSD shmid_ds layout, used only by COMPAT_43 oshmctl(). */
struct oshmid_ds {
	struct ipc_perm shm_perm;	/* operation perms */
	int shm_segsz;			/* size of segment (bytes) */
	ushort shm_cpid;		/* pid, creator */
	ushort shm_lpid;		/* pid, last operation */
	short shm_nattch;		/* no. of current attaches */
	time_t shm_atime;		/* last attach time */
	time_t shm_dtime;		/* last detach time */
	time_t shm_ctime;		/* last change time */
	void *shm_handle;		/* internal handle for shm segment */
};

/* Argument layout for the old shmctl(); compatible with shmctl_args. */
struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};
264
/*
 * Old (4.3BSD-era) shmctl(2), compiled only under COMPAT_43.
 * IPC_STAT is translated field by field into the historic oshmid_ds
 * layout; every other command is forwarded to the current shmctl(),
 * whose first two argument fields match.
 */
int
oshmctl(p, uap, retval)
	struct proc *p;
	struct oshmctl_args *uap;
	int *retval;
{
#ifdef COMPAT_43
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		/* Convert the current shmid_ds into the old layout. */
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* Other commands need no translation. */
		return shmctl(p, uap, retval);
	}
	return 0;
#else
	return EINVAL;
#endif
}
306
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *ubuf;
};
/*
 * shmctl(2): segment control.  IPC_STAT copies the kernel shmid_ds out
 * to user space, IPC_SET updates owner and access-permission bits, and
 * IPC_RMID marks the segment removed -- destroyed immediately if
 * nothing is attached, otherwise on the last detach.  SHM_LOCK and
 * SHM_UNLOCK are not implemented.
 */
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	int *retval;
{
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->ubuf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->ubuf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		/* Only uid, gid and the low (access) mode bits may change. */
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		/* Hide the key so new shmget()s cannot find the segment. */
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}
369
/* Argument layout shared by shmget() and its two helpers below. */
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
375static int
376shmget_existing(p, uap, mode, segnum, retval)
377 struct proc *p;
378 struct shmget_args *uap;
379 int mode;
380 int segnum;
381 int *retval;
382{
383 struct shmid_ds *shmseg;
384 struct ucred *cred = p->p_ucred;
385 int error;
386
387 shmseg = &shmsegs[segnum];
388 if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
389 /*
390 * This segment is in the process of being allocated. Wait
391 * until it's done, and look the key up again (in case the
392 * allocation failed or it was freed).
393 */
394 shmseg->shm_perm.mode |= SHMSEG_WANTED;
395 error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
396 if (error)
397 return error;
398 return EAGAIN;
399 }
400 error = ipcperm(cred, &shmseg->shm_perm, mode);
401 if (error)
402 return error;
403 if (uap->size && uap->size > shmseg->shm_segsz)
404 return EINVAL;
405 if (uap->shmflg & (IPC_CREAT | IPC_EXCL) == (IPC_CREAT | IPC_EXCL))
406 return EEXIST;
407 *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
408 return 0;
409}
410
/*
 * Handle shmget() when a new segment must be created (IPC_PRIVATE, or
 * IPC_CREAT on a key not currently in use).  Picks a free table slot,
 * maps anonymous backing memory into sysvshm_map, and initializes the
 * shmid_ds.  Returns 0 with the new id in *retval, or an errno.
 */
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int *retval;
{
	int i, segnum, result, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	/* Round to a click boundary; mapping and accounting use this size. */
	size = (uap->size + CLOFSET) & ~CLOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	/* Use the cached free slot if there is one, else scan the table. */
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that noone else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	result = vm_mmap(sysvshm_map, &shm_handle->kva, size, VM_PROT_ALL,
	    VM_PROT_DEFAULT, MAP_ANON, (caddr_t) shmid, 0);
	if (result != KERN_SUCCESS) {
		/* Back out: free the slot and wake any waiting shmget(). */
		shmseg->shm_perm.mode = SHMSEG_FREE;
		shm_last_free = segnum;
		free((caddr_t)shm_handle, M_SHM);
		/* Just in case. */
		wakeup((caddr_t)shmseg);
		return ENOMEM;
	}
	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	/* SHMSEG_REMOVED is cleared here: the segment goes live. */
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	*retval = shmid;
	return 0;
}
485
486int
487shmget(p, uap, retval)
488 struct proc *p;
489 struct shmget_args *uap;
490 int *retval;
491{
492 int segnum, mode, error;
493
494 mode = uap->shmflg & ACCESSPERMS;
495 if (uap->key != IPC_PRIVATE) {
496 again:
497 segnum = shm_find_segment_by_key(uap->key);
498 if (segnum >= 0) {
499 error = shmget_existing(p, uap, mode, segnum, retval);
500 if (error == EAGAIN)
501 goto again;
502 return error;
503 }
504 if ((uap->shmflg & IPC_CREAT) == 0)
505 return ENOENT;
506 }
507 return shmget_allocate_segment(p, uap, mode, retval);
508}
509
struct shmsys_args {
	u_int which;
};
/*
 * shmsys(2): historic SysV multiplexer.  "which" selects a handler
 * from shmcalls[]; the real syscall arguments follow "which" in the
 * argument area, hence the &uap[1] pointer arithmetic.
 */
int
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}
524
525void
526shmfork(p1, p2, isvfork)
527 struct proc *p1, *p2;
528 int isvfork;
529{
530 struct shmmap_state *shmmap_s;
531 size_t size;
532 int i;
533
534 size = shminfo.shmseg * sizeof(struct shmmap_state);
535 shmmap_s = malloc(size, M_SHM, M_WAITOK);
536 bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
537 p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
538 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
539 if (shmmap_s->shmid != -1)
540 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
541}
542
543void
544shmexit(p)
545 struct proc *p;
546{
547 struct shmmap_state *shmmap_s;
548 int i;
549
550 shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
551 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
552 if (shmmap_s->shmid != -1)
553 shm_delete_mapping(p, shmmap_s);
554 free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
555 p->p_vmspace->vm_shm = NULL;
556}
557
558void
559shminit(udata)
560 caddr_t udata;
560 void *udata;
561{
562 int i;
563 vm_offset_t garbage1, garbage2;
564
565 /* actually this *should* be pageable. SHM_{LOCK,UNLOCK} */
566 sysvshm_map = kmem_suballoc(kernel_map, &garbage1, &garbage2,
567 shminfo.shmall * NBPG, TRUE);
568 for (i = 0; i < shminfo.shmmni; i++) {
569 shmsegs[i].shm_perm.mode = SHMSEG_FREE;
570 shmsegs[i].shm_perm.seq = 0;
571 }
572 shm_last_free = 0;
573 shm_nused = 0;
574 shm_committed = 0;
575}