1 /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
2 /*
3 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Adam Glass and Charles
16 * Hannum.
17 * 4. The names of the authors may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * This file is heavily changed to become part of Cygwin's cygserver.
34 */
35
36 #ifdef __OUTSIDE_CYGWIN__
37 #include "woutsup.h"
38 #include <sys/cdefs.h>
39 #ifndef __FBSDID
40 #define __FBSDID(s) const char version[] = (s)
41 #endif
42 __FBSDID("$FreeBSD: /repoman/r/ncvs/src/sys/kern/sysv_shm.c,v 1.89 2003/11/07 04:47:14 rwatson Exp $");
43
44 #define _KERNEL 1
45 #define __BSD_VISIBLE 1
46 #include <sys/param.h>
47 #include <sys/lock.h>
48 #include <sys/shm.h>
49 #include <malloc.h>
50 #include <sys/mman.h>
51 #include <sys/stat.h>
52 #include <sys/sysproto.h>
53
54 #include <errno.h>
55 #include <time.h>
56 #include <unistd.h>
57 #include "cygserver.h"
58 #include "process.h"
59 #include "cygserver_ipc.h"
60
61 #ifdef __CYGWIN__
62 #ifndef PAGE_SIZE
63 #define PAGE_SIZE (getpagesize ())
64 #endif
65 #ifndef PAGE_MASK
66 #define PAGE_MASK (PAGE_SIZE - 1)
67 #endif
68 #define btoc(b) (((b) + PAGE_MASK) / PAGE_SIZE)
69 #define round_page(p) ((((unsigned long)(p)) + PAGE_MASK) & ~(PAGE_MASK))
70 #define ACCESSPERMS (0777)
71 #define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED)
72 #define KERN_SUCCESS 0
73 #define VM_PROT_READ PROT_READ
74 #define VM_PROT_WRITE PROT_WRITE
75 #define VM_INHERIT_SHARE 0
76 #define OBJT_PHYS 0
77 #define OBJT_SWAP 0
78 #define VM_PROT_DEFAULT 0
79 #define VM_OBJECT_LOCK(a)
80 #define vm_object_clear_flag(a,b)
81 #define vm_object_set_flag(a,b)
82 #define VM_OBJECT_UNLOCK(a)
83 #define vm_object_reference(a)
84 #define vm_map_remove(a,b,c) KERN_SUCCESS
85 #define vm_map_find(a,b,c,d,e,f,g,h,i) KERN_SUCCESS
86 #define vm_map_inherit(a,b,c,d)
87 typedef int vm_prot_t;
88 #endif /* __CYGWIN__ */
89
90 #ifndef __CYGWIN__
91 static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
92
93 struct oshmctl_args;
94 static int oshmctl(struct thread *td, struct oshmctl_args *uap);
95 #endif /* __CYGWIN__ */
96
97 static int shmget_allocate_segment(struct thread *td,
98 struct shmget_args *uap, int mode);
99 static int shmget_existing(struct thread *td, struct shmget_args *uap,
100 int mode, int segnum);
101
102 #ifndef __CYGWIN__
103 /* XXX casting to (sy_call_t *) is bogus, as usual. */
104 static sy_call_t *shmcalls[] = {
105 (sy_call_t *)shmat, (sy_call_t *)oshmctl,
106 (sy_call_t *)shmdt, (sy_call_t *)shmget,
107 (sy_call_t *)shmctl
108 };
109 #endif /* __CYGWIN__ */
110
111 #define SHMSEG_FREE 0x0200
112 #define SHMSEG_REMOVED 0x0400
113 #define SHMSEG_ALLOCATED 0x0800
114 #define SHMSEG_WANTED 0x1000
115
116 static int shm_last_free, shm_nused, shm_committed, shmalloced, shm_nattch;
117 static struct shmid_ds *shmsegs;
118
119 struct shm_handle {
120 /* vm_offset_t kva; */
121 vm_object_t shm_object;
122 };
123
124 struct shmmap_state {
125 vm_offset_t va;
126 int shmid;
127 };
128
129 static void shm_deallocate_segment(struct shmid_ds *);
130 static int shm_find_segment_by_key(key_t);
131 static struct shmid_ds *shm_find_segment_by_shmid(int);
132 static struct shmid_ds *shm_find_segment_by_shmidx(int);
133 static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
134 static void shmrealloc(void);
135
136 /*
137 * Tuneable values.
138 */
139 #ifndef SHMMAXPGS
140 #define SHMMAXPGS 8192 /* Note: sysv shared memory is swap backed. */
141 #endif
142 #ifndef SHMMAX
143 #define SHMMAX (SHMMAXPGS*PAGE_SIZE)
144 #endif
145 #ifndef SHMMIN
146 #define SHMMIN 1
147 #endif
148 #ifndef SHMMNI
149 #define SHMMNI 192
150 #endif
151 #ifndef SHMSEG
152 #define SHMSEG 128
153 #endif
154 #ifndef SHMALL
155 #define SHMALL (SHMMAXPGS)
156 #endif
157
158 struct shminfo shminfo = {
159 SHMMAX,
160 SHMMIN,
161 SHMMNI,
162 SHMSEG,
163 SHMALL
164 };
165
166 #ifndef __CYGWIN__
167 static int shm_use_phys;
168 #else
169 static long shm_use_phys;
170 static long shm_allow_removed;
171 #endif /* __CYGWIN__ */
172
173 #ifndef __CYGWIN__
174 struct shm_info shm_info;
175 #endif /* __CYGWIN__ */
176
177 #ifndef __CYGWIN__
178 SYSCTL_DECL(_kern_ipc);
179 SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
180 SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
181 SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
182 SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
183 SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
184 SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
185 &shm_use_phys, 0, "");
186 SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
187 &shm_allow_removed, 0, "");
188 SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
189 NULL, 0, sysctl_shmsegs, "", "");
190 #endif /* __CYGWIN__ */
191
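/*
 * Return the index into shmsegs[] of the allocated segment whose key
 * matches, or -1 if no allocated segment carries this key.
 */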
192 static int
193 shm_find_segment_by_key(key_t key)
194 {
195 int i;
196
197 for (i = 0; i < shmalloced; i++)
198 if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
199 shmsegs[i].shm_perm.key == key)
200 return (i);
201 return (-1);
202 }
203
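/*
 * Translate a shmid into its shmid_ds.  Returns NULL if the index is out
 * of range, the slot is not allocated, the segment is marked removed (and
 * shm_allow_removed is off), or the sequence number does not match.
 */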
204 static struct shmid_ds *
205 shm_find_segment_by_shmid(int shmid)
206 {
207 int segnum;
208 struct shmid_ds *shmseg;
209
210 segnum = IPCID_TO_IX(shmid);
211 if (segnum < 0 || segnum >= shmalloced)
212 return (NULL);
213 shmseg = &shmsegs[segnum];
214 if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
215 (!shm_allow_removed &&
216 (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
217 shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
218 return (NULL);
219 return (shmseg);
220 }
221
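/*
 * Same lookup by raw slot index (used by SHM_STAT), without the sequence
 * number check.
 */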
222 static struct shmid_ds *
223 shm_find_segment_by_shmidx(int segnum)
224 {
225 struct shmid_ds *shmseg;
226
227 if (segnum < 0 || segnum >= shmalloced)
228 return (NULL);
229 shmseg = &shmsegs[segnum];
230 if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
231 (!shm_allow_removed &&
232 (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
233 return (NULL);
234 return (shmseg);
235 }
236
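/*
 * Release a segment's backing VM object and its shm_handle, subtract its
 * pages from shm_committed, and mark the slot SHMSEG_FREE.
 */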
237 static void
238 shm_deallocate_segment(struct shmid_ds *shmseg)
239 {
240 struct shm_handle *shm_handle;
241 size_t size;
242
243 GIANT_REQUIRED;
244
245 shm_handle = shmseg->shm_internal;
246 vm_object_deallocate(shm_handle->shm_object);
247 sys_free(shm_handle, M_SHM);
248 shmseg->shm_internal = NULL;
249 size = round_page(shmseg->shm_segsz);
250 shm_committed -= btoc(size);
251 shm_nused--;
252 shmseg->shm_perm.mode = SHMSEG_FREE;
253 }
254
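/*
 * Detach one mapping from a process: unmap it, record the detach time,
 * drop the attach counts, and deallocate the segment if it was marked
 * removed and this was the last attach.
 */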
255 static int
256 shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
257 {
258 struct shmid_ds *shmseg;
259 int segnum, result;
260 size_t size;
261
262 GIANT_REQUIRED;
263
264 segnum = IPCID_TO_IX(shmmap_s->shmid);
265 shmseg = &shmsegs[segnum];
266 size = round_page(shmseg->shm_segsz);
267 result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
268 if (result != KERN_SUCCESS)
269 return (EINVAL);
270 shmmap_s->shmid = -1;
271 shmseg->shm_dtime = time (NULL);
272 --shm_nattch;
273 if ((--shmseg->shm_nattch <= 0) &&
274 (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
275 shm_deallocate_segment(shmseg);
276 shm_last_free = segnum;
277 }
278 return (0);
279 }
280
281 #ifndef _SYS_SYSPROTO_H_
282 struct shmdt_args {
283 const void *shmaddr;
284 };
285 #endif
286
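/*
 * shmdt(2): locate the caller's mapping whose attach address equals
 * shmaddr and delete it; EINVAL if no such mapping exists.
 */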
287 /*
288 * MPSAFE
289 */
290 int
291 shmdt(struct thread *td, struct shmdt_args *uap)
292 {
293 struct proc *p = td->td_proc;
294 struct shmmap_state *shmmap_s;
295 int i;
296 int error = 0;
297
298 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
299 return (ENOSYS);
300 mtx_lock(&Giant);
301 shmmap_s = p->p_vmspace->vm_shm;
302 if (shmmap_s == NULL) {
303 error = EINVAL;
304 goto done2;
305 }
306 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
307 if (shmmap_s->shmid != -1 &&
308 shmmap_s->va == (vm_offset_t)uap->shmaddr) {
309 break;
310 }
311 }
312 if (i == shminfo.shmseg) {
313 error = EINVAL;
314 goto done2;
315 }
316 error = shm_delete_mapping(p->p_vmspace, shmmap_s);
317 done2:
318 mtx_unlock(&Giant);
319 return (error);
320 }
321
322 #ifndef _SYS_SYSPROTO_H_
323 struct shmat_args {
324 int shmid;
325 const void *shmaddr;
326 int shmflg;
327 };
328 #endif
329
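/*
 * Attach a segment to the calling process: allocate the per-process
 * shmmap_state table on first use, check IPC permissions, pick or verify
 * the attach address (honoring SHM_RND and SHM_RDONLY), map the segment's
 * VM object shared, and return the attach address in td_retval[0].
 */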
330 /*
331 * MPSAFE
332 */
333 int
334 kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg)
335 {
336 struct proc *p = td->td_proc;
337 int i, flags;
338 struct shmid_ds *shmseg;
339 struct shmmap_state *shmmap_s = NULL;
340 struct shm_handle *shm_handle;
341 vm_offset_t attach_va;
342 vm_prot_t prot;
343 vm_size_t size;
344 int rv;
345 int error = 0;
346
347 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
348 return (ENOSYS);
349 mtx_lock(&Giant);
350 shmmap_s = p->p_vmspace->vm_shm;
351 if (shmmap_s == NULL) {
352 size = shminfo.shmseg * sizeof(struct shmmap_state);
353 shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
354 for (i = 0; i < shminfo.shmseg; i++)
355 shmmap_s[i].shmid = -1;
356 p->p_vmspace->vm_shm = shmmap_s;
357 }
358 shmseg = shm_find_segment_by_shmid(shmid);
359 if (shmseg == NULL) {
360 error = EINVAL;
361 goto done2;
362 }
363 error = ipcperm(td, &shmseg->shm_perm,
364 (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
365 if (error)
366 goto done2;
367 for (i = 0; i < shminfo.shmseg; i++) {
368 if (shmmap_s->shmid == -1)
369 break;
370 shmmap_s++;
371 }
372 if (i >= shminfo.shmseg) {
373 error = EMFILE;
374 goto done2;
375 }
376 size = round_page(shmseg->shm_segsz);
377 #ifdef VM_PROT_READ_IS_EXEC
378 prot = VM_PROT_READ | VM_PROT_EXECUTE;
379 #else
380 prot = VM_PROT_READ;
381 #endif
382 if ((shmflg & SHM_RDONLY) == 0)
383 prot |= VM_PROT_WRITE;
384 flags = MAP_ANON | MAP_SHARED;
385 if (shmaddr) {
386 flags |= MAP_FIXED;
387 if (shmflg & SHM_RND) {
388 attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
389 } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
390 attach_va = (vm_offset_t)shmaddr;
391 } else {
392 error = EINVAL;
393 goto done2;
394 }
395 } else {
396 /*
397 * This is just a hint to vm_map_find() about where to
398 * put it.
399 */
400 #ifdef __CYGWIN__
401 attach_va = 0;
402 #else
403 attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
404 + maxtsiz + maxdsiz);
405 #endif
406 }
407
408 shm_handle = shmseg->shm_internal;
409 vm_object_reference(shm_handle->shm_object);
410 rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
411 0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
412 if (rv != KERN_SUCCESS) {
413 error = ENOMEM;
414 goto done2;
415 }
416 vm_map_inherit(&p->p_vmspace->vm_map,
417 attach_va, attach_va + size, VM_INHERIT_SHARE);
418
419 shmmap_s->va = attach_va;
420 shmmap_s->shmid = shmid;
421 shmseg->shm_lpid = p->p_pid;
422 shmseg->shm_atime = time (NULL);
423 shmseg->shm_nattch++;
424 shm_nattch++;
425 td->td_retval[0] = attach_va;
426 done2:
427 mtx_unlock(&Giant);
428 return (error);
429 }
430
431 int
432 shmat(struct thread *td, struct shmat_args *uap)
433 {
434 return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
435 }
436
437 #ifndef __CYGWIN__
438 struct oshmid_ds {
439 struct ipc_perm shm_perm; /* operation perms */
440 int shm_segsz; /* size of segment (bytes) */
441 u_short shm_cpid; /* pid, creator */
442 u_short shm_lpid; /* pid, last operation */
443 short shm_nattch; /* no. of current attaches */
444 time_t shm_atime; /* last attach time */
445 time_t shm_dtime; /* last detach time */
446 time_t shm_ctime; /* last change time */
447 void *shm_handle; /* internal handle for shm segment */
448 };
449
450 struct oshmctl_args {
451 int shmid;
452 int cmd;
453 struct oshmid_ds *ubuf;
454 };
455
456 /*
457 * MPSAFE
458 */
459 static int
460 oshmctl(struct thread *td, struct oshmctl_args *uap)
461 {
462 #ifdef COMPAT_43
463 int error = 0;
464 struct shmid_ds *shmseg;
465 struct oshmid_ds outbuf;
466
467 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
468 return (ENOSYS);
469 mtx_lock(&Giant);
470 shmseg = shm_find_segment_by_shmid(uap->shmid);
471 if (shmseg == NULL) {
472 error = EINVAL;
473 goto done2;
474 }
475 switch (uap->cmd) {
476 case IPC_STAT:
477 error = ipcperm(td, &shmseg->shm_perm, IPC_R);
478 if (error)
479 goto done2;
480 outbuf.shm_perm = shmseg->shm_perm;
481 outbuf.shm_segsz = shmseg->shm_segsz;
482 outbuf.shm_cpid = shmseg->shm_cpid;
483 outbuf.shm_lpid = shmseg->shm_lpid;
484 outbuf.shm_nattch = shmseg->shm_nattch;
485 outbuf.shm_atime = shmseg->shm_atime;
486 outbuf.shm_dtime = shmseg->shm_dtime;
487 outbuf.shm_ctime = shmseg->shm_ctime;
488 outbuf.shm_handle = shmseg->shm_internal;
489 error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
490 if (error)
491 goto done2;
492 break;
493 default:
494 /* XXX casting to (sy_call_t *) is bogus, as usual. */
495 error = ((sy_call_t *)shmctl)(td, uap);
496 break;
497 }
498 done2:
499 mtx_unlock(&Giant);
500 return (error);
501 #else
502 return (EINVAL);
503 #endif
504 }
505 #endif /* __CYGWIN__ */
506
507 #ifndef _SYS_SYSPROTO_H_
508 struct shmctl_args {
509 int shmid;
510 int cmd;
511 struct shmid_ds *buf;
512 };
513 #endif
514
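/*
 * Control operations: IPC_INFO and SHM_INFO copy global limits and usage
 * into the caller's buffer; SHM_STAT/IPC_STAT copy a shmid_ds; IPC_SET
 * updates owner and mode bits; IPC_RMID marks the segment removed so it
 * is freed once the last attach goes away.
 */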
515 /*
516 * MPSAFE
517 */
518 int
519 kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz)
520 {
521 int error = 0;
522 struct shmid_ds *shmseg;
523
524 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
525 return (ENOSYS);
526
527 mtx_lock(&Giant);
528 switch (cmd) {
529 case IPC_INFO:
530 memcpy(buf, &shminfo, sizeof(shminfo));
531 if (bufsz)
532 *bufsz = sizeof(shminfo);
533 td->td_retval[0] = shmalloced;
534 goto done2;
535 case SHM_INFO: {
536 struct shm_info shm_info;
537 shm_info.used_ids = shm_nused;
538 shm_info.shm_tot = shm_committed * PAGE_SIZE;
539 #ifdef __CYGWIN__
540 shm_info.shm_atts = shm_nattch;
541 #else
542 shm_info.shm_rss = 0; /*XXX where to get from ? */
543 shm_info.shm_swp = 0; /*XXX where to get from ? */
544 shm_info.swap_attempts = 0; /*XXX where to get from ? */
545 shm_info.swap_successes = 0; /*XXX where to get from ? */
546 #endif /* __CYGWIN__ */
547 memcpy(buf, &shm_info, sizeof(shm_info));
548 if (bufsz)
549 *bufsz = sizeof(shm_info);
550 td->td_retval[0] = shmalloced;
551 goto done2;
552 }
553 }
554 if (cmd == SHM_STAT)
555 shmseg = shm_find_segment_by_shmidx(shmid);
556 else
557 shmseg = shm_find_segment_by_shmid(shmid);
558 if (shmseg == NULL) {
559 error = EINVAL;
560 goto done2;
561 }
562 switch (cmd) {
563 case SHM_STAT:
564 case IPC_STAT:
565 error = ipcperm(td, &shmseg->shm_perm, IPC_R);
566 if (error)
567 goto done2;
568 memcpy(buf, shmseg, sizeof(struct shmid_ds));
569 if (bufsz)
570 *bufsz = sizeof(struct shmid_ds);
571 if (cmd == SHM_STAT)
572 td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
573 break;
574 case IPC_SET: {
575 struct shmid_ds *shmid;
576
577 shmid = (struct shmid_ds *)buf;
578 error = ipcperm(td, &shmseg->shm_perm, IPC_M);
579 if (error)
580 goto done2;
581 shmseg->shm_perm.uid = shmid->shm_perm.uid;
582 shmseg->shm_perm.gid = shmid->shm_perm.gid;
583 shmseg->shm_perm.mode =
584 (shmseg->shm_perm.mode & ~ACCESSPERMS) |
585 (shmid->shm_perm.mode & ACCESSPERMS);
586 shmseg->shm_ctime = time (NULL);
587 break;
588 }
589 case IPC_RMID:
590 error = ipcperm(td, &shmseg->shm_perm, IPC_M);
591 if (error)
592 goto done2;
593 shmseg->shm_perm.key = IPC_PRIVATE;
594 shmseg->shm_perm.mode |= SHMSEG_REMOVED;
595 if (shmseg->shm_nattch <= 0) {
596 shm_deallocate_segment(shmseg);
597 shm_last_free = IPCID_TO_IX(shmid);
598 }
599 break;
600 #if 0
601 case SHM_LOCK:
602 case SHM_UNLOCK:
603 #endif
604 default:
605 error = EINVAL;
606 break;
607 }
608 done2:
609 mtx_unlock(&Giant);
610 return (error);
611 }
612
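/*
 * shmctl(2) wrapper: copy in the buffer for IPC_SET, handle the Cygwin
 * IPC_INFO-with-index case directly, then call kern_shmctl and copy the
 * result back out for the informational commands.
 */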
613 int
614 shmctl(struct thread *td, struct shmctl_args *uap)
615 {
616 int error = 0;
617 struct shmid_ds buf;
618 size_t bufsz;
619
620 /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
621 if (uap->cmd == IPC_SET) {
622 if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
623 goto done;
624 }
625 #ifdef __CYGWIN__
626 if (uap->cmd == IPC_INFO && uap->shmid > 0) {
627 /* Can't use the default kern_shmctl interface. */
628 int shmid = uap->shmid;
629 if (shmid > shminfo.shmmni)
630 shmid = shminfo.shmmni;
631 error = copyout(shmsegs, uap->buf,
632 shmid * sizeof(struct shmid_ds));
633 td->td_retval[0] = error ? -1 : 0;
634 return (error);
635 }
636 #endif /* __CYGWIN__ */
637
638 error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
639 if (error)
640 goto done;
641
642 /* Cases in which we need to copyout */
643 switch (uap->cmd) {
644 case IPC_INFO:
645 case SHM_INFO:
646 case SHM_STAT:
647 case IPC_STAT:
648 error = copyout(&buf, uap->buf, bufsz);
649 break;
650 }
651
652 done:
653 if (error) {
654 /* Invalidate the return value */
655 td->td_retval[0] = -1;
656 }
657 return (error);
658 }
659
660
661 #ifndef _SYS_SYSPROTO_H_
662 struct shmget_args {
663 key_t key;
664 size_t size;
665 int shmflg;
666 };
667 #endif
668
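/*
 * A segment already exists for the requested key: fail with EEXIST if
 * IPC_CREAT|IPC_EXCL was given, check permissions and the requested size,
 * and hand back the existing segment's id (and, on Cygwin, a duplicated
 * handle to its backing object).
 */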
669 static int
670 shmget_existing(struct thread *td, struct shmget_args *uap, int mode, int segnum)
671 {
672 struct shmid_ds *shmseg;
673 int error;
674
675 shmseg = &shmsegs[segnum];
676 if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
677 /*
678 * This segment is in the process of being allocated. Wait
679 * until it's done, and look the key up again (in case the
680 * allocation failed or it was freed).
681 */
682 shmseg->shm_perm.mode |= SHMSEG_WANTED;
683 error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
684 if (error)
685 return (error);
686 return (EAGAIN);
687 }
688 if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
689 return (EEXIST);
690 error = ipcperm(td, &shmseg->shm_perm, mode);
691 if (error)
692 return (error);
693 if (uap->size && uap->size > shmseg->shm_segsz)
694 return (EINVAL);
695 td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
696 #ifdef __CYGWIN__
697 td->td_retval[1] =
698 vm_object_duplicate(td, shmseg->shm_internal->shm_object);
699 #endif /* __CYGWIN__ */
700 return (0);
701 }
702
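/*
 * Create a new segment: validate the size against shmmin/shmmax and the
 * shmall page budget, find a free slot (growing shmsegs[] if needed),
 * allocate the backing VM object, and fill in ownership, mode and times.
 */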
703 static int
704 shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode)
705 {
706 int i, segnum, shmid, size;
707 #ifndef __CYGWIN__
708 struct ucred *cred = td->td_ucred;
709 #endif /* __CYGWIN__ */
710 struct shmid_ds *shmseg;
711 struct shm_handle *shm_handle;
712
713 GIANT_REQUIRED;
714
715 if (uap->size < (unsigned long) shminfo.shmmin ||
716 uap->size > (unsigned long) shminfo.shmmax)
717 return (EINVAL);
718 if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
719 return (ENOSPC);
720 size = round_page(uap->size);
721 if (shm_committed + btoc(size) > (unsigned long) shminfo.shmall)
722 return (ENOMEM);
723 if (shm_last_free < 0) {
724 shmrealloc(); /* Maybe expand the shmsegs[] array. */
725 for (i = 0; i < shmalloced; i++)
726 if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
727 break;
728 if (i == shmalloced)
729 return (ENOSPC);
730 segnum = i;
731 } else {
732 segnum = shm_last_free;
733 shm_last_free = -1;
734 }
735 shmseg = &shmsegs[segnum];
736 /*
737 * In case we sleep in malloc(), mark the segment present but deleted
738 	 * so that no one else tries to create the same key.
739 */
740 shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
741 shmseg->shm_perm.key = uap->key;
742 shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
743 shm_handle = (struct shm_handle *)
744 sys_malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
745 shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
746
747 /*
748 * We make sure that we have allocated a pager before we need
749 * to.
750 */
751 if (shm_use_phys) {
752 shm_handle->shm_object =
753 vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
754 } else {
755 shm_handle->shm_object =
756 vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
757 }
758 VM_OBJECT_LOCK(shm_handle->shm_object);
759 vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
760 vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
761 VM_OBJECT_UNLOCK(shm_handle->shm_object);
762
763 shmseg->shm_internal = shm_handle;
764 #ifdef __CYGWIN__
765 shmseg->shm_perm.cuid = shmseg->shm_perm.uid = td->ipcblk->uid;
766 shmseg->shm_perm.cgid = shmseg->shm_perm.gid = td->ipcblk->gid;
767 #else
768 shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
769 shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
770 #endif /* __CYGWIN__ */
771 shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
772 (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
773 shmseg->shm_segsz = uap->size;
774 shmseg->shm_cpid = td->td_proc->p_pid;
775 shmseg->shm_lpid = shmseg->shm_nattch = 0;
776 shmseg->shm_atime = shmseg->shm_dtime = 0;
777 shmseg->shm_ctime = time (NULL);
778 shm_committed += btoc(size);
779 shm_nused++;
780 if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
781 /*
782 * Somebody else wanted this key while we were asleep. Wake
783 * them up now.
784 */
785 shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
786 wakeup(shmseg);
787 }
788 td->td_retval[0] = shmid;
789 #ifdef __CYGWIN__
790 td->td_retval[1] =
791 vm_object_duplicate(td, shmseg->shm_internal->shm_object);
792 #endif /* __CYGWIN__ */
793 return (0);
794 }
795
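/*
 * shmget(2): for IPC_PRIVATE always create a new segment; otherwise look
 * the key up (or, with IPC_KEY_IS_SHMID, treat it as an existing shmid)
 * and either return the existing segment or create one if IPC_CREAT.
 */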
796 /*
797 * MPSAFE
798 */
799 int
800 shmget(struct thread *td, struct shmget_args *uap)
801 {
802 int segnum, mode;
803 int error;
804
805 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
806 return (ENOSYS);
807 mtx_lock(&Giant);
808 mode = uap->shmflg & ACCESSPERMS;
809 if (uap->key != IPC_PRIVATE) {
810 again:
811 #ifdef __CYGWIN__
812 if (uap->shmflg & IPC_KEY_IS_SHMID)
813 segnum = shm_find_segment_by_shmid ((int) uap->key) ?
814 IPCID_TO_IX((int) uap->key) : -1;
815 else
816 #endif
817 segnum = shm_find_segment_by_key(uap->key);
818 if (segnum >= 0) {
819 error = shmget_existing(td, uap, mode, segnum);
820 if (error == EAGAIN)
821 goto again;
822 goto done2;
823 }
824 if ((uap->shmflg & IPC_CREAT) == 0) {
825 error = ENOENT;
826 goto done2;
827 }
828 }
829 error = shmget_allocate_segment(td, uap, mode);
830 done2:
831 #ifdef __CYGWIN__
832 if (!error)
833 ipcexit_creat_hookthread (td);
834 #endif
835 mtx_unlock(&Giant);
836 return (error);
837 }
838
839 #ifndef __CYGWIN__
840 /*
841 * MPSAFE
842 */
843 int
844 shmsys(td, uap)
845 struct thread *td;
846 /* XXX actually varargs. */
847 struct shmsys_args /* {
848 int which;
849 int a2;
850 int a3;
851 int a4;
852 } */ *uap;
853 {
854 int error;
855
856 if (!jail_sysvipc_allowed && jailed(td->td_ucred))
857 return (ENOSYS);
858 if (uap->which < 0 ||
859 uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
860 return (EINVAL);
861 mtx_lock(&Giant);
862 error = (*shmcalls[uap->which])(td, &uap->a2);
863 mtx_unlock(&Giant);
864 return (error);
865 }
866 #endif /* __CYGWIN__ */
867
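/*
 * At fork, give the child a copy of the parent's shmmap_state table and
 * bump the attach count of every segment the parent had attached.
 */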
868 static void
869 shmfork_myhook(struct proc *p1, struct proc *p2)
870 {
871 struct shmmap_state *shmmap_s;
872 size_t size;
873 int i;
874
875 size = shminfo.shmseg * sizeof(struct shmmap_state);
876 shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
877 bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
878 p2->p_vmspace->vm_shm = shmmap_s;
879 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
880 if (shmmap_s->shmid != -1) {
881 shm_nattch++;
882 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
883 }
884 }
885
886 #ifdef __CYGWIN__
887 int cygwin_shmfork_myhook (struct thread *td, struct proc *parent)
888 {
889 ipcexit_creat_hookthread (td);
890 ipc_p_vmspace (td->ipcblk);
891 ipc_p_vmspace (parent);
892 shmfork_myhook (parent, td->ipcblk);
893 return 0;
894 }
895 #endif
896
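/*
 * At process exit, delete every remaining mapping and free the table.
 */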
897 void
898 shmexit_myhook(struct vmspace *vm)
899 {
900 struct shmmap_state *base, *shm;
901 int i;
902
903 GIANT_REQUIRED;
904
905 if ((base = vm->vm_shm) != NULL) {
906 vm->vm_shm = NULL;
907 for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
908 if (shm->shmid != -1)
909 shm_delete_mapping(vm, shm);
910 }
911 sys_free(base, M_SHM);
912 }
913 }
914
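/*
 * Grow shmsegs[] to shminfo.shmmni entries, copying the existing slots
 * and marking the new ones free.
 */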
915 static void
916 shmrealloc(void)
917 {
918 int i;
919 struct shmid_ds *newsegs;
920
921 if (shmalloced >= shminfo.shmmni)
922 return;
923
924 newsegs = (struct shmid_ds *) sys_malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
925 if (newsegs == NULL)
926 return;
927 for (i = 0; i < shmalloced; i++)
928 bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
929 for (; i < shminfo.shmmni; i++) {
930 		newsegs[i].shm_perm.mode = SHMSEG_FREE;
931 		newsegs[i].shm_perm.seq = 0;
932 }
933 sys_free(shmsegs, M_SHM);
934 shmsegs = newsegs;
935 shmalloced = shminfo.shmmni;
936 }
937
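/*
 * Initialize limits from the kern.ipc.* tunables and allocate the initial
 * shmsegs[] array with every slot marked free.
 */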
938 void
939 shminit(void)
940 {
941 int i;
942
943 TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
944 for (i = PAGE_SIZE; i > 0; i--) {
945 		shminfo.shmmax = shminfo.shmall * i;
946 if (shminfo.shmmax >= shminfo.shmall)
947 break;
948 }
949 TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
950 TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
951 TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
952 TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);
953
954 shmalloced = shminfo.shmmni;
955 shmsegs = (struct shmid_ds *) sys_malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
956 if (shmsegs == NULL)
957 panic("cannot allocate initial memory for sysvshm");
958 for (i = 0; i < shmalloced; i++) {
959 shmsegs[i].shm_perm.mode = SHMSEG_FREE;
960 shmsegs[i].shm_perm.seq = 0;
961 }
962 shm_last_free = 0;
963 shm_nused = 0;
964 shm_committed = 0;
965 #ifndef __CYGWIN__
966 shmexit_hook = &shmexit_myhook;
967 shmfork_hook = &shmfork_myhook;
968 #endif /* __CYGWIN__ */
969 }
970
971 int
972 shmunload(void)
973 {
974
975 if (shm_nused > 0)
976 return (EBUSY);
977
978 sys_free(shmsegs, M_SHM);
979 #ifndef __CYGWIN__
980 shmexit_hook = NULL;
981 shmfork_hook = NULL;
982 #endif /* __CYGWIN__ */
983 return (0);
984 }
985
986 #ifndef __CYGWIN__
987 static int
988 sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
989 {
990
991 return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
992 }
993
994 static int
995 sysvshm_modload(struct module *module, int cmd, void *arg)
996 {
997 int error = 0;
998
999 switch (cmd) {
1000 case MOD_LOAD:
1001 shminit();
1002 break;
1003 case MOD_UNLOAD:
1004 error = shmunload();
1005 break;
1006 case MOD_SHUTDOWN:
1007 break;
1008 default:
1009 error = EINVAL;
1010 break;
1011 }
1012 return (error);
1013 }
1014
1015 static moduledata_t sysvshm_mod = {
1016 "sysvshm",
1017 &sysvshm_modload,
1018 NULL
1019 };
1020
1021 SYSCALL_MODULE_HELPER(shmsys);
1022 SYSCALL_MODULE_HELPER(shmat);
1023 SYSCALL_MODULE_HELPER(shmctl);
1024 SYSCALL_MODULE_HELPER(shmdt);
1025 SYSCALL_MODULE_HELPER(shmget);
1026
1027 DECLARE_MODULE(sysvshm, sysvshm_mod,
1028 SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
1029 MODULE_VERSION(sysvshm, 1);
1030 #endif /* __CYGWIN__ */
1031 #endif /* __OUTSIDE_CYGWIN__ */