/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file is heavily changed to become part of Cygwin's cygserver.
 */
#ifdef __OUTSIDE_CYGWIN__
#include <sys/cdefs.h>
#define __FBSDID(s) const char version[] = (s)
__FBSDID("$FreeBSD: /repoman/r/ncvs/src/sys/kern/sysv_shm.c,v 1.89 2003/11/07 04:47:14 rwatson Exp $");

#define __BSD_VISIBLE 1
#include <sys/param.h>
#include <sys/sysproto.h>

#include "cygserver.h"
#include "cygserver_ipc.h"

#define PAGE_SIZE (getpagesize ())
#define PAGE_MASK (PAGE_SIZE - 1)
#define btoc(b) (((b) + PAGE_MASK) / PAGE_SIZE)
#define round_page(p) ((((unsigned long)(p)) + PAGE_MASK) & ~(PAGE_MASK))
#define ACCESSPERMS (0777)
#define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED)
#define KERN_SUCCESS 0
#define VM_PROT_READ PROT_READ
#define VM_PROT_WRITE PROT_WRITE
#define VM_INHERIT_SHARE 0
#define VM_PROT_DEFAULT 0
#define VM_OBJECT_LOCK(a)
#define vm_object_clear_flag(a,b)
#define vm_object_set_flag(a,b)
#define VM_OBJECT_UNLOCK(a)
#define vm_object_reference(a)
#define vm_map_remove(a,b,c) KERN_SUCCESS
#define vm_map_find(a,b,c,d,e,f,g,h,i) KERN_SUCCESS
#define vm_map_inherit(a,b,c,d)
typedef int vm_prot_t;
#endif /* __CYGWIN__ */
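/*
 * Example of how the page helpers above behave, assuming getpagesize()
 * returns 4096 (the value depends on the host):
 *
 *      round_page(5000) == 8192   -- round a byte count up to a page boundary
 *      btoc(5000)       == 2      -- bytes-to-clicks: pages needed for 5000 bytes
 *      round_page(4096) == 4096,  btoc(4096) == 1
 *
 * Segment sizes are rounded with round_page() before mapping, while the
 * shm_committed accounting below is kept in btoc() units (whole pages).
 */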
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int oshmctl(struct thread *td, struct oshmctl_args *uap);
#endif /* __CYGWIN__ */

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
    (sy_call_t *)shmat, (sy_call_t *)oshmctl,
    (sy_call_t *)shmdt, (sy_call_t *)shmget,
#endif /* __CYGWIN__ */
#define SHMSEG_FREE      0x0200
#define SHMSEG_REMOVED   0x0400
#define SHMSEG_ALLOCATED 0x0800
#define SHMSEG_WANTED    0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced, shm_nattch;
static struct shmid_ds *shmsegs;

    /* vm_offset_t kva; */
    vm_object_t shm_object;

struct shmmap_state {

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);

#define SHMMAXPGS 8192   /* Note: sysv shared memory is swap backed. */
#define SHMMAX  (SHMMAXPGS*PAGE_SIZE)
#define SHMALL  (SHMMAXPGS)
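/*
 * With a 4 KiB page size these defaults work out to SHMMAX = 8192 * 4096
 * = 32 MiB as the largest single segment, and SHMALL caps the total
 * committed SysV shared memory at the same 8192 pages.  They are only
 * compile-time defaults; the shminfo fields can still be adjusted through
 * the kern.ipc.* tunables declared below.
 */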
struct shminfo shminfo = {

static int shm_use_phys;

static long shm_use_phys;
static long shm_allow_removed;
#endif /* __CYGWIN__ */

struct shm_info shm_info;
#endif /* __CYGWIN__ */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");
#endif /* __CYGWIN__ */
shm_find_segment_by_key(key_t key)

    for (i = 0; i < shmalloced; i++)
        if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
            shmsegs[i].shm_perm.key == key)
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)

    struct shmid_ds *shmseg;

    segnum = IPCID_TO_IX(shmid);
    if (segnum < 0 || segnum >= shmalloced)

    shmseg = &shmsegs[segnum];
    if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
        (!shm_allow_removed &&
        (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
        shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
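/*
 * A shmid handed out by IXSEQ_TO_IPCID() combines the slot index in
 * shmsegs[] with that slot's sequence number (the usual BSD-derived
 * encoding: index in the low half of the id, sequence above it).  For
 * example, a segment in slot 3 whose shm_perm.seq is 5 yields an id whose
 * IPCID_TO_IX() is 3 and IPCID_TO_SEQ() is 5; once the slot is reused the
 * stored sequence no longer matches and the stale id is rejected here.
 */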
static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)

    struct shmid_ds *shmseg;

    if (segnum < 0 || segnum >= shmalloced)

    shmseg = &shmsegs[segnum];
    if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
        (!shm_allow_removed &&
        (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
shm_deallocate_segment(struct shmid_ds *shmseg)

    struct shm_handle *shm_handle;

    shm_handle = shmseg->shm_internal;
    vm_object_deallocate(shm_handle->shm_object);
    sys_free(shm_handle, M_SHM);
    shmseg->shm_internal = NULL;
    size = round_page(shmseg->shm_segsz);
    shm_committed -= btoc(size);

    shmseg->shm_perm.mode = SHMSEG_FREE;
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)

    struct shmid_ds *shmseg;

    segnum = IPCID_TO_IX(shmmap_s->shmid);
    shmseg = &shmsegs[segnum];
    size = round_page(shmseg->shm_segsz);
    result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
    if (result != KERN_SUCCESS)

    shmmap_s->shmid = -1;
    shmseg->shm_dtime = time (NULL);
    if ((--shmseg->shm_nattch <= 0) &&
        (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
        shm_deallocate_segment(shmseg);
        shm_last_free = segnum;
#ifndef _SYS_SYSPROTO_H_

shmdt(struct thread *td, struct shmdt_args *uap)

    struct proc *p = td->td_proc;
    struct shmmap_state *shmmap_s;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))

    shmmap_s = p->p_vmspace->vm_shm;
    if (shmmap_s == NULL) {

    for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
        if (shmmap_s->shmid != -1 &&
            shmmap_s->va == (vm_offset_t)uap->shmaddr) {

    if (i == shminfo.shmseg) {

    error = shm_delete_mapping(p->p_vmspace, shmmap_s);
#ifndef _SYS_SYSPROTO_H_

kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg)

    struct proc *p = td->td_proc;
    struct shmid_ds *shmseg;
    struct shmmap_state *shmmap_s = NULL;
    struct shm_handle *shm_handle;
    vm_offset_t attach_va;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))

    shmmap_s = p->p_vmspace->vm_shm;
    if (shmmap_s == NULL) {
        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
        for (i = 0; i < shminfo.shmseg; i++)
            shmmap_s[i].shmid = -1;
        p->p_vmspace->vm_shm = shmmap_s;

    shmseg = shm_find_segment_by_shmid(shmid);
    if (shmseg == NULL) {

    error = ipcperm(td, &shmseg->shm_perm,
        (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);

    for (i = 0; i < shminfo.shmseg; i++) {
        if (shmmap_s->shmid == -1)

    if (i >= shminfo.shmseg) {

    size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
    prot = VM_PROT_READ | VM_PROT_EXECUTE;

    if ((shmflg & SHM_RDONLY) == 0)
        prot |= VM_PROT_WRITE;
    flags = MAP_ANON | MAP_SHARED;

    if (shmflg & SHM_RND) {
        attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
    } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
        attach_va = (vm_offset_t)shmaddr;

     * This is just a hint to vm_map_find() about where to

        attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
            + maxtsiz + maxdsiz);

    shm_handle = shmseg->shm_internal;
    vm_object_reference(shm_handle->shm_object);
    rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
        0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
    if (rv != KERN_SUCCESS) {

    vm_map_inherit(&p->p_vmspace->vm_map,
        attach_va, attach_va + size, VM_INHERIT_SHARE);

    shmmap_s->va = attach_va;
    shmmap_s->shmid = shmid;
    shmseg->shm_lpid = p->p_pid;
    shmseg->shm_atime = time (NULL);
    shmseg->shm_nattch++;

    td->td_retval[0] = attach_va;

shmat(struct thread *td, struct shmat_args *uap)

    return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
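/*
 * Address selection in kern_shmat(), illustrated with SHMLBA == 4096 as an
 * example value: shmat(id, (void *)0x20001234, SHM_RND) rounds the requested
 * address down to 0x20001000 before mapping, while without SHM_RND an
 * explicit address is only accepted when it is already SHMLBA-aligned.  When
 * the caller supplies no address, attach_va is only a hint for vm_map_find(),
 * seeded past the text and data segments (vm_taddr + maxtsiz + maxdsiz).
 */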
    struct ipc_perm shm_perm;   /* operation perms */
    int shm_segsz;              /* size of segment (bytes) */
    u_short shm_cpid;           /* pid, creator */
    u_short shm_lpid;           /* pid, last operation */
    short shm_nattch;           /* no. of current attaches */
    time_t shm_atime;           /* last attach time */
    time_t shm_dtime;           /* last detach time */
    time_t shm_ctime;           /* last change time */
    void *shm_handle;           /* internal handle for shm segment */

struct oshmctl_args {
    struct oshmid_ds *ubuf;

oshmctl(struct thread *td, struct oshmctl_args *uap)

    struct shmid_ds *shmseg;
    struct oshmid_ds outbuf;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))

    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {

    error = ipcperm(td, &shmseg->shm_perm, IPC_R);

    outbuf.shm_perm = shmseg->shm_perm;
    outbuf.shm_segsz = shmseg->shm_segsz;
    outbuf.shm_cpid = shmseg->shm_cpid;
    outbuf.shm_lpid = shmseg->shm_lpid;
    outbuf.shm_nattch = shmseg->shm_nattch;
    outbuf.shm_atime = shmseg->shm_atime;
    outbuf.shm_dtime = shmseg->shm_dtime;
    outbuf.shm_ctime = shmseg->shm_ctime;
    outbuf.shm_handle = shmseg->shm_internal;
    error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));

    /* XXX casting to (sy_call_t *) is bogus, as usual. */
    error = ((sy_call_t *)shmctl)(td, uap);
#endif /* __CYGWIN__ */
#ifndef _SYS_SYSPROTO_H_
    struct shmid_ds *buf;

kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz)

    struct shmid_ds *shmseg;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))

        memcpy(buf, &shminfo, sizeof(shminfo));
        *bufsz = sizeof(shminfo);
        td->td_retval[0] = shmalloced;

        struct shm_info shm_info;
        shm_info.used_ids = shm_nused;
        shm_info.shm_tot = shm_committed * PAGE_SIZE;
        shm_info.shm_atts = shm_nattch;
        shm_info.shm_rss = 0;           /*XXX where to get from ? */
        shm_info.shm_swp = 0;           /*XXX where to get from ? */
        shm_info.swap_attempts = 0;     /*XXX where to get from ? */
        shm_info.swap_successes = 0;    /*XXX where to get from ? */
#endif /* __CYGWIN__ */
        memcpy(buf, &shm_info, sizeof(shm_info));
        *bufsz = sizeof(shm_info);
        td->td_retval[0] = shmalloced;

        shmseg = shm_find_segment_by_shmidx(shmid);

        shmseg = shm_find_segment_by_shmid(shmid);
    if (shmseg == NULL) {

        error = ipcperm(td, &shmseg->shm_perm, IPC_R);

        memcpy(buf, shmseg, sizeof(struct shmid_ds));
        *bufsz = sizeof(struct shmid_ds);
        td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);

        struct shmid_ds *shmid;

        shmid = (struct shmid_ds *)buf;
        error = ipcperm(td, &shmseg->shm_perm, IPC_M);

        shmseg->shm_perm.uid = shmid->shm_perm.uid;
        shmseg->shm_perm.gid = shmid->shm_perm.gid;
        shmseg->shm_perm.mode =
            (shmseg->shm_perm.mode & ~ACCESSPERMS) |
            (shmid->shm_perm.mode & ACCESSPERMS);
        shmseg->shm_ctime = time (NULL);

        error = ipcperm(td, &shmseg->shm_perm, IPC_M);

        shmseg->shm_perm.key = IPC_PRIVATE;
        shmseg->shm_perm.mode |= SHMSEG_REMOVED;
        if (shmseg->shm_nattch <= 0) {
            shm_deallocate_segment(shmseg);
            shm_last_free = IPCID_TO_IX(shmid);
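/*
 * IPC_RMID above does not free the segment immediately: it rekeys the
 * segment to IPC_PRIVATE (so no further shmget() by key can find it) and
 * sets SHMSEG_REMOVED.  The actual teardown happens here only when
 * shm_nattch has already dropped to zero; otherwise it is deferred to the
 * last shm_delete_mapping() call.
 */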
shmctl(struct thread *td, struct shmctl_args *uap)

    /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
    if (uap->cmd == IPC_SET) {
        if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))

    if (uap->cmd == IPC_INFO && uap->shmid > 0) {
        /* Can't use the default kern_shmctl interface. */
        int shmid = uap->shmid;
        if (shmid > shminfo.shmmni)
            shmid = shminfo.shmmni;
        error = copyout(shmsegs, uap->buf,
            shmid * sizeof(struct shmid_ds));
        td->td_retval[0] = error ? -1 : 0;
#endif /* __CYGWIN__ */

    error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);

    /* Cases in which we need to copyout */
        error = copyout(&buf, uap->buf, bufsz);

    /* Invalidate the return value */
    td->td_retval[0] = -1;
#ifndef _SYS_SYSPROTO_H_

shmget_existing(struct thread *td, struct shmget_args *uap, int mode, int segnum)

    struct shmid_ds *shmseg;

    shmseg = &shmsegs[segnum];
    if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
        /*
         * This segment is in the process of being allocated.  Wait
         * until it's done, and look the key up again (in case the
         * allocation failed or it was freed).
         */
        shmseg->shm_perm.mode |= SHMSEG_WANTED;
        error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);

    if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))

    error = ipcperm(td, &shmseg->shm_perm, mode);

    if (uap->size && uap->size > shmseg->shm_segsz)

    td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

    vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode)

    int i, segnum, shmid, size;

    struct ucred *cred = td->td_ucred;
#endif /* __CYGWIN__ */
    struct shmid_ds *shmseg;
    struct shm_handle *shm_handle;

    if (uap->size < (unsigned long) shminfo.shmmin ||
        uap->size > (unsigned long) shminfo.shmmax)

    if (shm_nused >= shminfo.shmmni) /* Any shmids left? */

    size = round_page(uap->size);
    if (shm_committed + btoc(size) > (unsigned long) shminfo.shmall)

    if (shm_last_free < 0) {
        shmrealloc();   /* Maybe expand the shmsegs[] array. */
        for (i = 0; i < shmalloced; i++)
            if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)

        segnum = shm_last_free;

    shmseg = &shmsegs[segnum];
     * In case we sleep in malloc(), mark the segment present but deleted
     * so that noone else tries to create the same key.
    shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
    shmseg->shm_perm.key = uap->key;
    shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
    shm_handle = (struct shm_handle *)
        sys_malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
    shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
     * We make sure that we have allocated a pager before we need
        shm_handle->shm_object =
            vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);

        shm_handle->shm_object =
            vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);

    VM_OBJECT_LOCK(shm_handle->shm_object);
    vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
    vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
    VM_OBJECT_UNLOCK(shm_handle->shm_object);

    shmseg->shm_internal = shm_handle;

    shmseg->shm_perm.cuid = shmseg->shm_perm.uid = td->ipcblk->uid;
    shmseg->shm_perm.cgid = shmseg->shm_perm.gid = td->ipcblk->gid;

    shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
    shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
#endif /* __CYGWIN__ */
    shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
        (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
    shmseg->shm_segsz = uap->size;
    shmseg->shm_cpid = td->td_proc->p_pid;
    shmseg->shm_lpid = shmseg->shm_nattch = 0;
    shmseg->shm_atime = shmseg->shm_dtime = 0;
    shmseg->shm_ctime = time (NULL);
    shm_committed += btoc(size);

    if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
         * Somebody else wanted this key while we were asleep. Wake
        shmseg->shm_perm.mode &= ~SHMSEG_WANTED;

    td->td_retval[0] = shmid;

    vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
shmget(struct thread *td, struct shmget_args *uap)

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))

    mode = uap->shmflg & ACCESSPERMS;
    if (uap->key != IPC_PRIVATE) {

        if (uap->shmflg & IPC_KEY_IS_SHMID)
            segnum = shm_find_segment_by_shmid ((int) uap->key) ?
                IPCID_TO_IX((int) uap->key) : -1;

            segnum = shm_find_segment_by_key(uap->key);

            error = shmget_existing(td, uap, mode, segnum);

        if ((uap->shmflg & IPC_CREAT) == 0) {

    error = shmget_allocate_segment(td, uap, mode);

    ipcexit_creat_hookthread (td);
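/*
 * Client-side view of the calls served above (a minimal sketch of the
 * ordinary SysV API as seen by a Cygwin program; error handling omitted):
 *
 *      #include <sys/ipc.h>
 *      #include <sys/shm.h>
 *
 *      int id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600);
 *      char *p = (char *) shmat(id, NULL, 0);   // attach, address chosen for us
 *      p[0] = 42;                               // use the shared segment
 *      shmdt(p);                                // detach
 *      shmctl(id, IPC_RMID, NULL);              // mark the segment for removal
 *
 * Each of these library calls is forwarded to cygserver, whose bookkeeping
 * ends up in the shmget()/shmat()/shmdt()/shmctl() handlers in this file.
 */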
/* XXX actually varargs. */
struct shmsys_args /* {

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))

    if (uap->which < 0 ||
        uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))

    error = (*shmcalls[uap->which])(td, &uap->a2);
#endif /* __CYGWIN__ */
shmfork_myhook(struct proc *p1, struct proc *p2)

    struct shmmap_state *shmmap_s;

    size = shminfo.shmseg * sizeof(struct shmmap_state);
    shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
    bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
    p2->p_vmspace->vm_shm = shmmap_s;
    for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
        if (shmmap_s->shmid != -1) {
            shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;

int cygwin_shmfork_myhook (struct thread *td, struct proc *parent)

    ipcexit_creat_hookthread (td);
    ipc_p_vmspace (td->ipcblk);
    ipc_p_vmspace (parent);
    shmfork_myhook (parent, td->ipcblk);
shmexit_myhook(struct vmspace *vm)

    struct shmmap_state *base, *shm;

    if ((base = vm->vm_shm) != NULL) {
        for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
            if (shm->shmid != -1)
                shm_delete_mapping(vm, shm);

        sys_free(base, M_SHM);
    struct shmid_ds *newsegs;

    if (shmalloced >= shminfo.shmmni)

    newsegs = (struct shmid_ds *) sys_malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);

    for (i = 0; i < shmalloced; i++)
        bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
    for (; i < shminfo.shmmni; i++) {
        shmsegs[i].shm_perm.mode = SHMSEG_FREE;
        shmsegs[i].shm_perm.seq = 0;

    sys_free(shmsegs, M_SHM);

    shmalloced = shminfo.shmmni;
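/*
 * shmrealloc() is invoked from shmget_allocate_segment() when no free slot
 * is cached (shm_last_free < 0); it grows shmsegs[] up to the kern.ipc.shmmni
 * limit and becomes a no-op once shmalloced has reached shminfo.shmmni.
 */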
    TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
    for (i = PAGE_SIZE; i > 0; i--) {
        shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
        if (shminfo.shmmax >= shminfo.shmall)

    TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
    TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
    TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
    TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

    shmalloced = shminfo.shmmni;
    shmsegs = (struct shmid_ds *) sys_malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);

        panic("cannot allocate initial memory for sysvshm");
    for (i = 0; i < shmalloced; i++) {
        shmsegs[i].shm_perm.mode = SHMSEG_FREE;
        shmsegs[i].shm_perm.seq = 0;

    shmexit_hook = &shmexit_myhook;
    shmfork_hook = &shmfork_myhook;
#endif /* __CYGWIN__ */
    sys_free(shmsegs, M_SHM);
#endif /* __CYGWIN__ */

sysctl_shmsegs(SYSCTL_HANDLER_ARGS)

    return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));

sysvshm_modload(struct module *module, int cmd, void *arg)

        error = shmunload();

static moduledata_t sysvshm_mod = {

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
    SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
#endif /* __CYGWIN__ */
#endif /* __OUTSIDE_CYGWIN__ */