/* winsup/cygserver/sysv_shm.cc */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file is heavily changed to become part of Cygwin's cygserver.
 */
#ifdef __OUTSIDE_CYGWIN__

#include <sys/cdefs.h>
#define __FBSDID(s) const char version[] = (s)
__FBSDID("$FreeBSD: /repoman/r/ncvs/src/sys/kern/sysv_shm.c,v 1.89 2003/11/07 04:47:14 rwatson Exp $");
/* CV, 2006-01-09: Inspected upstream up to version 1.104. */

#define __BSD_VISIBLE 1
#include <sys/param.h>
#include <sys/sysproto.h>

#include "cygserver.h"
#include "cygserver_ipc.h"

#ifdef __CYGWIN__
#define PAGE_SIZE (getpagesize ())
#define PAGE_MASK (PAGE_SIZE - 1)
#define btoc(b) (((b) + PAGE_MASK) / PAGE_SIZE)
#define round_page(p) ((((unsigned long)(p)) + PAGE_MASK) & ~(PAGE_MASK))
#define GIANT_REQUIRED
#else
#define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED)
#endif
#ifdef __CYGWIN__
#define KERN_SUCCESS 0
#define VM_PROT_READ PROT_READ
#define VM_PROT_WRITE PROT_WRITE
#define VM_INHERIT_SHARE 0
#define VM_PROT_DEFAULT 0
#define VM_OBJECT_LOCK(a)
#define vm_object_clear_flag(a,b)
#define vm_object_set_flag(a,b)
#define VM_OBJECT_UNLOCK(a)
#define vm_map_remove(a,b,c) KERN_SUCCESS
typedef int vm_prot_t;
#endif /* __CYGWIN__ */
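
/*
 * Illustrative: with the definitions above, btoc() converts a byte count
 * to a page count (rounding up) and round_page() rounds a byte count or
 * address up to the next page boundary.  Assuming getpagesize() == 4096:
 *
 *    btoc(1)          == 1        (one partial page -> one page)
 *    btoc(5000)       == 2        ((5000 + 4095) / 4096)
 *    round_page(5000) == 8192     ((5000 + 4095) & ~4095)
 */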
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

#ifndef __CYGWIN__
static int oshmctl(struct thread *td, struct oshmctl_args *uap);
#endif /* __CYGWIN__ */
static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);
#ifndef __CYGWIN__
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
    (sy_call_t *)shmat, (sy_call_t *)oshmctl,
    (sy_call_t *)shmdt, (sy_call_t *)shmget,
    (sy_call_t *)shmctl
};
#endif /* __CYGWIN__ */
#define SHMSEG_FREE		0x0200
#define SHMSEG_REMOVED		0x0400
#define SHMSEG_ALLOCATED	0x0800
#define SHMSEG_WANTED		0x1000
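
/*
 * Segment lifecycle, as implemented below: a slot in shmsegs[] is
 * SHMSEG_FREE until shmget_allocate_segment() claims it, marking it
 * ALLOCATED|REMOVED while the allocator may sleep in malloc(); REMOVED
 * is cleared once setup completes.  IPC_RMID sets SHMSEG_REMOVED again,
 * and the storage is finally released when the attach count drops to
 * zero.  SHMSEG_WANTED marks a sleeping shmget() waiting on a slot in
 * flux.
 */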
static int shm_last_free, shm_nused, shm_committed, shmalloced, shm_nattch;
static struct shmid_ds *shmsegs;

struct shm_handle {
    /* vm_offset_t kva; */
    vm_object_t shm_object;
};
struct shmmap_state {
    vm_offset_t va;
    int shmid;
};
static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
#define SHMMAXPGS	8192	/* Note: sysv shared memory is swap backed. */
#define SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#define SHMMIN	1
#define SHMMNI	192
#define SHMSEG	128
#define SHMALL	(SHMMAXPGS)
struct shminfo shminfo = {
    SHMMAX, SHMMIN, SHMMNI, SHMSEG, SHMALL
};

#ifndef __CYGWIN__
static int shm_use_phys;
#else
static long shm_use_phys;
static long shm_allow_removed;
#endif /* __CYGWIN__ */

#ifdef __CYGWIN__
struct shm_info shm_info;
#endif /* __CYGWIN__ */
#ifndef __CYGWIN__
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");
#endif /* __CYGWIN__ */
static int
shm_find_segment_by_key(key_t key)
{
    int i;

    for (i = 0; i < shmalloced; i++)
        if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
            shmsegs[i].shm_perm.key == key)
            return (i);
    return (-1);
}
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
    int segnum;
    struct shmid_ds *shmseg;

    segnum = IPCID_TO_IX(shmid);
    if (segnum < 0 || segnum >= shmalloced)
        return (NULL);
    shmseg = &shmsegs[segnum];
    if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
        (!shm_allow_removed &&
         (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
        shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
        return (NULL);
    return (shmseg);
}
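
/*
 * A shmid mixes the slot index with a per-slot sequence number
 * (IPCID_TO_IX/IPCID_TO_SEQ split it, IXSEQ_TO_IPCID rebuilds it), so a
 * stale id whose slot has been reused is rejected by the seq check above.
 * Illustrative sketch, assuming the conventional <sys/ipc.h> encoding of
 * the index in the low 16 bits and the sequence above it:
 *
 *    int ix  = id & 0xffff;           // IPCID_TO_IX(id)
 *    int seq = (id >> 16) & 0xffff;   // IPCID_TO_SEQ(id)
 *    int id2 = (seq << 16) | ix;      // IXSEQ_TO_IPCID(ix, perm)
 */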
static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)
{
    struct shmid_ds *shmseg;

    if (segnum < 0 || segnum >= shmalloced)
        return (NULL);
    shmseg = &shmsegs[segnum];
    if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
        (!shm_allow_removed &&
         (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
        return (NULL);
    return (shmseg);
}
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
    struct shm_handle *shm_handle;
    size_t size;

    GIANT_REQUIRED;

    shm_handle = shmseg->shm_internal;
    vm_object_deallocate(shm_handle->shm_object);
    sys_free(shm_handle, M_SHM);
    shmseg->shm_internal = NULL;
    size = round_page(shmseg->shm_segsz);
    shm_committed -= btoc(size);
    shm_nused--;
    shmseg->shm_perm.mode = SHMSEG_FREE;
}
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
    struct shmid_ds *shmseg;
    int segnum, result;
    size_t size __attribute__ ((unused));

    GIANT_REQUIRED;

    segnum = IPCID_TO_IX(shmmap_s->shmid);
    shmseg = &shmsegs[segnum];
    size = round_page(shmseg->shm_segsz);
    result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
    if (result != KERN_SUCCESS)
        return (EINVAL);
    shmmap_s->shmid = -1;
    shmseg->shm_dtime = time (NULL);
    if ((--shmseg->shm_nattch <= 0) &&
        (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
        shm_deallocate_segment(shmseg);
        shm_last_free = segnum;
    }
    return (0);
}
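
/*
 * Note the deferred-removal protocol above: detaching only deallocates
 * the segment if IPC_RMID was already issued (SHMSEG_REMOVED) and this
 * was the last attach, mirroring SysV semantics where a removed segment
 * stays usable by existing attachers until the final shmdt().
 */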
#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
    const void *shmaddr;
};
#endif

int
shmdt(struct thread *td, struct shmdt_args *uap)
{
    struct proc *p = td->td_proc;
    struct shmmap_state *shmmap_s;
    int i;
    int error = 0;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    shmmap_s = p->p_vmspace->vm_shm;
    if (shmmap_s == NULL) {
        error = EINVAL;
        goto done2;
    }
    for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
        if (shmmap_s->shmid != -1 &&
            shmmap_s->va == (vm_offset_t)uap->shmaddr)
            break;
    }
    if (i == shminfo.shmseg) {
        error = EINVAL;
        goto done2;
    }
    error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
    return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
    int shmid;
    const void *shmaddr;
    int shmflg;
};
#endif
int
kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg)
{
    struct proc *p = td->td_proc;
    int i, flags __attribute__ ((unused));
    struct shmid_ds *shmseg;
    struct shmmap_state *shmmap_s = NULL;
    struct shm_handle *shm_handle;
    vm_offset_t attach_va;
    vm_prot_t prot;
    vm_size_t size;
    int rv;
    int error = 0;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    shmmap_s = p->p_vmspace->vm_shm;
    if (shmmap_s == NULL) {
        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
        for (i = 0; i < shminfo.shmseg; i++)
            shmmap_s[i].shmid = -1;
        p->p_vmspace->vm_shm = shmmap_s;
    }
    shmseg = shm_find_segment_by_shmid(shmid);
    if (shmseg == NULL) {
        error = EINVAL;
        goto done2;
    }
    error = ipcperm(td, &shmseg->shm_perm,
        (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
    if (error)
        goto done2;
    for (i = 0; i < shminfo.shmseg; i++) {
        if (shmmap_s->shmid == -1)
            break;
        shmmap_s++;
    }
    if (i >= shminfo.shmseg) {
        error = EMFILE;
        goto done2;
    }
    size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
    prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
    prot = VM_PROT_READ;
#endif
    if ((shmflg & SHM_RDONLY) == 0)
        prot |= VM_PROT_WRITE;
    flags = MAP_ANON | MAP_SHARED;
    debug_printf ("shmaddr: %x, shmflg: %x", shmaddr, shmflg);
#ifdef __CYGWIN__
    /* The alignment checks have already been made in the Cygwin DLL
       and shmat's only job is to keep record of the attached mem.
       These checks break shm on 9x since MapViewOfFileEx apparently
       returns memory which isn't aligned to SHMLBA.  Go figure!  */
    attach_va = (vm_offset_t)shmaddr;
#else
    if (shmaddr) {
        flags |= MAP_FIXED;
        if (shmflg & SHM_RND) {
            attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
        } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
            attach_va = (vm_offset_t)shmaddr;
        } else {
            error = EINVAL;
            goto done2;
        }
    } else {
        /*
         * This is just a hint to vm_map_find() about where to
         * put it.
         */
        attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
            + maxtsiz + maxdsiz);
    }
#endif
    shm_handle = shmseg->shm_internal;
    vm_object_reference(shm_handle->shm_object);
    rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
        0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
    if (rv != KERN_SUCCESS) {
        error = ENOMEM;
        goto done2;
    }
    vm_map_inherit(&p->p_vmspace->vm_map,
        attach_va, attach_va + size, VM_INHERIT_SHARE);

    shmmap_s->va = attach_va;
    shmmap_s->shmid = shmid;
    shmseg->shm_lpid = p->p_pid;
    shmseg->shm_atime = time (NULL);
    shmseg->shm_nattch++;
    td->td_retval[0] = attach_va;
done2:
    return (error);
}
int
shmat(struct thread *td, struct shmat_args *uap)
{
    return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}
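
/*
 * Illustrative notes:
 *
 * In the non-Cygwin branch of kern_shmat() above, SHM_RND rounds the
 * caller's address down to an SHMLBA boundary; e.g. with SHMLBA == 64K,
 * shmaddr 0x23456 & ~0xffff attaches at 0x20000.
 *
 * The client-side sequence that reaches these handlers is the usual
 * SysV pattern (sketch of a user program, error handling trimmed):
 *
 *    #include <sys/ipc.h>
 *    #include <sys/shm.h>
 *    #include <string.h>
 *
 *    int use_shm (key_t key)
 *    {
 *      int id = shmget (key, 4096, IPC_CREAT | 0600); // shmget() below
 *      char *p = (char *) shmat (id, NULL, 0);        // kern_shmat() above
 *      strcpy (p, "hello");                           // visible to other attachers
 *      shmdt (p);                                     // shmdt() above
 *      shmctl (id, IPC_RMID, NULL);                   // kern_shmctl() below
 *      return 0;
 *    }
 */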
#ifndef __CYGWIN__
struct oshmid_ds {
    struct ipc_perm shm_perm;	/* operation perms */
    int shm_segsz;		/* size of segment (bytes) */
    u_short shm_cpid;		/* pid, creator */
    u_short shm_lpid;		/* pid, last operation */
    short shm_nattch;		/* no. of current attaches */
    time_t shm_atime;		/* last attach time */
    time_t shm_dtime;		/* last detach time */
    time_t shm_ctime;		/* last change time */
    void *shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
    int shmid;
    int cmd;
    struct oshmid_ds *ubuf;
};
static int
oshmctl(struct thread *td, struct oshmctl_args *uap)
{
    int error = 0;
    struct shmid_ds *shmseg;
    struct oshmid_ds outbuf;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        error = EINVAL;
        goto done2;
    }
    switch (uap->cmd) {
    case IPC_STAT:
        error = ipcperm(td, &shmseg->shm_perm, IPC_R);
        if (error)
            goto done2;
        outbuf.shm_perm = shmseg->shm_perm;
        outbuf.shm_segsz = shmseg->shm_segsz;
        outbuf.shm_cpid = shmseg->shm_cpid;
        outbuf.shm_lpid = shmseg->shm_lpid;
        outbuf.shm_nattch = shmseg->shm_nattch;
        outbuf.shm_atime = shmseg->shm_atime;
        outbuf.shm_dtime = shmseg->shm_dtime;
        outbuf.shm_ctime = shmseg->shm_ctime;
        outbuf.shm_handle = shmseg->shm_internal;
        error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
        break;
    default:
        /* XXX casting to (sy_call_t *) is bogus, as usual. */
        error = ((sy_call_t *)shmctl)(td, uap);
        break;
    }
done2:
    return (error);
}
#endif /* !__CYGWIN__ */
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
    int shmid;
    int cmd;
    struct shmid_ds *buf;
};
#endif
int
kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz)
{
    int error = 0;
    struct shmid_ds *shmseg;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);

    switch (cmd) {
    case IPC_INFO:
        memcpy(buf, &shminfo, sizeof(shminfo));
        if (bufsz)
            *bufsz = sizeof(shminfo);
        td->td_retval[0] = shmalloced;
        goto done2;
    case SHM_INFO: {
        struct shm_info shm_info;
        shm_info.used_ids = shm_nused;
        shm_info.shm_tot = shm_committed * PAGE_SIZE;
#ifdef __CYGWIN__
        shm_info.shm_atts = shm_nattch;
#else
        shm_info.shm_rss = 0;		/*XXX where to get from ? */
        shm_info.shm_swp = 0;		/*XXX where to get from ? */
        shm_info.swap_attempts = 0;	/*XXX where to get from ? */
        shm_info.swap_successes = 0;	/*XXX where to get from ? */
#endif /* __CYGWIN__ */
        memcpy(buf, &shm_info, sizeof(shm_info));
        if (bufsz)
            *bufsz = sizeof(shm_info);
        td->td_retval[0] = shmalloced;
        goto done2;
    }
    }
    if (cmd == SHM_STAT)
        shmseg = shm_find_segment_by_shmidx(shmid);
    else
        shmseg = shm_find_segment_by_shmid(shmid);
    if (shmseg == NULL) {
        error = EINVAL;
        goto done2;
    }
    switch (cmd) {
    case SHM_STAT:
    case IPC_STAT:
        error = ipcperm(td, &shmseg->shm_perm, IPC_R);
        if (error)
            goto done2;
        memcpy(buf, shmseg, sizeof(struct shmid_ds));
        if (bufsz)
            *bufsz = sizeof(struct shmid_ds);
        if (cmd == SHM_STAT)
            td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
        break;
    case IPC_SET: {
        struct shmid_ds *shmid;

        shmid = (struct shmid_ds *)buf;
        error = ipcperm(td, &shmseg->shm_perm, IPC_M);
        if (error)
            goto done2;
        shmseg->shm_perm.uid = shmid->shm_perm.uid;
        shmseg->shm_perm.gid = shmid->shm_perm.gid;
        shmseg->shm_perm.mode =
            (shmseg->shm_perm.mode & ~ACCESSPERMS) |
            (shmid->shm_perm.mode & ACCESSPERMS);
        shmseg->shm_ctime = time (NULL);
        break;
    }
    case IPC_RMID:
        error = ipcperm(td, &shmseg->shm_perm, IPC_M);
        if (error)
            goto done2;
        shmseg->shm_perm.key = IPC_PRIVATE;
        shmseg->shm_perm.mode |= SHMSEG_REMOVED;
        if (shmseg->shm_nattch <= 0) {
            shm_deallocate_segment(shmseg);
            shm_last_free = IPCID_TO_IX(shmid);
        }
        break;
    default:
        error = EINVAL;
        break;
    }
done2:
    return (error);
}
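
/*
 * In the IPC_SET case above only the low permission bits change:
 * (mode & ~ACCESSPERMS) keeps the internal SHMSEG_* state bits while
 * (shmid->shm_perm.mode & ACCESSPERMS) takes the caller's rwxrwxrwx
 * bits.  E.g. with ACCESSPERMS == 0777, state 0x0800|0600 updated with
 * user mode 0644 yields 0x0800|0644; the SHMSEG_ALLOCATED bit survives.
 */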
int
shmctl(struct thread *td, struct shmctl_args *uap)
{
    int error = 0;
    struct shmid_ds buf;
    size_t bufsz;

    /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
    if (uap->cmd == IPC_SET) {
        if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
            goto done;
    }
#ifdef __CYGWIN__
    if (uap->cmd == IPC_INFO && uap->shmid > 0) {
        /* Can't use the default kern_shmctl interface. */
        int shmid = uap->shmid;
        if (shmid > shminfo.shmmni)
            shmid = shminfo.shmmni;
        error = copyout(shmsegs, uap->buf,
                shmid * sizeof(struct shmid_ds));
        td->td_retval[0] = error ? -1 : 0;
        return (error);
    }
#endif /* __CYGWIN__ */

    error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
    if (error)
        goto done;

    /* Cases in which we need to copyout */
    switch (uap->cmd) {
    case IPC_STAT:
    case SHM_STAT:
        error = copyout(&buf, uap->buf, bufsz);
        break;
    }

done:
    if (error) {
        /* Invalidate the return value */
        td->td_retval[0] = -1;
    }
    return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
    key_t key;
    size_t size;
    int shmflg;
};
#endif
static int
shmget_existing(struct thread *td, struct shmget_args *uap, int mode, int segnum)
{
    struct shmid_ds *shmseg;
    int error;

    shmseg = &shmsegs[segnum];
    if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
        /*
         * This segment is in the process of being allocated.  Wait
         * until it's done, and look the key up again (in case the
         * allocation failed or it was freed).
         */
        shmseg->shm_perm.mode |= SHMSEG_WANTED;
        error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
        if (error)
            return (error);
        return (EAGAIN);
    }
    if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
        return (EEXIST);
    error = ipcperm(td, &shmseg->shm_perm, mode);
    if (error)
        return (error);
    if (uap->size && uap->size > shmseg->shm_segsz)
        return (EINVAL);
    td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
#ifdef __CYGWIN__
    vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
    return (0);
}
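
/*
 * The SHMSEG_REMOVED test above also covers the allocation race: a slot
 * being set up is marked ALLOCATED|REMOVED, so a second shmget() on the
 * same key sets SHMSEG_WANTED and sleeps; shmget_allocate_segment()
 * wakes it once the segment is complete, and the key is then looked up
 * again from scratch.
 */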
static int
shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode)
{
    int i, segnum, shmid, size;
#ifndef __CYGWIN__
    struct ucred *cred = td->td_ucred;
#endif /* __CYGWIN__ */
    struct shmid_ds *shmseg;
    struct shm_handle *shm_handle;

    GIANT_REQUIRED;

    if (uap->size < (unsigned long) shminfo.shmmin ||
        uap->size > (unsigned long) shminfo.shmmax)
        return (EINVAL);
    if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
        return (ENOSPC);
    size = round_page(uap->size);
    if (shm_committed + btoc(size) > shminfo.shmall)
        return (ENOMEM);
    if (shm_last_free < 0) {
        shmrealloc();	/* Maybe expand the shmsegs[] array. */
        for (i = 0; i < shmalloced; i++)
            if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                break;
        if (i == shmalloced)
            return (ENOSPC);
        segnum = i;
    } else {
        segnum = shm_last_free;
        shm_last_free = -1;
    }
    shmseg = &shmsegs[segnum];
    /*
     * In case we sleep in malloc(), mark the segment present but deleted
     * so that noone else tries to create the same key.
     */
    shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
    shmseg->shm_perm.key = uap->key;
    shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
    shm_handle = (struct shm_handle *)
        sys_malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
    shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

    /*
     * We make sure that we have allocated a pager before we need
     * to.
     */
    if (shm_use_phys) {
        shm_handle->shm_object =
            vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
    } else {
        shm_handle->shm_object =
            vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
    }
    VM_OBJECT_LOCK(shm_handle->shm_object);
    vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
    vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
    VM_OBJECT_UNLOCK(shm_handle->shm_object);

    shmseg->shm_internal = shm_handle;
#ifdef __CYGWIN__
    shmseg->shm_perm.cuid = shmseg->shm_perm.uid = td->ipcblk->uid;
    shmseg->shm_perm.cgid = shmseg->shm_perm.gid = td->ipcblk->gid;
#else
    shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
    shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
#endif /* __CYGWIN__ */
    shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
        (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
    shmseg->shm_segsz = uap->size;
    shmseg->shm_cpid = td->td_proc->p_pid;
    shmseg->shm_lpid = shmseg->shm_nattch = 0;
    shmseg->shm_atime = shmseg->shm_dtime = 0;
    shmseg->shm_ctime = time (NULL);
    shm_committed += btoc(size);
    shm_nused++;
    if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
        /*
         * Somebody else wanted this key while we were asleep.  Wake
         * them up now.
         */
        shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
        wakeup(shmseg);
    }
    td->td_retval[0] = shmid;
#ifdef __CYGWIN__
    vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
    return (0);
}
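
/*
 * The seq increment above is what invalidates stale ids: if slot 3 with
 * seq 5 is removed and later reused, the slot hands out seq 6, so an old
 * id still encoding (3, 5) no longer passes shm_find_segment_by_shmid().
 */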
int
shmget(struct thread *td, struct shmget_args *uap)
{
    int segnum, mode;
    int error;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    mode = uap->shmflg & ACCESSPERMS;
    if (uap->key != IPC_PRIVATE) {
    again:
#ifdef __CYGWIN__
        if (uap->shmflg & IPC_KEY_IS_SHMID)
            segnum = shm_find_segment_by_shmid ((int) uap->key) ?
                     IPCID_TO_IX((int) uap->key) : -1;
        else
#endif
        segnum = shm_find_segment_by_key(uap->key);
        if (segnum >= 0) {
            error = shmget_existing(td, uap, mode, segnum);
            if (error == EAGAIN)
                goto again;
            goto done2;
        }
        if ((uap->shmflg & IPC_CREAT) == 0) {
            error = ENOENT;
            goto done2;
        }
    }
    error = shmget_allocate_segment(td, uap, mode);
done2:
#ifdef __CYGWIN__
    if (!error)
        ipcexit_creat_hookthread (td);
    else
        td->td_retval[0] = -1;
#endif
    return (error);
}
#ifndef __CYGWIN__
#ifndef _SYS_SYSPROTO_H_
/* XXX actually varargs. */
struct shmsys_args /* {
    int	which;
    int	a2;
    int	a3;
    int	a4;
} */;
#endif

int
shmsys(struct thread *td, struct shmsys_args *uap)
{
    int error;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    if (uap->which < 0 ||
        uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
        return (EINVAL);
    error = (*shmcalls[uap->which])(td, &uap->a2);
    return (error);
}
#endif /* __CYGWIN__ */
static void
shmfork_myhook(struct proc *p1, struct proc *p2)
{
    struct shmmap_state *shmmap_s;
    size_t size;
    int i;

    size = shminfo.shmseg * sizeof(struct shmmap_state);
    shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
    bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
    p2->p_vmspace->vm_shm = shmmap_s;
    for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
        if (shmmap_s->shmid != -1) {
            shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
        }
}
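
/*
 * On fork the child inherits every attached segment: the mapping table
 * is copied wholesale and each live entry bumps shm_nattch, so a parent
 * exit cannot tear down storage the child is still using.
 */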
int cygwin_shmfork_myhook (struct thread *td, struct proc *parent)
{
    ipcexit_creat_hookthread (td);
    ipc_p_vmspace (td->ipcblk);
    ipc_p_vmspace (parent);
    shmfork_myhook (parent, td->ipcblk);
    return 0;
}
void
shmexit_myhook(struct vmspace *vm)
{
    struct shmmap_state *base, *shm;
    int i;

    GIANT_REQUIRED;

    if ((base = vm->vm_shm) != NULL) {
        vm->vm_shm = NULL;
        for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
            if (shm->shmid != -1)
                shm_delete_mapping(vm, shm);
        }
        sys_free(base, M_SHM);
    }
}
static void
shmrealloc(void)
{
    int i;
    struct shmid_ds *newsegs;

    if (shmalloced >= shminfo.shmmni)
        return;

    newsegs = (struct shmid_ds *) sys_malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
    if (newsegs == NULL)
        return;
    for (i = 0; i < shmalloced; i++)
        bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
    for (; i < shminfo.shmmni; i++) {
        /* Initialize the new slots in the new array (not the old,
           smaller shmsegs array, which is about to be freed). */
        newsegs[i].shm_perm.mode = SHMSEG_FREE;
        newsegs[i].shm_perm.seq = 0;
    }
    sys_free(shmsegs, M_SHM);
    shmsegs = newsegs;
    shmalloced = shminfo.shmmni;
}
void
shminit(void)
{
    int i;
    tun_bool_t shm_ar = TUN_UNDEF;

    TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
    for (i = PAGE_SIZE; i > 0; i--) {
        shminfo.shmmax = shminfo.shmall * i;
        if (shminfo.shmmax >= shminfo.shmall)
            break;
    }
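    /*
     * Illustrative: the loop above keeps the largest factor i <= page
     * size for which shmall * i does not overflow; normally it stops on
     * the first pass with shmmax = shmall * PAGE_SIZE, e.g. 8192 pages
     * * 4096 bytes = 32 MB, and only degrades if that product wraps.
     */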
    TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
    TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
    TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
    TUNABLE_BOOL_FETCH("kern.ipc.shm_allow_removed", &shm_ar);
    if (shm_ar == TUN_TRUE)
        shm_allow_removed = 1;
    shmalloced = shminfo.shmmni;
    shmsegs = (struct shmid_ds *) sys_malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
    if (shmsegs == NULL)
        panic("cannot allocate initial memory for sysvshm");
    for (i = 0; i < shmalloced; i++) {
        shmsegs[i].shm_perm.mode = SHMSEG_FREE;
        shmsegs[i].shm_perm.seq = 0;
    }
    shm_last_free = 0;
    shm_nused = 0;
    shm_committed = 0;
#ifndef __CYGWIN__
    shmexit_hook = &shmexit_myhook;
    shmfork_hook = &shmfork_myhook;
#endif /* __CYGWIN__ */
}
static int
shmunload(void)
{
    if (shm_nused > 0)
        return (EBUSY);

    sys_free(shmsegs, M_SHM);
#ifndef __CYGWIN__
    shmexit_hook = NULL;
    shmfork_hook = NULL;
#endif /* __CYGWIN__ */
    return (0);
}
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{
    return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}
#ifndef __CYGWIN__
static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
    int error = 0;

    switch (cmd) {
    case MOD_LOAD:
        shminit();
        break;
    case MOD_UNLOAD:
        error = shmunload();
        break;
    case MOD_SHUTDOWN:
        break;
    default:
        error = EINVAL;
        break;
    }
    return (error);
}

static moduledata_t sysvshm_mod = {
    "sysvshm",
    &sysvshm_modload,
    NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
    SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
#endif /* __CYGWIN__ */
#endif /* __OUTSIDE_CYGWIN__ */