/* winsup/cygserver/sysv_shm.cc (newlib-cygwin.git) */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file is heavily changed to become part of Cygwin's cygserver.
 */

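/*
 * When built for cygserver, the FreeBSD kernel VM and locking primitives
 * used by the original sysv_shm.c are mapped below onto user-space no-ops
 * or Cygwin equivalents (for example, PAGE_SIZE becomes getpagesize()).
 */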
#ifdef __OUTSIDE_CYGWIN__

#include <sys/cdefs.h>
#define __FBSDID(s) const char version[] = (s)
__FBSDID("$FreeBSD: /repoman/r/ncvs/src/sys/kern/sysv_shm.c,v 1.89 2003/11/07 04:47:14 rwatson Exp $");
/* CV, 2006-01-09: Inspected upstream up to version 1.104. */

#define __BSD_VISIBLE 1
#include <sys/param.h>
#include <sys/sysproto.h>

#include "cygserver.h"
#include "cygserver_ipc.h"

#define PAGE_SIZE (getpagesize ())
#define PAGE_MASK (PAGE_SIZE - 1)
#define btoc(b) (((b) + PAGE_MASK) / PAGE_SIZE)
#define round_page(p) ((((unsigned long)(p)) + PAGE_MASK) & ~(PAGE_MASK))
#define ACCESSPERMS (0777)

#define GIANT_REQUIRED

#define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED)

#define KERN_SUCCESS 0
#define VM_PROT_READ PROT_READ
#define VM_PROT_WRITE PROT_WRITE
#define VM_INHERIT_SHARE 0
#define VM_PROT_DEFAULT 0
#define VM_OBJECT_LOCK(a)
#define vm_object_clear_flag(a,b)
#define vm_object_set_flag(a,b)
#define VM_OBJECT_UNLOCK(a)
#define vm_map_remove(a,b,c) KERN_SUCCESS
typedef int vm_prot_t;
#endif /* __CYGWIN__ */

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int oshmctl(struct thread *td, struct oshmctl_args *uap);
#endif /* __CYGWIN__ */
static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
    (sy_call_t *)shmat, (sy_call_t *)oshmctl,
    (sy_call_t *)shmdt, (sy_call_t *)shmget,
    /* ... */
};
#endif /* __CYGWIN__ */

#define SHMSEG_FREE      0x0200
#define SHMSEG_REMOVED   0x0400
#define SHMSEG_ALLOCATED 0x0800
#define SHMSEG_WANTED    0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced, shm_nattch;
static struct shmid_ds *shmsegs;

struct shm_handle {
    /* vm_offset_t kva; */
    vm_object_t shm_object;
};

struct shmmap_state {
    vm_offset_t va;
    int shmid;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);

#define SHMMAXPGS 8192  /* Note: sysv shared memory is swap backed. */
#define SHMMAX (SHMMAXPGS*PAGE_SIZE)
#define SHMALL (SHMMAXPGS)

struct shminfo shminfo = {
    /* ... */
};

static int shm_use_phys;

static long shm_use_phys;
static long shm_allow_removed;
#endif /* __CYGWIN__ */

struct shm_info shm_info;
#endif /* __CYGWIN__ */

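/*
 * The shminfo limits and the shm_use_phys/shm_allow_removed switches are
 * exported as kern.ipc.* sysctl variables; the shmsegs array is exported
 * read-only through sysctl_shmsegs().
 */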
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");
#endif /* __CYGWIN__ */

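/*
 * Lookup helpers: find a segment slot by IPC key, by shmid (slot index plus
 * sequence number, via IPCID_TO_IX/IPCID_TO_SEQ), or by raw slot index.
 * A slot only matches if it is SHMSEG_ALLOCATED and, unless
 * shm_allow_removed is set, not already marked SHMSEG_REMOVED.
 */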
static int
shm_find_segment_by_key(key_t key)
{
    /* ... */

    for (i = 0; i < shmalloced; i++)
        if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
            shmsegs[i].shm_perm.key == key)
            return (i);
    return (-1);
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
    struct shmid_ds *shmseg;
    /* ... */

    segnum = IPCID_TO_IX(shmid);
    if (segnum < 0 || segnum >= shmalloced)
        return (NULL);
    shmseg = &shmsegs[segnum];
    if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
        (!shm_allow_removed &&
         (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
        shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
        return (NULL);
    return (shmseg);
}

static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)
{
    struct shmid_ds *shmseg;

    if (segnum < 0 || segnum >= shmalloced)
        return (NULL);
    shmseg = &shmsegs[segnum];
    if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
        (!shm_allow_removed &&
         (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
        return (NULL);
    return (shmseg);
}

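/*
 * shm_deallocate_segment() drops the reference on the VM object backing a
 * segment, returns its pages to the committed-memory count and marks the
 * slot SHMSEG_FREE.  shm_delete_mapping() tears down one process-local
 * attach; when the last attach of a removed segment goes away, the segment
 * itself is deallocated.
 */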
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
    struct shm_handle *shm_handle;
    /* ... */

    shm_handle = shmseg->shm_internal;
    vm_object_deallocate(shm_handle->shm_object);
    sys_free(shm_handle, M_SHM);
    shmseg->shm_internal = NULL;
    size = round_page(shmseg->shm_segsz);
    shm_committed -= btoc(size);
    /* ... */
    shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
    struct shmid_ds *shmseg;
    /* ... */

    segnum = IPCID_TO_IX(shmmap_s->shmid);
    shmseg = &shmsegs[segnum];
    size = round_page(shmseg->shm_segsz);
    result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
    if (result != KERN_SUCCESS)
        return (EINVAL);
    shmmap_s->shmid = -1;
    shmseg->shm_dtime = time (NULL);
    if ((--shmseg->shm_nattch <= 0) &&
        (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
        shm_deallocate_segment(shmseg);
        shm_last_free = segnum;
    }
    return (0);
}

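/*
 * shmdt(2): scan the calling process's shmmap_state array for the entry
 * whose attach address matches shmaddr, then detach it via
 * shm_delete_mapping().
 */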
#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
    /* ... */
};
#endif

int
shmdt(struct thread *td, struct shmdt_args *uap)
{
    struct proc *p = td->td_proc;
    struct shmmap_state *shmmap_s;
    /* ... */

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);

    shmmap_s = p->p_vmspace->vm_shm;
    if (shmmap_s == NULL) {
        /* ... */
    }
    for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
        if (shmmap_s->shmid != -1 &&
            shmmap_s->va == (vm_offset_t)uap->shmaddr) {
            break;
        }
    }
    if (i == shminfo.shmseg) {
        /* ... */
    }
    error = shm_delete_mapping(p->p_vmspace, shmmap_s);
    /* ... */
}

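/*
 * kern_shmat() attaches a segment to the calling process: it allocates the
 * per-process shmmap_state array on first use, checks IPC permissions
 * (IPC_R, or IPC_R|IPC_W unless SHM_RDONLY is given), maps the segment's
 * VM object at attach_va and records the attach in shmmap_state.  Under
 * Cygwin the attach address is taken as handed in by the Cygwin DLL, which
 * has already done the alignment checks.
 */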
#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
    /* ... */
};
#endif

int
kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg)
{
    struct proc *p = td->td_proc;
    struct shmid_ds *shmseg;
    struct shmmap_state *shmmap_s = NULL;
    struct shm_handle *shm_handle;
    vm_offset_t attach_va;
    /* ... */

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);

    shmmap_s = p->p_vmspace->vm_shm;
    if (shmmap_s == NULL) {
        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
        for (i = 0; i < shminfo.shmseg; i++)
            shmmap_s[i].shmid = -1;
        p->p_vmspace->vm_shm = shmmap_s;
    }
    shmseg = shm_find_segment_by_shmid(shmid);
    if (shmseg == NULL) {
        /* ... */
    }
    error = ipcperm(td, &shmseg->shm_perm,
        (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
    /* ... */
    for (i = 0; i < shminfo.shmseg; i++) {
        if (shmmap_s->shmid == -1)
            break;
        /* ... */
    }
    if (i >= shminfo.shmseg) {
        /* ... */
    }
    size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
    prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
    prot = VM_PROT_READ;
#endif
    if ((shmflg & SHM_RDONLY) == 0)
        prot |= VM_PROT_WRITE;
    flags = MAP_ANON | MAP_SHARED;
    debug_printf ("shmaddr: %x, shmflg: %x", shmaddr, shmflg);

    /* The alignment checks have already been made in the Cygwin DLL
       and shmat's only job is to keep record of the attached mem.
       These checks break shm on 9x since MapViewOfFileEx apparently
       returns memory which isn't aligned to SHMLBA.  Go figure!  */
    attach_va = (vm_offset_t)shmaddr;
    /* ... */
    if (shmflg & SHM_RND) {
        attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
    } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
        attach_va = (vm_offset_t)shmaddr;
    }
    /* ... */
    /*
     * This is just a hint to vm_map_find() about where to
     * ...
     */
    attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
        + maxtsiz + maxdsiz);
    /* ... */
    shm_handle = shmseg->shm_internal;
    vm_object_reference(shm_handle->shm_object);
    rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
        0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
    if (rv != KERN_SUCCESS) {
        /* ... */
    }
    vm_map_inherit(&p->p_vmspace->vm_map,
        attach_va, attach_va + size, VM_INHERIT_SHARE);

    shmmap_s->va = attach_va;
    shmmap_s->shmid = shmid;
    shmseg->shm_lpid = p->p_pid;
    shmseg->shm_atime = time (NULL);
    shmseg->shm_nattch++;
    /* ... */
    td->td_retval[0] = attach_va;
    /* ... */
}

int
shmat(struct thread *td, struct shmat_args *uap)
{
    return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}

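/*
 * Old-style shmctl() interface: oshmid_ds is the historical layout with
 * 16-bit pids; oshmctl() translates a segment's shmid_ds into that layout
 * and copies it out to the caller, deferring everything else to shmctl().
 */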
struct oshmid_ds {
    struct ipc_perm shm_perm;   /* operation perms */
    int shm_segsz;              /* size of segment (bytes) */
    u_short shm_cpid;           /* pid, creator */
    u_short shm_lpid;           /* pid, last operation */
    short shm_nattch;           /* no. of current attaches */
    time_t shm_atime;           /* last attach time */
    time_t shm_dtime;           /* last detach time */
    time_t shm_ctime;           /* last change time */
    void *shm_handle;           /* internal handle for shm segment */
};

struct oshmctl_args {
    /* ... */
    struct oshmid_ds *ubuf;
};

static int
oshmctl(struct thread *td, struct oshmctl_args *uap)
{
    /* ... */
    struct shmid_ds *shmseg;
    struct oshmid_ds outbuf;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);

    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        /* ... */
    }
    error = ipcperm(td, &shmseg->shm_perm, IPC_R);
    /* ... */
    outbuf.shm_perm = shmseg->shm_perm;
    outbuf.shm_segsz = shmseg->shm_segsz;
    outbuf.shm_cpid = shmseg->shm_cpid;
    outbuf.shm_lpid = shmseg->shm_lpid;
    outbuf.shm_nattch = shmseg->shm_nattch;
    outbuf.shm_atime = shmseg->shm_atime;
    outbuf.shm_dtime = shmseg->shm_dtime;
    outbuf.shm_ctime = shmseg->shm_ctime;
    outbuf.shm_handle = shmseg->shm_internal;
    error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
    /* ... */

    /* XXX casting to (sy_call_t *) is bogus, as usual. */
    error = ((sy_call_t *)shmctl)(td, uap);
    /* ... */
}
#endif /* __CYGWIN__ */

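/*
 * kern_shmctl() implements the shmctl commands against a caller-supplied
 * buffer: it can report the global shminfo limits and the shm_info usage
 * counters, copy out a segment's shmid_ds, apply new ownership and
 * permission bits from the buffer, or mark a segment removed and free it
 * once the last attach is gone.
 */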
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
    /* ... */
    struct shmid_ds *buf;
};
#endif

int
kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz)
{
    /* ... */
    struct shmid_ds *shmseg;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);

    /* ... */
    memcpy(buf, &shminfo, sizeof(shminfo));
    /* ... */
    *bufsz = sizeof(shminfo);
    td->td_retval[0] = shmalloced;
    /* ... */
    {
        struct shm_info shm_info;

        shm_info.used_ids = shm_nused;
        shm_info.shm_tot = shm_committed * PAGE_SIZE;
        /* ... */
        shm_info.shm_atts = shm_nattch;
        /* ... */
        shm_info.shm_rss = 0;           /* XXX where to get from ? */
        shm_info.shm_swp = 0;           /* XXX where to get from ? */
        shm_info.swap_attempts = 0;     /* XXX where to get from ? */
        shm_info.swap_successes = 0;    /* XXX where to get from ? */
#endif /* __CYGWIN__ */
        memcpy(buf, &shm_info, sizeof(shm_info));
        /* ... */
        *bufsz = sizeof(shm_info);
        td->td_retval[0] = shmalloced;
        /* ... */
    }
    /* ... */
    shmseg = shm_find_segment_by_shmidx(shmid);
    /* ... */
    shmseg = shm_find_segment_by_shmid(shmid);
    if (shmseg == NULL) {
        /* ... */
    }
    /* ... */
    error = ipcperm(td, &shmseg->shm_perm, IPC_R);
    /* ... */
    memcpy(buf, shmseg, sizeof(struct shmid_ds));
    /* ... */
    *bufsz = sizeof(struct shmid_ds);
    /* ... */
    td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
    /* ... */
    {
        struct shmid_ds *shmid;

        shmid = (struct shmid_ds *)buf;
        error = ipcperm(td, &shmseg->shm_perm, IPC_M);
        /* ... */
        shmseg->shm_perm.uid = shmid->shm_perm.uid;
        shmseg->shm_perm.gid = shmid->shm_perm.gid;
        shmseg->shm_perm.mode =
            (shmseg->shm_perm.mode & ~ACCESSPERMS) |
            (shmid->shm_perm.mode & ACCESSPERMS);
        shmseg->shm_ctime = time (NULL);
        /* ... */
    }
    /* ... */
    error = ipcperm(td, &shmseg->shm_perm, IPC_M);
    /* ... */
    shmseg->shm_perm.key = IPC_PRIVATE;
    shmseg->shm_perm.mode |= SHMSEG_REMOVED;
    if (shmseg->shm_nattch <= 0) {
        shm_deallocate_segment(shmseg);
        shm_last_free = IPCID_TO_IX(shmid);
    }
    /* ... */
}

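/*
 * shmctl(2) front end: copies the user buffer in for IPC_SET, handles the
 * IPC_INFO query with a positive shmid directly against shmsegs[], and
 * otherwise lets kern_shmctl() do the work before copying the result back
 * out to uap->buf.
 */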
int
shmctl(struct thread *td, struct shmctl_args *uap)
{
    /* ... */

    /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
    if (uap->cmd == IPC_SET) {
        if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
            /* ... */;
    }
    /* ... */
    if (uap->cmd == IPC_INFO && uap->shmid > 0) {
        /* Can't use the default kern_shmctl interface. */
        int shmid = uap->shmid;

        if (shmid > shminfo.shmmni)
            shmid = shminfo.shmmni;
        error = copyout(shmsegs, uap->buf,
            shmid * sizeof(struct shmid_ds));
        td->td_retval[0] = error ? -1 : 0;
        /* ... */
    }
#endif /* __CYGWIN__ */

    error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
    /* ... */

    /* Cases in which we need to copyout */
    /* ... */
    error = copyout(&buf, uap->buf, bufsz);
    /* ... */

    /* Invalidate the return value */
    td->td_retval[0] = -1;
    /* ... */
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
    /* ... */
};
#endif

static int
shmget_existing(struct thread *td, struct shmget_args *uap, int mode, int segnum)
{
    struct shmid_ds *shmseg;
    /* ... */

    shmseg = &shmsegs[segnum];
    if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
        /*
         * This segment is in the process of being allocated.  Wait
         * until it's done, and look the key up again (in case the
         * allocation failed or it was freed).
         */
        shmseg->shm_perm.mode |= SHMSEG_WANTED;
        error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
        /* ... */
    }
    if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
        return (EEXIST);
    error = ipcperm(td, &shmseg->shm_perm, mode);
    /* ... */
    if (uap->size && uap->size > shmseg->shm_segsz)
        return (EINVAL);
    td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
    /* ... */
    vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
    return (0);
}

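/*
 * shmget_allocate_segment() creates a new segment: it validates the size
 * against shmmin/shmmax and the system-wide shmall limit, picks a free slot
 * (growing shmsegs[] via shmrealloc() if needed), allocates the backing VM
 * object and fills in the shmid_ds bookkeeping.
 */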
static int
shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode)
{
    int i, segnum, shmid, size;
    /* ... */
    struct ucred *cred = td->td_ucred;
#endif /* __CYGWIN__ */
    struct shmid_ds *shmseg;
    struct shm_handle *shm_handle;

    if (uap->size < (unsigned long) shminfo.shmmin ||
        uap->size > (unsigned long) shminfo.shmmax)
        return (EINVAL);
    if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
        return (ENOSPC);
    size = round_page(uap->size);
    if (shm_committed + btoc(size) > shminfo.shmall)
        return (ENOMEM);
    if (shm_last_free < 0) {
        shmrealloc();   /* Maybe expand the shmsegs[] array. */
        for (i = 0; i < shmalloced; i++)
            if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                break;
        /* ... */
    } else {
        segnum = shm_last_free;
        /* ... */
    }
    shmseg = &shmsegs[segnum];
    /*
     * In case we sleep in malloc(), mark the segment present but deleted
     * so that noone else tries to create the same key.
     */
    shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
    shmseg->shm_perm.key = uap->key;
    shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
    shm_handle = (struct shm_handle *)
        sys_malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
    shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
    /*
     * We make sure that we have allocated a pager before we need
     * ...
     */
    /* ... */
    shm_handle->shm_object =
        vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
    /* ... */
    shm_handle->shm_object =
        vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
    /* ... */
    VM_OBJECT_LOCK(shm_handle->shm_object);
    vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
    vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
    VM_OBJECT_UNLOCK(shm_handle->shm_object);

    shmseg->shm_internal = shm_handle;
    /* ... */
    shmseg->shm_perm.cuid = shmseg->shm_perm.uid = td->ipcblk->uid;
    shmseg->shm_perm.cgid = shmseg->shm_perm.gid = td->ipcblk->gid;
    /* ... */
    shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
    shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
#endif /* __CYGWIN__ */
    shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
        (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
    shmseg->shm_segsz = uap->size;
    shmseg->shm_cpid = td->td_proc->p_pid;
    shmseg->shm_lpid = shmseg->shm_nattch = 0;
    shmseg->shm_atime = shmseg->shm_dtime = 0;
    shmseg->shm_ctime = time (NULL);
    shm_committed += btoc(size);
    /* ... */
    if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
        /*
         * Somebody else wanted this key while we were asleep.  Wake
         * ...
         */
        shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
        /* ... */
    }
    td->td_retval[0] = shmid;
    /* ... */
    vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
    /* ... */
}

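/*
 * shmget(2): for IPC_PRIVATE always allocate a new segment; otherwise look
 * the key up (or, with the IPC_KEY_IS_SHMID flag, treat the key as an
 * existing shmid) and either return the existing segment or allocate a new
 * one when IPC_CREAT is given.
 */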
int
shmget(struct thread *td, struct shmget_args *uap)
{
    /* ... */

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);

    mode = uap->shmflg & ACCESSPERMS;
    if (uap->key != IPC_PRIVATE) {
        /* ... */
        if (uap->shmflg & IPC_KEY_IS_SHMID)
            segnum = shm_find_segment_by_shmid ((int) uap->key) ?
                IPCID_TO_IX((int) uap->key) : -1;
        else
            segnum = shm_find_segment_by_key(uap->key);
        /* ... */
        error = shmget_existing(td, uap, mode, segnum);
        /* ... */
        if ((uap->shmflg & IPC_CREAT) == 0) {
            /* ... */
        }
    }
    error = shmget_allocate_segment(td, uap, mode);
    /* ... */
    ipcexit_creat_hookthread (td);
    /* ... */
    td->td_retval[0] = -1;
    /* ... */
}

/* XXX actually varargs. */
struct shmsys_args /* {
    ...
} */;

int
shmsys(struct thread *td, struct shmsys_args *uap)
{
    /* ... */

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    if (uap->which < 0 ||
        uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
        return (EINVAL);
    error = (*shmcalls[uap->which])(td, &uap->a2);
    /* ... */
}
#endif /* __CYGWIN__ */

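/*
 * Fork hook: the child gets a copy of the parent's shmmap_state array and
 * every segment attached in the parent gains one more reference in
 * shm_nattch.  cygwin_shmfork_myhook() is the cygserver entry point that
 * resolves the two processes' vmspaces before calling it.
 */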
static void
shmfork_myhook(struct proc *p1, struct proc *p2)
{
    struct shmmap_state *shmmap_s;
    /* ... */

    size = shminfo.shmseg * sizeof(struct shmmap_state);
    shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
    bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
    p2->p_vmspace->vm_shm = shmmap_s;
    for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
        if (shmmap_s->shmid != -1) {
            /* ... */
            shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
        }
}

int cygwin_shmfork_myhook (struct thread *td, struct proc *parent)
{
    ipcexit_creat_hookthread (td);
    ipc_p_vmspace (td->ipcblk);
    ipc_p_vmspace (parent);
    shmfork_myhook (parent, td->ipcblk);
    /* ... */
}

void
shmexit_myhook(struct vmspace *vm)
{
    struct shmmap_state *base, *shm;
    /* ... */

    if ((base = vm->vm_shm) != NULL) {
        /* ... */
        for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
            if (shm->shmid != -1)
                shm_delete_mapping(vm, shm);
        }
        sys_free(base, M_SHM);
        /* ... */
    }
}

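/*
 * shmrealloc() grows the shmsegs[] array toward the shmmni limit: the
 * existing slots are copied over and the remaining slots are initialized
 * as SHMSEG_FREE with a zero sequence number.
 */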
static void
shmrealloc(void)
{
    struct shmid_ds *newsegs;
    /* ... */

    if (shmalloced >= shminfo.shmmni)
        return;

    newsegs = (struct shmid_ds *) sys_malloc(shminfo.shmmni * sizeof(*newsegs),
        M_SHM, M_WAITOK);
    /* ... */
    for (i = 0; i < shmalloced; i++)
        bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
    for (; i < shminfo.shmmni; i++) {
        shmsegs[i].shm_perm.mode = SHMSEG_FREE;
        shmsegs[i].shm_perm.seq = 0;
    }
    sys_free(shmsegs, M_SHM);
    shmsegs = newsegs;
    shmalloced = shminfo.shmmni;
}

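/*
 * The initialization code below pulls the limits in from the kern.ipc.*
 * tunables, derives shmmax from shmall and the page size, and allocates the
 * initial shmsegs[] array with every slot marked SHMSEG_FREE.
 */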
    /* ... */
    TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
    for (i = PAGE_SIZE; i > 0; i--) {
        shminfo.shmmax = shminfo.shmall * i;
        if (shminfo.shmmax >= shminfo.shmall)
            break;
    }
    TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
    TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
    TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
    TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);
    /* ... */
    shmalloced = shminfo.shmmni;
    shmsegs = (struct shmid_ds *) sys_malloc(shmalloced * sizeof(shmsegs[0]),
        M_SHM, M_WAITOK);
    if (shmsegs == NULL)
        panic("cannot allocate initial memory for sysvshm");
    for (i = 0; i < shmalloced; i++) {
        shmsegs[i].shm_perm.mode = SHMSEG_FREE;
        shmsegs[i].shm_perm.seq = 0;
    }
    /* ... */
    shmexit_hook = &shmexit_myhook;
    shmfork_hook = &shmfork_myhook;
#endif /* __CYGWIN__ */
    /* ... */

    /* ... */
    sys_free(shmsegs, M_SHM);
    /* ... */
#endif /* __CYGWIN__ */

static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{
    /* ... */
    return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
    /* ... */
    error = shmunload();
    /* ... */
}

static moduledata_t sysvshm_mod = {
    /* ... */
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
    SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
#endif /* __CYGWIN__ */
#endif /* __OUTSIDE_CYGWIN__ */