/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file is heavily changed to become part of Cygwin's cygserver.
 */
#ifdef __OUTSIDE_CYGWIN__
#include <sys/cdefs.h>
#define __FBSDID(s) const char version[] = (s)
__FBSDID("$FreeBSD: /repoman/r/ncvs/src/sys/kern/sysv_shm.c,v 1.89 2003/11/07 04:47:14 rwatson Exp $");

#define __BSD_VISIBLE 1
#include <sys/param.h>
#include <sys/sysproto.h>

#include "cygserver.h"
#include "cygserver_ipc.h"
#define PAGE_SIZE (getpagesize ())
#define PAGE_MASK (PAGE_SIZE - 1)
#define btoc(b) (((b) + PAGE_MASK) / PAGE_SIZE)
#define round_page(p) ((((unsigned long)(p)) + PAGE_MASK) & ~(PAGE_MASK))
#define ACCESSPERMS (0777)
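
/*
 * Worked example of the two macros above (illustrative, assuming a
 * 4096-byte page): for a request of 5000 bytes, round_page(5000)
 * yields 8192, the size rounded up to whole pages, and btoc(5000)
 * yields 2, the same request expressed as a page count, which is the
 * unit charged against shm_committed below.
 */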
#define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED)
#define KERN_SUCCESS 0
#define VM_PROT_READ PROT_READ
#define VM_PROT_WRITE PROT_WRITE
#define VM_INHERIT_SHARE 0
#define VM_PROT_DEFAULT 0
#define VM_OBJECT_LOCK(a)
#define vm_object_clear_flag(a,b)
#define vm_object_set_flag(a,b)
#define VM_OBJECT_UNLOCK(a)
#define vm_map_remove(a,b,c) KERN_SUCCESS
typedef int vm_prot_t;
#endif /* __CYGWIN__ */
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
static int oshmctl(struct thread *td, struct oshmctl_args *uap);
#endif /* __CYGWIN__ */
static int shmget_allocate_segment(struct thread *td,
	struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
	int mode, int segnum);
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};
#endif /* __CYGWIN__ */
#define SHMSEG_FREE 0x0200
#define SHMSEG_REMOVED 0x0400
#define SHMSEG_ALLOCATED 0x0800
#define SHMSEG_WANTED 0x1000
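
/*
 * These state flags live in the high bits of shm_perm.mode, above the
 * ACCESSPERMS (0777) permission bits, so one field carries both a
 * segment's lifecycle state and its access mode; see for instance
 * shmget_allocate_segment() below, which combines them as
 * (mode & ACCESSPERMS) | SHMSEG_ALLOCATED.
 */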
static int shm_last_free, shm_nused, shm_committed, shmalloced, shm_nattch;
static struct shmid_ds *shmsegs;
struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};
static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
#define SHMMAXPGS 8192	/* Note: sysv shared memory is swap backed. */
#define SHMMAX (SHMMAXPGS*PAGE_SIZE)
#define SHMALL (SHMMAXPGS)
struct shminfo shminfo = {
	SHMMAX, SHMMIN, SHMMNI, SHMSEG, SHMALL
};
#ifndef __CYGWIN__
static int shm_use_phys;
#else
static long shm_use_phys;
static long shm_allow_removed;
#endif /* __CYGWIN__ */
struct shm_info shm_info;
#endif /* __CYGWIN__ */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");
#endif /* __CYGWIN__ */
static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return (i);
	return (-1);
}
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}
static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)
{
	struct shmid_ds *shmseg;

	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
		return (NULL);
	return (shmseg);
}
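
/*
 * Illustrative note on the id checks above, assuming the usual SysV
 * IPC encoding behind IPCID_TO_IX/IPCID_TO_SEQ: a shmid packs the slot
 * index into its low 16 bits and a generation sequence into the bits
 * above, so e.g. seq 3 in slot 5 gives shmid (3 << 16) | 5.  The seq
 * comparison is what makes a stale id fail once its slot is reused.
 */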
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	sys_free(shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time (NULL);
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}
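
/*
 * Example of the deferred-removal rule implemented above: after
 * shmctl(id, IPC_RMID, ...) a segment is merely marked SHMSEG_REMOVED;
 * its backing store is released only when the last attached process
 * detaches and shm_nattch drops to zero.
 */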
#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	const void *shmaddr;
};
#endif
int
shmdt(struct thread *td, struct shmdt_args *uap)
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	int i;
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		return (EINVAL);
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		return (EINVAL);
	}
	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	const void *shmaddr;
	int shmflg;
};
#endif
int
kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg)
{
	struct proc *p = td->td_proc;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	size_t size;
	int i, flags, rv;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		return (EINVAL);
	}
	error = ipcperm(td, &shmseg->shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		return (EMFILE);
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	debug_printf ("shmaddr: %x, shmflg: %x", shmaddr, shmflg);
#ifdef __CYGWIN__
	/* The alignment checks have already been made in the Cygwin DLL
	   and shmat's only job is to keep record of the attached mem.
	   These checks break shm on 9x since MapViewOfFileEx apparently
	   returns memory which isn't aligned to SHMLBA.  Go figure!  */
	attach_va = (vm_offset_t)shmaddr;
#else
	if (shmaddr) {
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND) {
			attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)shmaddr;
		} else {
			return (EINVAL);
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
		    + maxtsiz + maxdsiz);
	}
#endif
	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
	    0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		return (ENOMEM);
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
	    attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time (NULL);
	shmseg->shm_nattch++;
	td->td_retval[0] = attach_va;
	return (error);
}
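
/*
 * Illustrative caller-side sequence (standard SysV API, not part of
 * this file); the Cygwin DLL ends up in kern_shmat() via cygserver
 * when a client does something like:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);	// recorded in shmmap_state here
 *	...
 *	shmdt(p);			// undone via shm_delete_mapping
 */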
int
shmat(struct thread *td, struct shmat_args *uap)
{
	return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}
struct oshmid_ds {
	struct ipc_perm shm_perm;	/* operation perms */
	int shm_segsz;			/* size of segment (bytes) */
	u_short shm_cpid;		/* pid, creator */
	u_short shm_lpid;		/* pid, last operation */
	short shm_nattch;		/* no. of current attaches */
	time_t shm_atime;		/* last attach time */
	time_t shm_dtime;		/* last detach time */
	time_t shm_ctime;		/* last change time */
	void *shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};
static int
oshmctl(struct thread *td, struct oshmctl_args *uap)
{
	int error;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		return (EINVAL);
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			return (error);
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		error = ((sy_call_t *)shmctl)(td, uap);
		break;
	}
	return (error);
}
#endif /* __CYGWIN__ */
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif
int
kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz)
{
	int error = 0;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);

	switch (cmd) {
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		return (0);
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_tot = shm_committed * PAGE_SIZE;
#ifdef __CYGWIN__
		shm_info.shm_atts = shm_nattch;
#else
		shm_info.shm_rss = 0;	/*XXX where to get from ? */
		shm_info.shm_swp = 0;	/*XXX where to get from ? */
		shm_info.swap_attempts = 0;	/*XXX where to get from ? */
		shm_info.swap_successes = 0;	/*XXX where to get from ? */
#endif /* __CYGWIN__ */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		return (0);
	}
	}
	if (cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(shmid);
	else
		shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		return (EINVAL);
	}
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			return (error);
		memcpy(buf, shmseg, sizeof(struct shmid_ds));
		if (bufsz)
			*bufsz = sizeof(struct shmid_ds);
		if (cmd == SHM_STAT)
			td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
		break;
	case IPC_SET: {
		struct shmid_ds *shmid;

		shmid = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			return (error);
		shmseg->shm_perm.uid = shmid->shm_perm.uid;
		shmseg->shm_perm.gid = shmid->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmid->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time (NULL);
		break;
	}
	case IPC_RMID:
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			return (error);
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
int
shmctl(struct thread *td, struct shmctl_args *uap)
{
	int error = 0;
	struct shmid_ds buf;
	size_t bufsz;

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
			goto done;
	}
#ifdef __CYGWIN__
	if (uap->cmd == IPC_INFO && uap->shmid > 0) {
		/* Can't use the default kern_shmctl interface. */
		int shmid = uap->shmid;
		if (shmid > shminfo.shmmni)
			shmid = shminfo.shmmni;
		error = copyout(shmsegs, uap->buf,
				shmid * sizeof(struct shmid_ds));
		td->td_retval[0] = error ? -1 : 0;
		return (error);
	}
#endif /* __CYGWIN__ */
	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		error = copyout(&buf, uap->buf, bufsz);
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}
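
/*
 * Example (standard SysV API): a client fetching segment status goes
 * through the IPC_STAT path above, roughly:
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size %lu, attaches %lu\n",
 *		       (unsigned long)ds.shm_segsz,
 *		       (unsigned long)ds.shm_nattch);
 */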
#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif
static int
shmget_existing(struct thread *td, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	error = ipcperm(td, &shmseg->shm_perm, mode);
	if (error)
		return (error);
	if (uap->size && uap->size > shmseg->shm_segsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
#ifdef __CYGWIN__
	vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
	return (0);
}
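
/*
 * Example of the flag check above (standard SysV semantics):
 * shmget(key, size, IPC_CREAT | IPC_EXCL | 0600) fails with EEXIST
 * when the key is already in use, whereas plain IPC_CREAT returns the
 * existing segment as long as the requested size fits.
 */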
static int
shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid, size;
#ifndef __CYGWIN__
	struct ucred *cred = td->td_ucred;
#endif /* __CYGWIN__ */
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < (unsigned long) shminfo.shmmin ||
	    uap->size > (unsigned long) shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > (unsigned long) shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that noone else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    sys_malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	VM_OBJECT_LOCK(shm_handle->shm_object);
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shm_handle->shm_object);

	shmseg->shm_internal = shm_handle;
#ifdef __CYGWIN__
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = td->ipcblk->uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = td->ipcblk->gid;
#else
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
#endif /* __CYGWIN__ */
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = td->td_proc->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time (NULL);
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
#ifdef __CYGWIN__
	vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
	return (0);
}
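
/*
 * Note on the two-phase create above: the slot is first published as
 * SHMSEG_ALLOCATED | SHMSEG_REMOVED so that a concurrent shmget() on
 * the same key blocks in shmget_existing() (tsleep on the segment) and
 * retries; only once the backing object exists is the REMOVED bit
 * replaced by the real mode bits and any SHMSEG_WANTED sleeper woken.
 */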
int
shmget(struct thread *td, struct shmget_args *uap)
{
	int segnum, mode;
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
#ifdef __CYGWIN__
		if (uap->shmflg & IPC_KEY_IS_SHMID)
			segnum = shm_find_segment_by_shmid ((int) uap->key) ?
				 IPCID_TO_IX((int) uap->key) : -1;
		else
#endif
			segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(td, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(td, uap, mode);
done:
	ipcexit_creat_hookthread (td);
	if (error)
		td->td_retval[0] = -1;
	return (error);
}
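
/*
 * Illustrative key-based usage of the lookup path above (standard SysV
 * API): two processes agree on a key and rendezvous on one segment:
 *
 *	key_t k = ftok("/some/agreed/path", 42);
 *	int id = shmget(k, 65536, IPC_CREAT | 0600);
 */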
int
shmsys(struct thread *td,
	/* XXX actually varargs. */
	struct shmsys_args /* {
		int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap)
{
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	if (uap->which < 0 ||
	    uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	error = (*shmcalls[uap->which])(td, &uap->a2);
	return (error);
}
#endif /* __CYGWIN__ */
static void
shmfork_myhook(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1) {
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
		}
}
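
/*
 * Fork-time bookkeeping in a nutshell: the child inherits a copy of
 * the parent's shmmap_state array, so every attached segment gains one
 * shm_nattch reference, which shmexit_myhook() releases on exit.
 */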
int cygwin_shmfork_myhook (struct thread *td, struct proc *parent)
{
	ipcexit_creat_hookthread (td);
	ipc_p_vmspace (td->ipcblk);
	ipc_p_vmspace (parent);
	shmfork_myhook (parent, td->ipcblk);
	return 0;
}
void
shmexit_myhook(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		sys_free(base, M_SHM);
	}
}
static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = (struct shmid_ds *) sys_malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* Initialize the newly added slots of the new array. */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	sys_free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}
951 TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo
.shmall
);
952 for (i
= PAGE_SIZE
; i
> 0; i
--) {
953 shminfo
.shmmax
= shminfo
.shmall
* PAGE_SIZE
;
954 if (shminfo
.shmmax
>= shminfo
.shmall
)
957 TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo
.shmmin
);
958 TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo
.shmmni
);
959 TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo
.shmseg
);
960 TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys
);
962 shmalloced
= shminfo
.shmmni
;
963 shmsegs
= (struct shmid_ds
*) sys_malloc(shmalloced
* sizeof(shmsegs
[0]), M_SHM
, M_WAITOK
);
965 panic("cannot allocate initial memory for sysvshm");
966 for (i
= 0; i
< shmalloced
; i
++) {
967 shmsegs
[i
].shm_perm
.mode
= SHMSEG_FREE
;
968 shmsegs
[i
].shm_perm
.seq
= 0;
974 shmexit_hook
= &shmexit_myhook
;
975 shmfork_hook
= &shmfork_myhook
;
976 #endif /* __CYGWIN__ */
static int
shmunload(void)
{
	if (shm_nused > 0)
		return (EBUSY);

	sys_free(shmsegs, M_SHM);
	return (0);
}
#endif /* __CYGWIN__ */
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{
	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}
static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};
SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
	SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
#endif /* __CYGWIN__ */
#endif /* __OUTSIDE_CYGWIN__ */