/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Adam Glass and Charles
 *        Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file is heavily changed to become part of Cygwin's cygserver.
 */

#ifdef __OUTSIDE_CYGWIN__
#include "woutsup.h"
#include <sys/cdefs.h>
#ifndef __FBSDID
#define __FBSDID(s) const char version[] = (s)
#endif
__FBSDID("$FreeBSD: /repoman/r/ncvs/src/sys/kern/sysv_shm.c,v 1.89 2003/11/07 04:47:14 rwatson Exp $");
/* CV, 2006-01-09: Inspected upstream up to version 1.104. */

#define _KERNEL 1
#define __BSD_VISIBLE 1
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/shm.h>
#include <malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysproto.h>

#include <errno.h>
#include <time.h>
#include <unistd.h>
#include "cygserver.h"
#include "process.h"
#include "cygserver_ipc.h"

#ifdef __CYGWIN__
#ifndef PAGE_SIZE
#define PAGE_SIZE (getpagesize ())
#endif
#ifndef PAGE_MASK
#define PAGE_MASK (PAGE_SIZE - 1)
#endif
#define btoc(b) (((b) + PAGE_MASK) / PAGE_SIZE)
#define round_page(p) ((((unsigned long)(p)) + PAGE_MASK) & ~(PAGE_MASK))
#ifdef __CYGWIN__
#define GIANT_REQUIRED
#else
#define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED)
#endif
#define KERN_SUCCESS 0
#define VM_PROT_READ PROT_READ
#define VM_PROT_WRITE PROT_WRITE
#define VM_INHERIT_SHARE 0
#define OBJT_PHYS 0
#define OBJT_SWAP 0
#define VM_PROT_DEFAULT 0
#define VM_OBJECT_LOCK(a)
#define vm_object_clear_flag(a,b)
#define vm_object_set_flag(a,b)
#define VM_OBJECT_UNLOCK(a)
#define vm_map_remove(a,b,c) KERN_SUCCESS
typedef int vm_prot_t;
#endif /* __CYGWIN__ */
#ifndef __CYGWIN__
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);
/* Forward declaration; referenced by the SYSCTL_PROC entry below. */
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);
#endif /* __CYGWIN__ */

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

#ifndef __CYGWIN__
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)shmat, (sy_call_t *)oshmctl,
        (sy_call_t *)shmdt, (sy_call_t *)shmget,
        (sy_call_t *)shmctl
};
#endif /* __CYGWIN__ */

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced, shm_nattch;
static struct shmid_ds *shmsegs;

struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);

/*
 * Tuneable values.
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS       8192    /* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define SHMMAX  (SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define SHMMIN  1
#endif
#ifndef SHMMNI
#define SHMMNI  192
#endif
#ifndef SHMSEG
#define SHMSEG  128
#endif
#ifndef SHMALL
#define SHMALL  (SHMMAXPGS)
#endif

struct shminfo shminfo = {
        SHMMAX,
        SHMMIN,
        SHMMNI,
        SHMSEG,
        SHMALL
};

#ifndef __CYGWIN__
static int shm_use_phys;
#else
static long shm_use_phys;
static long shm_allow_removed;
#endif /* __CYGWIN__ */

#ifndef __CYGWIN__
struct shm_info shm_info;
#endif /* __CYGWIN__ */

#ifndef __CYGWIN__
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");
#endif /* __CYGWIN__ */

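/*
 * Linear search of shmsegs[] for an allocated segment with the given
 * key.  Returns the segment index, or -1 if the key is not in use.
 */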
static int
shm_find_segment_by_key(key_t key)
{
        int i;

        for (i = 0; i < shmalloced; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return (i);
        return (-1);
}

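/*
 * Look up a segment by its IPC id.  Both the index and the sequence
 * number encoded in the id must match an allocated segment; segments
 * marked removed are only returned if shm_allow_removed is set.
 */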
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shmalloced)
                return (NULL);
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
            (!shm_allow_removed &&
             (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
                return (NULL);
        return (shmseg);
}

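/*
 * Same lookup by plain segment index (used by SHM_STAT), without the
 * sequence-number check.
 */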
static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)
{
        struct shmid_ds *shmseg;

        if (segnum < 0 || segnum >= shmalloced)
                return (NULL);
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
            (!shm_allow_removed &&
             (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
                return (NULL);
        return (shmseg);
}

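/*
 * Release the VM object backing a segment, return its pages to the
 * committed-memory accounting, and mark the slot free.
 */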
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
        struct shm_handle *shm_handle;
        size_t size;

        GIANT_REQUIRED;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        sys_free(shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}

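/*
 * Detach one mapping from a process.  The segment itself is destroyed
 * once the last attach goes away, if it was already marked removed.
 */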
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size __attribute__ ((unused));

        GIANT_REQUIRED;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return (EINVAL);
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time (NULL);
        --shm_nattch;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
        const void *shmaddr;
};
#endif

/*
 * MPSAFE
 */
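/*
 * Find the caller's mapping whose attach address matches shmaddr and
 * delete it; EINVAL if no such attach exists.
 */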
int
shmdt(struct thread *td, struct shmdt_args *uap)
{
        struct proc *p = td->td_proc;
        struct shmmap_state *shmmap_s;
        int i;
        int error = 0;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        shmmap_s = p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                error = EINVAL;
                goto done2;
        }
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
                        break;
                }
        }
        if (i == shminfo.shmseg) {
                error = EINVAL;
                goto done2;
        }
        error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
        mtx_unlock(&Giant);
        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
        int shmid;
        const void *shmaddr;
        int shmflg;
};
#endif

/*
 * MPSAFE
 */
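/*
 * Attach a segment to the calling process: allocate the per-process
 * shmmap_state array on first use, check IPC permissions, then record
 * the attach address and bump the attach counts.  On Cygwin the
 * address has already been validated in the client DLL, so no
 * vm_map_find() is done here.
 */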
int
kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg)
{
        struct proc *p = td->td_proc;
        int i, flags __attribute__ ((unused));
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
#ifndef __CYGWIN__
        struct shm_handle *shm_handle;
#endif
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
#ifndef __CYGWIN__
        int rv;
#endif
        int error = 0;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        shmmap_s = p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                p->p_vmspace->vm_shm = shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done2;
        }
        error = ipcperm(td, &shmseg->shm_perm,
            (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                goto done2;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg) {
                error = EMFILE;
                goto done2;
        }
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        debug_printf ("shmaddr: %x, shmflg: %x", shmaddr, shmflg);
#ifdef __CYGWIN__
        /* The alignment checks have already been made in the Cygwin DLL,
           and shmat's only job here is to keep a record of the attached
           memory.  These checks break shm on 9x since MapViewOfFileEx
           apparently returns memory which isn't aligned to SHMLBA.
           Go figure!  */
        attach_va = (vm_offset_t)shmaddr;
#else
        if (shmaddr) {
                flags |= MAP_FIXED;
                if (shmflg & SHM_RND) {
                        attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
                } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
                        attach_va = (vm_offset_t)shmaddr;
                } else {
                        error = EINVAL;
                        goto done2;
                }
        } else {
                /*
                 * This is just a hint to vm_map_find() about where to
                 * put it.
                 */
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
                    + maxtsiz + maxdsiz);
        }

        shm_handle = shmseg->shm_internal;
        vm_object_reference(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
            0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
        if (rv != KERN_SUCCESS) {
                error = ENOMEM;
                goto done2;
        }
        vm_map_inherit(&p->p_vmspace->vm_map,
            attach_va, attach_va + size, VM_INHERIT_SHARE);
#endif

        shmmap_s->va = attach_va;
        shmmap_s->shmid = shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time (NULL);
        shmseg->shm_nattch++;
        shm_nattch++;
        td->td_retval[0] = attach_va;
done2:
        mtx_unlock(&Giant);
        return (error);
}

int
shmat(struct thread *td, struct shmat_args *uap)
{
        return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}

#ifndef __CYGWIN__
struct oshmid_ds {
        struct ipc_perm shm_perm;       /* operation perms */
        int shm_segsz;                  /* size of segment (bytes) */
        u_short shm_cpid;               /* pid, creator */
        u_short shm_lpid;               /* pid, last operation */
        short shm_nattch;               /* no. of current attaches */
        time_t shm_atime;               /* last attach time */
        time_t shm_dtime;               /* last detach time */
        time_t shm_ctime;               /* last change time */
        void *shm_handle;               /* internal handle for shm segment */
};

struct oshmctl_args {
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};

/*
 * MPSAFE
 */
static int
oshmctl(struct thread *td, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
        int error = 0;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done2;
        }
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(td, &shmseg->shm_perm, IPC_R);
                if (error)
                        goto done2;
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
                if (error)
                        goto done2;
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                error = ((sy_call_t *)shmctl)(td, uap);
                break;
        }
done2:
        mtx_unlock(&Giant);
        return (error);
#else
        return (EINVAL);
#endif
}
#endif /* !__CYGWIN__ */

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
        int shmid;
        int cmd;
        struct shmid_ds *buf;
};
#endif

/*
 * MPSAFE
 */
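/*
 * Central shmctl() worker: handles the table-wide IPC_INFO/SHM_INFO
 * queries first, then looks the segment up (by index for SHM_STAT, by
 * id otherwise) and performs IPC_STAT, IPC_SET or IPC_RMID on it.
 */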
int
kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz)
{
        int error = 0;
        struct shmid_ds *shmseg;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);

        mtx_lock(&Giant);
        switch (cmd) {
        case IPC_INFO:
                memcpy(buf, &shminfo, sizeof(shminfo));
                if (bufsz)
                        *bufsz = sizeof(shminfo);
                td->td_retval[0] = shmalloced;
                goto done2;
        case SHM_INFO: {
                struct shm_info shm_info;
                shm_info.used_ids = shm_nused;
                shm_info.shm_tot = shm_committed * PAGE_SIZE;
#ifdef __CYGWIN__
                shm_info.shm_atts = shm_nattch;
#else
                shm_info.shm_rss = 0;           /* XXX where to get from? */
                shm_info.shm_swp = 0;           /* XXX where to get from? */
                shm_info.swap_attempts = 0;     /* XXX where to get from? */
                shm_info.swap_successes = 0;    /* XXX where to get from? */
#endif /* __CYGWIN__ */
                memcpy(buf, &shm_info, sizeof(shm_info));
                if (bufsz)
                        *bufsz = sizeof(shm_info);
                td->td_retval[0] = shmalloced;
                goto done2;
        }
        }
        if (cmd == SHM_STAT)
                shmseg = shm_find_segment_by_shmidx(shmid);
        else
                shmseg = shm_find_segment_by_shmid(shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done2;
        }
        switch (cmd) {
        case SHM_STAT:
        case IPC_STAT:
                error = ipcperm(td, &shmseg->shm_perm, IPC_R);
                if (error)
                        goto done2;
                memcpy(buf, shmseg, sizeof(struct shmid_ds));
                if (bufsz)
                        *bufsz = sizeof(struct shmid_ds);
                if (cmd == SHM_STAT)
                        td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
                break;
        case IPC_SET: {
                struct shmid_ds *shmid;

                shmid = (struct shmid_ds *)buf;
                error = ipcperm(td, &shmseg->shm_perm, IPC_M);
                if (error)
                        goto done2;
                shmseg->shm_perm.uid = shmid->shm_perm.uid;
                shmseg->shm_perm.gid = shmid->shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (shmid->shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time (NULL);
                break;
        }
        case IPC_RMID:
                error = ipcperm(td, &shmseg->shm_perm, IPC_M);
                if (error)
                        goto done2;
                shmseg->shm_perm.key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(shmid);
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                error = EINVAL;
                break;
        }
done2:
        mtx_unlock(&Giant);
        return (error);
}

int
shmctl(struct thread *td, struct shmctl_args *uap)
{
        int error = 0;
        struct shmid_ds buf;
        size_t bufsz;

        /* IPC_SET needs to copyin the buffer before calling kern_shmctl. */
        if (uap->cmd == IPC_SET) {
                if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
                        goto done;
        }
#ifdef __CYGWIN__
        if (uap->cmd == IPC_INFO && uap->shmid > 0) {
                /* Can't use the default kern_shmctl interface. */
                int shmid = uap->shmid;
                if (shmid > shminfo.shmmni)
                        shmid = shminfo.shmmni;
                error = copyout(shmsegs, uap->buf,
                    shmid * sizeof(struct shmid_ds));
                td->td_retval[0] = error ? -1 : 0;
                return (error);
        }
#endif /* __CYGWIN__ */

        error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
        if (error)
                goto done;

        /* Cases in which we need to copyout. */
        switch (uap->cmd) {
        case IPC_INFO:
        case SHM_INFO:
        case SHM_STAT:
        case IPC_STAT:
                error = copyout(&buf, uap->buf, bufsz);
                break;
        }

done:
        if (error) {
                /* Invalidate the return value. */
                td->td_retval[0] = -1;
        }
        return (error);
}


#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
        key_t key;
        size_t size;
        int shmflg;
};
#endif

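/*
 * shmget() found an existing segment for the key: wait out a pending
 * creation if necessary, enforce IPC_CREAT|IPC_EXCL, permissions and
 * the requested size, then return the segment's id.
 */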
static int
shmget_existing(struct thread *td, struct shmget_args *uap, int mode, int segnum)
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
                if (error)
                        return (error);
                return (EAGAIN);
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return (EEXIST);
        error = ipcperm(td, &shmseg->shm_perm, mode);
        if (error)
                return (error);
        if (uap->size && uap->size > shmseg->shm_segsz)
                return (EINVAL);
        td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
#ifdef __CYGWIN__
        td->td_retval[1] =
            vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
        return (0);
}

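/*
 * Create a new segment: validate the size against the tunables, find
 * a free slot (growing shmsegs[] if allowed), allocate the backing VM
 * object and fill in ownership, mode and time stamps.
 */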
static int
shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode)
{
        int i, segnum, shmid, size;
#ifndef __CYGWIN__
        struct ucred *cred = td->td_ucred;
#endif /* __CYGWIN__ */
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        GIANT_REQUIRED;

        if (uap->size < (unsigned long) shminfo.shmmin ||
            uap->size > (unsigned long) shminfo.shmmax)
                return (EINVAL);
        if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
                return (ENOSPC);
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return (ENOMEM);
        if (shm_last_free < 0) {
                shmrealloc();   /* Maybe expand the shmsegs[] array. */
                for (i = 0; i < shmalloced; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shmalloced)
                        return (ENOSPC);
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = (struct shm_handle *)
            sys_malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        /*
         * We make sure that we have allocated a pager before we need
         * to.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
        }
        VM_OBJECT_LOCK(shm_handle->shm_object);
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
        VM_OBJECT_UNLOCK(shm_handle->shm_object);

        shmseg->shm_internal = shm_handle;
#ifdef __CYGWIN__
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = td->ipcblk->uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = td->ipcblk->gid;
#else
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
#endif /* __CYGWIN__ */
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = td->td_proc->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time (NULL);
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup(shmseg);
        }
        td->td_retval[0] = shmid;
#ifdef __CYGWIN__
        td->td_retval[1] =
            vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
        return (0);
}

/*
 * MPSAFE
 */
int
shmget(struct thread *td, struct shmget_args *uap)
{
        int segnum, mode;
        int error;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        mode = uap->shmflg & ACCESSPERMS;
        if (uap->key != IPC_PRIVATE) {
        again:
#ifdef __CYGWIN__
                if (uap->shmflg & IPC_KEY_IS_SHMID)
                        segnum = shm_find_segment_by_shmid ((int) uap->key) ?
                            IPCID_TO_IX((int) uap->key) : -1;
                else
#endif
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(td, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        goto done2;
                }
                if ((uap->shmflg & IPC_CREAT) == 0) {
                        error = ENOENT;
                        goto done2;
                }
        }
        error = shmget_allocate_segment(td, uap, mode);
done2:
#ifdef __CYGWIN__
        if (!error)
                ipcexit_creat_hookthread (td);
        else
                td->td_retval[0] = -1;
#endif
        mtx_unlock(&Giant);
        return (error);
}


#ifndef __CYGWIN__
/*
 * MPSAFE
 */
int
shmsys(struct thread *td, struct shmsys_args *uap)
{
        /* XXX uap is actually varargs:
           struct shmsys_args { int which; int a2; int a3; int a4; }. */
        int error;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        if (uap->which < 0 ||
            uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return (EINVAL);
        mtx_lock(&Giant);
        error = (*shmcalls[uap->which])(td, &uap->a2);
        mtx_unlock(&Giant);
        return (error);
}
#endif /* __CYGWIN__ */

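/*
 * Fork hook: give the child a copy of the parent's attach table and
 * bump the attach count of every segment the parent had attached.
 */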
static void
shmfork_myhook(struct proc *p1, struct proc *p2)
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
        bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
        p2->p_vmspace->vm_shm = shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1) {
                        shm_nattch++;
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
                }
}

#ifdef __CYGWIN__
int cygwin_shmfork_myhook (struct thread *td, struct proc *parent)
{
        ipcexit_creat_hookthread (td);
        ipc_p_vmspace (td->ipcblk);
        ipc_p_vmspace (parent);
        shmfork_myhook (parent, td->ipcblk);
        return 0;
}
#endif

void
shmexit_myhook(struct vmspace *vm)
{
        struct shmmap_state *base, *shm;
        int i;

        GIANT_REQUIRED;

        if ((base = vm->vm_shm) != NULL) {
                vm->vm_shm = NULL;
                for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                        if (shm->shmid != -1)
                                shm_delete_mapping(vm, shm);
                }
                sys_free(base, M_SHM);
        }
}

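/*
 * Grow shmsegs[] up to the current shmmni limit, copying the existing
 * entries and marking the new tail slots free.
 */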
static void
shmrealloc(void)
{
        int i;
        struct shmid_ds *newsegs;

        if (shmalloced >= shminfo.shmmni)
                return;

        newsegs = (struct shmid_ds *) sys_malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
        if (newsegs == NULL)
                return;
        for (i = 0; i < shmalloced; i++)
                bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
        for (; i < shminfo.shmmni; i++) {
                /* Initialize the tail of the new array, not the old one. */
                newsegs[i].shm_perm.mode = SHMSEG_FREE;
                newsegs[i].shm_perm.seq = 0;
        }
        sys_free(shmsegs, M_SHM);
        shmsegs = newsegs;
        shmalloced = shminfo.shmmni;
}

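/*
 * One-time initialization: pull the limits from the tunables, allocate
 * the initial shmsegs[] array and mark every slot free.
 */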
void
shminit(void)
{
        int i;
        tun_bool_t shm_ar;

        TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
        for (i = PAGE_SIZE; i > 0; i--) {
                shminfo.shmmax = shminfo.shmall * i;
                if (shminfo.shmmax >= shminfo.shmall)
                        break;
        }
        TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
        TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
        TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
        TUNABLE_BOOL_FETCH("kern.ipc.shm_allow_removed", &shm_ar);
        if (shm_ar == TUN_TRUE)
                shm_allow_removed = 1;
        shmalloced = shminfo.shmmni;
        shmsegs = (struct shmid_ds *) sys_malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
        if (shmsegs == NULL)
                panic("cannot allocate initial memory for sysvshm");
        for (i = 0; i < shmalloced; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
#ifndef __CYGWIN__
        shmexit_hook = &shmexit_myhook;
        shmfork_hook = &shmfork_myhook;
#endif /* __CYGWIN__ */
}

int
shmunload(void)
{

        if (shm_nused > 0)
                return (EBUSY);

        sys_free(shmsegs, M_SHM);
#ifndef __CYGWIN__
        shmexit_hook = NULL;
        shmfork_hook = NULL;
#endif /* __CYGWIN__ */
        return (0);
}

#ifndef __CYGWIN__
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

        return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
        int error = 0;

        switch (cmd) {
        case MOD_LOAD:
                shminit();
                break;
        case MOD_UNLOAD:
                error = shmunload();
                break;
        case MOD_SHUTDOWN:
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

static moduledata_t sysvshm_mod = {
        "sysvshm",
        &sysvshm_modload,
        NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
    SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
#endif /* __CYGWIN__ */
#endif /* __OUTSIDE_CYGWIN__ */