/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file is heavily changed to become part of Cygwin's cygserver.
 */

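/*
 * The routines below implement the server side of the SysV shared
 * memory API.  For orientation, this is the client-side call sequence
 * they back (illustrative sketch only; the Cygwin DLL marshals these
 * calls to cygserver rather than invoking this code directly):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget (IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat (id, NULL, 0);	// attach, bumps shm_nattch
 *	shmdt (p);			// detach again
 *	shmctl (id, IPC_RMID, NULL);	// goes away at the last detach
 */
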
#ifdef __OUTSIDE_CYGWIN__
#include "woutsup.h"
#include <sys/cdefs.h>
#ifndef __FBSDID
#define __FBSDID(s) const char version[] = (s)
#endif
__FBSDID("$FreeBSD: /repoman/r/ncvs/src/sys/kern/sysv_shm.c,v 1.89 2003/11/07 04:47:14 rwatson Exp $");

#define _KERNEL 1
#define __BSD_VISIBLE 1
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/shm.h>
#include <malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysproto.h>

#include <errno.h>
#include <time.h>
#include <unistd.h>
#include "cygserver.h"
#include "process.h"
#include "cygserver_ipc.h"

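/*
 * Cygwin compatibility shims: just enough of the FreeBSD kernel
 * environment (page arithmetic, VM protection bits, and VM object/map
 * operations, which become no-ops here) for the code below to compile
 * unchanged.
 */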
#ifdef __CYGWIN__
#ifndef PAGE_SIZE
#define PAGE_SIZE (getpagesize ())
#endif
#ifndef PAGE_MASK
#define PAGE_MASK (PAGE_SIZE - 1)
#endif
#define btoc(b) (((b) + PAGE_MASK) / PAGE_SIZE)
#define round_page(p) ((((unsigned long)(p)) + PAGE_MASK) & ~(PAGE_MASK))
#define ACCESSPERMS (0777)
#define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED)
#define KERN_SUCCESS 0
#define VM_PROT_READ PROT_READ
#define VM_PROT_WRITE PROT_WRITE
#define VM_INHERIT_SHARE 0
#define OBJT_PHYS 0
#define OBJT_SWAP 0
#define VM_PROT_DEFAULT 0
#define VM_OBJECT_LOCK(a)
#define vm_object_clear_flag(a,b)
#define vm_object_set_flag(a,b)
#define VM_OBJECT_UNLOCK(a)
#define vm_map_remove(a,b,c) KERN_SUCCESS
typedef int vm_prot_t;
#endif /* __CYGWIN__ */

#ifndef __CYGWIN__
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);
#endif /* __CYGWIN__ */

static int shmget_allocate_segment(struct thread *td,
	struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
	int mode, int segnum);

#ifndef __CYGWIN__
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};
#endif /* __CYGWIN__ */

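/*
 * Per-segment state flags, kept in shm_perm.mode above the permission
 * bits.  A segment marked SHMSEG_REMOVED lingers until its last
 * attachment is gone; SHMSEG_WANTED flags a sleeping shmget() waiting
 * for an allocation in progress to finish.
 */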
#define SHMSEG_FREE		0x0200
#define SHMSEG_REMOVED		0x0400
#define SHMSEG_ALLOCATED	0x0800
#define SHMSEG_WANTED		0x1000

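/*
 * Module-global bookkeeping: the segment table itself plus the hint of
 * the last freed slot, the number of segments in use, the number of
 * committed pages, the table capacity, and the total attach count
 * across all segments.
 */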
static int shm_last_free, shm_nused, shm_committed, shmalloced, shm_nattch;
static struct shmid_ds *shmsegs;

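/* The allocation backing a segment: a handle on its VM object. */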
struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

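/*
 * One entry of a process's attach table: where a segment is mapped and
 * which segment it is.  A shmid of -1 marks a free slot.
 */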
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);

/*
 * Tuneable values.
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS	8192	/* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define SHMMIN	1
#endif
#ifndef SHMMNI
#define SHMMNI	192
#endif
#ifndef SHMSEG
#define SHMSEG	128
#endif
#ifndef SHMALL
#define SHMALL	(SHMMAXPGS)
#endif

struct shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

#ifndef __CYGWIN__
static int shm_use_phys;
static int shm_allow_removed;
#else
static long shm_use_phys;
static long shm_allow_removed;
#endif /* __CYGWIN__ */

#ifndef __CYGWIN__
struct shm_info shm_info;
#endif /* __CYGWIN__ */

#ifndef __CYGWIN__
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
	&shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
	&shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
	NULL, 0, sysctl_shmsegs, "", "");
#endif /* __CYGWIN__ */

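/*
 * Segment lookup helpers: by key (index into shmsegs[] or -1), by shmid
 * (index plus sequence check), and by bare index.  The shmid and index
 * lookups skip removed segments unless shm_allow_removed is set.
 */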
static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return (i);
	return (-1);
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}

static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)
{
	struct shmid_ds *shmseg;

	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
		return (NULL);
	return (shmseg);
}

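/*
 * Release the VM object and accounting for a segment and mark its slot
 * free.  The callers have already checked that no attachments remain.
 */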
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	GIANT_REQUIRED;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	sys_free(shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

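/*
 * Drop one mapping of a segment from a process's address space and
 * attach table.  If that was the last attachment of a segment already
 * marked removed, the segment itself is deallocated.
 */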
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	GIANT_REQUIRED;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time (NULL);
	--shm_nattch;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	const void *shmaddr;
};
#endif

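/*
 * shmdt: detach the segment mapped at uap->shmaddr.  The address must
 * exactly match an attachment in the calling process's attach table.
 */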
/*
 * MPSAFE
 */
int
shmdt(struct thread *td, struct shmdt_args *uap)
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	int i;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done2;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
		goto done2;
	}
	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	const void *shmaddr;
	int shmflg;
};
#endif

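/*
 * kern_shmat: attach segment shmid, allocating the per-process attach
 * table on first use.  On Cygwin the requested address is taken as-is;
 * the address arithmetic and the actual mapping happen in the client
 * DLL, so the server side only validates permissions and records the
 * attachment.
 */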
/*
 * MPSAFE
 */
int
kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg)
{
	struct proc *p = td->td_proc;
	int i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
#ifndef __CYGWIN__
	struct shm_handle *shm_handle;
#endif
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
#ifndef __CYGWIN__
	int rv;
#endif
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	error = ipcperm(td, &shmseg->shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done2;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done2;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	debug_printf ("shmaddr: %p, shmflg: %x", shmaddr, shmflg);
#ifdef __CYGWIN__
	/* The alignment checks have already been made in the Cygwin DLL
	   and shmat's only job is to keep a record of the attached memory.
	   These checks break shm on 9x, since MapViewOfFileEx apparently
	   returns memory which isn't aligned to SHMLBA.  Go figure!  */
	attach_va = (vm_offset_t)shmaddr;
#else
	if (shmaddr) {
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND) {
			attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)shmaddr;
		} else {
			error = EINVAL;
			goto done2;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
		    + maxtsiz + maxdsiz);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
	    0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		error = ENOMEM;
		goto done2;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
	    attach_va, attach_va + size, VM_INHERIT_SHARE);
#endif

	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time (NULL);
	shmseg->shm_nattch++;
	shm_nattch++;
	td->td_retval[0] = attach_va;
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmat(struct thread *td, struct shmat_args *uap)
{
	return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}

#ifndef __CYGWIN__
struct oshmid_ds {
	struct ipc_perm shm_perm;	/* operation perms */
	int shm_segsz;			/* size of segment (bytes) */
	u_short shm_cpid;		/* pid, creator */
	u_short shm_lpid;		/* pid, last operation */
	short shm_nattch;		/* no. of current attaches */
	time_t shm_atime;		/* last attach time */
	time_t shm_dtime;		/* last detach time */
	time_t shm_ctime;		/* last change time */
	void *shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

/*
 * MPSAFE
 */
static int
oshmctl(struct thread *td, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
	int error = 0;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			goto done2;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			goto done2;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		error = ((sy_call_t *)shmctl)(td, uap);
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
#else
	return (EINVAL);
#endif
}
#endif /* __CYGWIN__ */

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

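/*
 * kern_shmctl: the control multiplexer.  IPC_INFO and SHM_INFO report
 * global state; SHM_STAT/IPC_STAT copy a segment out; IPC_SET updates
 * owner and mode; IPC_RMID marks a segment removed, deallocating it
 * once the last attachment is gone.
 */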
/*
 * MPSAFE
 */
int
kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz)
{
	int error = 0;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);

	mtx_lock(&Giant);
	switch (cmd) {
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		goto done2;
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_tot = shm_committed * PAGE_SIZE;
#ifdef __CYGWIN__
		shm_info.shm_atts = shm_nattch;
#else
		shm_info.shm_rss = 0;		/* XXX where to get this from? */
		shm_info.shm_swp = 0;		/* XXX where to get this from? */
		shm_info.swap_attempts = 0;	/* XXX where to get this from? */
		shm_info.swap_successes = 0;	/* XXX where to get this from? */
#endif /* __CYGWIN__ */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		goto done2;
	}
	}
	if (cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(shmid);
	else
		shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			goto done2;
		memcpy(buf, shmseg, sizeof(struct shmid_ds));
		if (bufsz)
			*bufsz = sizeof(struct shmid_ds);
		if (cmd == SHM_STAT)
			td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
		break;
	case IPC_SET: {
		struct shmid_ds *shmidp;

		shmidp = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->shm_perm.uid = shmidp->shm_perm.uid;
		shmseg->shm_perm.gid = shmidp->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmidp->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time (NULL);
		break;
	}
	case IPC_RMID:
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmctl(struct thread *td, struct shmctl_args *uap)
{
	int error = 0;
	struct shmid_ds buf;
	size_t bufsz;

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
			goto done;
	}
#ifdef __CYGWIN__
	if (uap->cmd == IPC_INFO && uap->shmid > 0) {
		/* Can't use the default kern_shmctl interface. */
		int shmid = uap->shmid;
		if (shmid > shminfo.shmmni)
			shmid = shminfo.shmmni;
		error = copyout(shmsegs, uap->buf,
		    shmid * sizeof(struct shmid_ds));
		td->td_retval[0] = error ? -1 : 0;
		return (error);
	}
#endif /* __CYGWIN__ */

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		error = copyout(&buf, uap->buf, bufsz);
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

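/*
 * shmget() splits into two halves: shmget_existing() validates a key
 * that is already present, while shmget_allocate_segment() creates a
 * fresh segment and its backing VM object.
 */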
static int
shmget_existing(struct thread *td, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	error = ipcperm(td, &shmseg->shm_perm, mode);
	if (error)
		return (error);
	if (uap->size && uap->size > shmseg->shm_segsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
#ifdef __CYGWIN__
	td->td_retval[1] =
	    vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
	return (0);
}

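/*
 * Create a new segment: pick a free slot (growing the table if needed),
 * reserve the slot while we may sleep in malloc, then allocate the
 * backing pager and fill in the shmid_ds bookkeeping.
 */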
static int
shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid, size;
#ifndef __CYGWIN__
	struct ucred *cred = td->td_ucred;
#endif /* __CYGWIN__ */
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	GIANT_REQUIRED;

	if (uap->size < (unsigned long) shminfo.shmmin ||
	    uap->size > (unsigned long) shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > (unsigned long) shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    sys_malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	VM_OBJECT_LOCK(shm_handle->shm_object);
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shm_handle->shm_object);

	shmseg->shm_internal = shm_handle;
#ifdef __CYGWIN__
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = td->ipcblk->uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = td->ipcblk->gid;
#else
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
#endif /* __CYGWIN__ */
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = td->td_proc->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time (NULL);
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
#ifdef __CYGWIN__
	td->td_retval[1] =
	    vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
	return (0);
}

/*
 * MPSAFE
 */
int
shmget(struct thread *td, struct shmget_args *uap)
{
	int segnum, mode;
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
#ifdef __CYGWIN__
		if (uap->shmflg & IPC_KEY_IS_SHMID)
			segnum = shm_find_segment_by_shmid ((int) uap->key) ?
			    IPCID_TO_IX((int) uap->key) : -1;
		else
#endif
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(td, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done2;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done2;
		}
	}
	error = shmget_allocate_segment(td, uap, mode);
done2:
#ifdef __CYGWIN__
	if (!error)
		ipcexit_creat_hookthread (td);
	else
		td->td_retval[0] = -1;
#endif
	mtx_unlock(&Giant);
	return (error);
}

#ifndef __CYGWIN__
/*
 * MPSAFE
 */
int
shmsys(td, uap)
	struct thread *td;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		int which;
		int a2;
		int a3;
		int a4;
	} */ *uap;
{
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	if (uap->which < 0 ||
	    uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	mtx_lock(&Giant);
	error = (*shmcalls[uap->which])(td, &uap->a2);
	mtx_unlock(&Giant);
	return (error);
}
#endif /* __CYGWIN__ */

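/*
 * Fork hook: the child inherits a copy of the parent's attach table,
 * so every mapped segment gains one attachment.
 */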
static void
shmfork_myhook(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1) {
			shm_nattch++;
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
		}
}

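/*
 * Cygwin wrapper around the fork hook: resolve both processes'
 * vmspaces before duplicating the parent's attach table into the
 * child.
 */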
#ifdef __CYGWIN__
int cygwin_shmfork_myhook (struct thread *td, struct proc *parent)
{
	ipcexit_creat_hookthread (td);
	ipc_p_vmspace (td->ipcblk);
	ipc_p_vmspace (parent);
	shmfork_myhook (parent, td->ipcblk);
	return 0;
}
#endif

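/*
 * Exit hook: tear down every remaining attachment of the dying process
 * and free its attach table.
 */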
void
shmexit_myhook(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	GIANT_REQUIRED;

	if ((base = vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		sys_free(base, M_SHM);
	}
}

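/*
 * Grow shmsegs[] up to shminfo.shmmni entries, copying the live entries
 * over and marking the newly added tail slots free.
 */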
static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = (struct shmid_ds *) sys_malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	sys_free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

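/*
 * One-time initialization: fetch the tunables, derive shmmax from the
 * configured page count (the loop is FreeBSD's overflow guard, kept
 * as-is), and allocate the initial segment table.
 */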
void
shminit(void)
{
	int i;

	TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
	for (i = PAGE_SIZE; i > 0; i--) {
		shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
		if (shminfo.shmmax >= shminfo.shmall)
			break;
	}
	TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
	TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
	TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
	TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

	shmalloced = shminfo.shmmni;
	shmsegs = (struct shmid_ds *) sys_malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
#ifndef __CYGWIN__
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
#endif /* __CYGWIN__ */
}

int
shmunload(void)
{

	if (shm_nused > 0)
		return (EBUSY);

	sys_free(shmsegs, M_SHM);
#ifndef __CYGWIN__
	shmexit_hook = NULL;
	shmfork_hook = NULL;
#endif /* __CYGWIN__ */
	return (0);
}

#ifndef __CYGWIN__
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
	SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
#endif /* __CYGWIN__ */
#endif /* __OUTSIDE_CYGWIN__ */