/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file has been heavily modified to become part of Cygwin's cygserver.
 */

#ifdef __OUTSIDE_CYGWIN__
#include "woutsup.h"
#include <sys/cdefs.h>
#ifndef __FBSDID
#define __FBSDID(s) const char version[] = (s)
#endif
__FBSDID("$FreeBSD: /repoman/r/ncvs/src/sys/kern/sysv_shm.c,v 1.89 2003/11/07 04:47:14 rwatson Exp $");
/* CV, 2006-01-09: Inspected upstream up to version 1.104. */

#define _KERNEL 1
#define __BSD_VISIBLE 1
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/shm.h>
#include <malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysproto.h>

#include <errno.h>
#include <time.h>
#include <unistd.h>
#include "cygserver.h"
#include "process.h"
#include "cygserver_ipc.h"

#ifdef __CYGWIN__
#ifndef PAGE_SIZE
#define PAGE_SIZE (getpagesize ())
#endif
#ifndef PAGE_MASK
#define PAGE_MASK (PAGE_SIZE - 1)
#endif
#define btoc(b) (((b) + PAGE_MASK) / PAGE_SIZE)
#define round_page(p) ((((unsigned long)(p)) + PAGE_MASK) & ~(PAGE_MASK))
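/*
 * Illustration (not part of the original source): with a 4096-byte page,
 * the page-math macros above work out to
 *   btoc(1)          == 1      (one byte still occupies one page)
 *   btoc(4096)       == 1
 *   btoc(4097)       == 2
 *   round_page(4097) == 8192   (byte count rounded up to a page boundary)
 */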
#define ACCESSPERMS (0777)
#ifdef __CYGWIN__
#define GIANT_REQUIRED
#else
#define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED)
#endif
#define KERN_SUCCESS 0
#define VM_PROT_READ PROT_READ
#define VM_PROT_WRITE PROT_WRITE
#define VM_INHERIT_SHARE 0
#define OBJT_PHYS 0
#define OBJT_SWAP 0
#define VM_PROT_DEFAULT 0
#define VM_OBJECT_LOCK(a)
#define vm_object_clear_flag(a,b)
#define vm_object_set_flag(a,b)
#define VM_OBJECT_UNLOCK(a)
#define vm_map_remove(a,b,c) KERN_SUCCESS
typedef int vm_prot_t;
#endif /* __CYGWIN__ */

#ifndef __CYGWIN__
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);
#endif /* __CYGWIN__ */

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

#ifndef __CYGWIN__
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
    (sy_call_t *)shmat, (sy_call_t *)oshmctl,
    (sy_call_t *)shmdt, (sy_call_t *)shmget,
    (sy_call_t *)shmctl
};
#endif /* __CYGWIN__ */

#define SHMSEG_FREE 0x0200
#define SHMSEG_REMOVED 0x0400
#define SHMSEG_ALLOCATED 0x0800
#define SHMSEG_WANTED 0x1000
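/*
 * Editorial note: these status bits live in shm_perm.mode alongside the
 * nine low ACCESSPERMS (0777) permission bits, which is why they start at
 * 0x0200 and why the code below masks with ACCESSPERMS before updating
 * a segment's mode.
 */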

static int shm_last_free, shm_nused, shm_committed, shmalloced, shm_nattch;
static struct shmid_ds *shmsegs;

struct shm_handle {
    /* vm_offset_t kva; */
    vm_object_t shm_object;
};

struct shmmap_state {
    vm_offset_t va;
    int shmid;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);

/*
 * Tunable values.
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS 8192 /* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define SHMMAX (SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define SHMMIN 1
#endif
#ifndef SHMMNI
#define SHMMNI 192
#endif
#ifndef SHMSEG
#define SHMSEG 128
#endif
#ifndef SHMALL
#define SHMALL (SHMMAXPGS)
#endif
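/*
 * Illustration (not part of the original source): with the defaults above
 * and a 4096-byte page, the limits work out to
 *   SHMMAX = 8192 * 4096 = 33554432 bytes (32 MiB) per segment,
 *   SHMALL = 8192 pages of System V shared memory in total,
 *   SHMMNI = 192 segment identifiers system-wide, and
 *   SHMSEG = 128 attached segments per process.
 */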

struct shminfo shminfo = {
    SHMMAX,
    SHMMIN,
    SHMMNI,
    SHMSEG,
    SHMALL
};

#ifndef __CYGWIN__
static int shm_use_phys;
#else
static long shm_use_phys;
static long shm_allow_removed;
#endif /* __CYGWIN__ */

#ifndef __CYGWIN__
struct shm_info shm_info;
#endif /* __CYGWIN__ */

#ifndef __CYGWIN__
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");
#endif /* __CYGWIN__ */

static int
shm_find_segment_by_key(key_t key)
{
    int i;

    for (i = 0; i < shmalloced; i++)
        if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
            shmsegs[i].shm_perm.key == key)
            return (i);
    return (-1);
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
    int segnum;
    struct shmid_ds *shmseg;

    segnum = IPCID_TO_IX(shmid);
    if (segnum < 0 || segnum >= shmalloced)
        return (NULL);
    shmseg = &shmsegs[segnum];
    if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
        (!shm_allow_removed &&
         (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
        shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
        return (NULL);
    return (shmseg);
}
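/*
 * Illustration (not part of the original source): the IPC id macros used
 * above are conventionally defined along these lines in <sys/ipc.h>, so a
 * shmid packs the slot index into the low 16 bits and the slot's reuse
 * sequence number into the high bits:
 *
 *   #define IPCID_TO_IX(id)         ((id) & 0xffff)
 *   #define IPCID_TO_SEQ(id)        (((id) >> 16) & 0xffff)
 *   #define IXSEQ_TO_IPCID(ix,perm) (((perm).seq << 16) | ((ix) & 0xffff))
 *
 * The sequence comparison in shm_find_segment_by_shmid() is what makes a
 * stale shmid fail once its slot has been recycled.
 */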

static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)
{
    struct shmid_ds *shmseg;

    if (segnum < 0 || segnum >= shmalloced)
        return (NULL);
    shmseg = &shmsegs[segnum];
    if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
        (!shm_allow_removed &&
         (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
        return (NULL);
    return (shmseg);
}

static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
    struct shm_handle *shm_handle;
    size_t size;

    GIANT_REQUIRED;

    shm_handle = shmseg->shm_internal;
    vm_object_deallocate(shm_handle->shm_object);
    sys_free(shm_handle, M_SHM);
    shmseg->shm_internal = NULL;
    size = round_page(shmseg->shm_segsz);
    shm_committed -= btoc(size);
    shm_nused--;
    shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
    struct shmid_ds *shmseg;
    int segnum, result;
    size_t size;

    GIANT_REQUIRED;

    segnum = IPCID_TO_IX(shmmap_s->shmid);
    shmseg = &shmsegs[segnum];
    size = round_page(shmseg->shm_segsz);
    result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
    if (result != KERN_SUCCESS)
        return (EINVAL);
    shmmap_s->shmid = -1;
    shmseg->shm_dtime = time (NULL);
    --shm_nattch;
    if ((--shmseg->shm_nattch <= 0) &&
        (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
        shm_deallocate_segment(shmseg);
        shm_last_free = segnum;
    }
    return (0);
}
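/*
 * Editorial note: detaching updates two levels of bookkeeping -- the
 * per-process shmmap_state slot (its shmid is reset to -1) and the
 * per-segment shm_nattch count.  A segment already marked SHMSEG_REMOVED
 * is finally deallocated here, when its last attach goes away.
 */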

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
    const void *shmaddr;
};
#endif

/*
 * MPSAFE
 */
int
shmdt(struct thread *td, struct shmdt_args *uap)
{
    struct proc *p = td->td_proc;
    struct shmmap_state *shmmap_s;
    int i;
    int error = 0;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    mtx_lock(&Giant);
    shmmap_s = p->p_vmspace->vm_shm;
    if (shmmap_s == NULL) {
        error = EINVAL;
        goto done2;
    }
    for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
        if (shmmap_s->shmid != -1 &&
            shmmap_s->va == (vm_offset_t)uap->shmaddr) {
            break;
        }
    }
    if (i == shminfo.shmseg) {
        error = EINVAL;
        goto done2;
    }
    error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
    mtx_unlock(&Giant);
    return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
    int shmid;
    const void *shmaddr;
    int shmflg;
};
#endif

/*
 * MPSAFE
 */
int
kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg)
{
    struct proc *p = td->td_proc;
    int i, flags;
    struct shmid_ds *shmseg;
    struct shmmap_state *shmmap_s = NULL;
#ifndef __CYGWIN__
    struct shm_handle *shm_handle;
#endif
    vm_offset_t attach_va;
    vm_prot_t prot;
    vm_size_t size;
#ifndef __CYGWIN__
    int rv;
#endif
    int error = 0;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    mtx_lock(&Giant);
    shmmap_s = p->p_vmspace->vm_shm;
    if (shmmap_s == NULL) {
        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
        for (i = 0; i < shminfo.shmseg; i++)
            shmmap_s[i].shmid = -1;
        p->p_vmspace->vm_shm = shmmap_s;
    }
    shmseg = shm_find_segment_by_shmid(shmid);
    if (shmseg == NULL) {
        error = EINVAL;
        goto done2;
    }
    error = ipcperm(td, &shmseg->shm_perm,
        (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
    if (error)
        goto done2;
    for (i = 0; i < shminfo.shmseg; i++) {
        if (shmmap_s->shmid == -1)
            break;
        shmmap_s++;
    }
    if (i >= shminfo.shmseg) {
        error = EMFILE;
        goto done2;
    }
    size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
    prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
    prot = VM_PROT_READ;
#endif
    if ((shmflg & SHM_RDONLY) == 0)
        prot |= VM_PROT_WRITE;
    flags = MAP_ANON | MAP_SHARED;
    debug_printf ("shmaddr: %x, shmflg: %x", shmaddr, shmflg);
#ifdef __CYGWIN__
    /* The alignment checks have already been made in the Cygwin DLL,
       and shmat's only job is to keep a record of the attached memory.
       These checks break shm on 9x, since MapViewOfFileEx apparently
       returns memory which isn't aligned to SHMLBA.  Go figure!  */
    attach_va = (vm_offset_t)shmaddr;
#else
    if (shmaddr) {
        flags |= MAP_FIXED;
        if (shmflg & SHM_RND) {
            attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
        } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
            attach_va = (vm_offset_t)shmaddr;
        } else {
            error = EINVAL;
            goto done2;
        }
    } else {
        /*
         * This is just a hint to vm_map_find() about where to
         * put it.
         */
        attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
            + maxtsiz + maxdsiz);
    }

    shm_handle = shmseg->shm_internal;
    vm_object_reference(shm_handle->shm_object);
    rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
        0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
    if (rv != KERN_SUCCESS) {
        error = ENOMEM;
        goto done2;
    }
    vm_map_inherit(&p->p_vmspace->vm_map,
        attach_va, attach_va + size, VM_INHERIT_SHARE);
#endif

    shmmap_s->va = attach_va;
    shmmap_s->shmid = shmid;
    shmseg->shm_lpid = p->p_pid;
    shmseg->shm_atime = time (NULL);
    shmseg->shm_nattch++;
    shm_nattch++;
    td->td_retval[0] = attach_va;
done2:
    mtx_unlock(&Giant);
    return (error);
}
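/*
 * Illustration (not part of the original source): in the non-Cygwin branch
 * above, SHM_RND rounds a caller-supplied address down to the nearest
 * SHMLBA boundary.  Assuming SHMLBA == 0x10000, for example:
 *   shmaddr 0x12345678, SHM_RND set   -> attach_va 0x12340000
 *   shmaddr 0x12340000, SHM_RND clear -> attach_va 0x12340000 (aligned)
 *   shmaddr 0x12345678, SHM_RND clear -> EINVAL (misaligned)
 */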

int
shmat(struct thread *td, struct shmat_args *uap)
{
    return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}

#ifndef __CYGWIN__
struct oshmid_ds {
    struct ipc_perm shm_perm;   /* operation perms */
    int shm_segsz;              /* size of segment (bytes) */
    u_short shm_cpid;           /* pid, creator */
    u_short shm_lpid;           /* pid, last operation */
    short shm_nattch;           /* no. of current attaches */
    time_t shm_atime;           /* last attach time */
    time_t shm_dtime;           /* last detach time */
    time_t shm_ctime;           /* last change time */
    void *shm_handle;           /* internal handle for shm segment */
};

struct oshmctl_args {
    int shmid;
    int cmd;
    struct oshmid_ds *ubuf;
};

/*
 * MPSAFE
 */
static int
oshmctl(struct thread *td, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
    int error = 0;
    struct shmid_ds *shmseg;
    struct oshmid_ds outbuf;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    mtx_lock(&Giant);
    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        error = EINVAL;
        goto done2;
    }
    switch (uap->cmd) {
    case IPC_STAT:
        error = ipcperm(td, &shmseg->shm_perm, IPC_R);
        if (error)
            goto done2;
        outbuf.shm_perm = shmseg->shm_perm;
        outbuf.shm_segsz = shmseg->shm_segsz;
        outbuf.shm_cpid = shmseg->shm_cpid;
        outbuf.shm_lpid = shmseg->shm_lpid;
        outbuf.shm_nattch = shmseg->shm_nattch;
        outbuf.shm_atime = shmseg->shm_atime;
        outbuf.shm_dtime = shmseg->shm_dtime;
        outbuf.shm_ctime = shmseg->shm_ctime;
        outbuf.shm_handle = shmseg->shm_internal;
        error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
        if (error)
            goto done2;
        break;
    default:
        /* XXX casting to (sy_call_t *) is bogus, as usual. */
        error = ((sy_call_t *)shmctl)(td, uap);
        break;
    }
done2:
    mtx_unlock(&Giant);
    return (error);
#else
    return (EINVAL);
#endif
}
#endif /* __CYGWIN__ */

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
    int shmid;
    int cmd;
    struct shmid_ds *buf;
};
#endif

/*
 * MPSAFE
 */
int
kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz)
{
    int error = 0;
    struct shmid_ds *shmseg;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);

    mtx_lock(&Giant);
    switch (cmd) {
    case IPC_INFO:
        memcpy(buf, &shminfo, sizeof(shminfo));
        if (bufsz)
            *bufsz = sizeof(shminfo);
        td->td_retval[0] = shmalloced;
        goto done2;
    case SHM_INFO: {
        struct shm_info shm_info;
        shm_info.used_ids = shm_nused;
        shm_info.shm_tot = shm_committed * PAGE_SIZE;
#ifdef __CYGWIN__
        shm_info.shm_atts = shm_nattch;
#else
        shm_info.shm_rss = 0;           /* XXX where to get this from? */
        shm_info.shm_swp = 0;           /* XXX where to get this from? */
        shm_info.swap_attempts = 0;     /* XXX where to get this from? */
        shm_info.swap_successes = 0;    /* XXX where to get this from? */
#endif /* __CYGWIN__ */
        memcpy(buf, &shm_info, sizeof(shm_info));
        if (bufsz)
            *bufsz = sizeof(shm_info);
        td->td_retval[0] = shmalloced;
        goto done2;
    }
    }
    if (cmd == SHM_STAT)
        shmseg = shm_find_segment_by_shmidx(shmid);
    else
        shmseg = shm_find_segment_by_shmid(shmid);
    if (shmseg == NULL) {
        error = EINVAL;
        goto done2;
    }
    switch (cmd) {
    case SHM_STAT:
    case IPC_STAT:
        error = ipcperm(td, &shmseg->shm_perm, IPC_R);
        if (error)
            goto done2;
        memcpy(buf, shmseg, sizeof(struct shmid_ds));
        if (bufsz)
            *bufsz = sizeof(struct shmid_ds);
        if (cmd == SHM_STAT)
            td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
        break;
    case IPC_SET: {
        struct shmid_ds *shmidbuf;

        shmidbuf = (struct shmid_ds *)buf;
        error = ipcperm(td, &shmseg->shm_perm, IPC_M);
        if (error)
            goto done2;
        shmseg->shm_perm.uid = shmidbuf->shm_perm.uid;
        shmseg->shm_perm.gid = shmidbuf->shm_perm.gid;
        shmseg->shm_perm.mode =
            (shmseg->shm_perm.mode & ~ACCESSPERMS) |
            (shmidbuf->shm_perm.mode & ACCESSPERMS);
        shmseg->shm_ctime = time (NULL);
        break;
    }
    case IPC_RMID:
        error = ipcperm(td, &shmseg->shm_perm, IPC_M);
        if (error)
            goto done2;
        shmseg->shm_perm.key = IPC_PRIVATE;
        shmseg->shm_perm.mode |= SHMSEG_REMOVED;
        if (shmseg->shm_nattch <= 0) {
            shm_deallocate_segment(shmseg);
            shm_last_free = IPCID_TO_IX(shmid);
        }
        break;
#if 0
    case SHM_LOCK:
    case SHM_UNLOCK:
#endif
    default:
        error = EINVAL;
        break;
    }
done2:
    mtx_unlock(&Giant);
    return (error);
}

int
shmctl(struct thread *td, struct shmctl_args *uap)
{
    int error = 0;
    struct shmid_ds buf;
    size_t bufsz;

    /* IPC_SET needs to copyin the buffer before calling kern_shmctl. */
    if (uap->cmd == IPC_SET) {
        if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
            goto done;
    }
#ifdef __CYGWIN__
    if (uap->cmd == IPC_INFO && uap->shmid > 0) {
        /* Can't use the default kern_shmctl interface. */
        int shmid = uap->shmid;
        if (shmid > shminfo.shmmni)
            shmid = shminfo.shmmni;
        error = copyout(shmsegs, uap->buf,
            shmid * sizeof(struct shmid_ds));
        td->td_retval[0] = error ? -1 : 0;
        return (error);
    }
#endif /* __CYGWIN__ */

    error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
    if (error)
        goto done;

    /* Cases in which we need to copyout. */
    switch (uap->cmd) {
    case IPC_INFO:
    case SHM_INFO:
    case SHM_STAT:
    case IPC_STAT:
        error = copyout(&buf, uap->buf, bufsz);
        break;
    }

done:
    if (error) {
        /* Invalidate the return value. */
        td->td_retval[0] = -1;
    }
    return (error);
}
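/*
 * Illustration (not part of the original source): seen from a client, the
 * path above services the usual libc calls, e.g.
 *
 *   struct shmid_ds ds;
 *   if (shmctl(id, IPC_STAT, &ds) == 0)
 *       printf("size %lu, attaches %lu\n",
 *           (unsigned long)ds.shm_segsz, (unsigned long)ds.shm_nattch);
 *   shmctl(id, IPC_RMID, NULL);
 *
 * Note that IPC_RMID only marks the segment SHMSEG_REMOVED; kern_shmctl()
 * defers the actual deallocation until the last process detaches.
 */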

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
    key_t key;
    size_t size;
    int shmflg;
};
#endif

static int
shmget_existing(struct thread *td, struct shmget_args *uap, int mode, int segnum)
{
    struct shmid_ds *shmseg;
    int error;

    shmseg = &shmsegs[segnum];
    if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
        /*
         * This segment is in the process of being allocated.  Wait
         * until it's done, and look the key up again (in case the
         * allocation failed or it was freed).
         */
        shmseg->shm_perm.mode |= SHMSEG_WANTED;
        error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
        if (error)
            return (error);
        return (EAGAIN);
    }
    if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
        return (EEXIST);
    error = ipcperm(td, &shmseg->shm_perm, mode);
    if (error)
        return (error);
    if (uap->size && uap->size > shmseg->shm_segsz)
        return (EINVAL);
    td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
#ifdef __CYGWIN__
    td->td_retval[1] =
        vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
    return (0);
}

static int
shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode)
{
    int i, segnum, shmid, size;
#ifndef __CYGWIN__
    struct ucred *cred = td->td_ucred;
#endif /* __CYGWIN__ */
    struct shmid_ds *shmseg;
    struct shm_handle *shm_handle;

    GIANT_REQUIRED;

    if (uap->size < (unsigned long) shminfo.shmmin ||
        uap->size > (unsigned long) shminfo.shmmax)
        return (EINVAL);
    if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
        return (ENOSPC);
    size = round_page(uap->size);
    if (shm_committed + btoc(size) > shminfo.shmall)
        return (ENOMEM);
    if (shm_last_free < 0) {
        shmrealloc();   /* Maybe expand the shmsegs[] array. */
        for (i = 0; i < shmalloced; i++)
            if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                break;
        if (i == shmalloced)
            return (ENOSPC);
        segnum = i;
    } else {
        segnum = shm_last_free;
        shm_last_free = -1;
    }
    shmseg = &shmsegs[segnum];
    /*
     * In case we sleep in malloc(), mark the segment present but deleted
     * so that no one else tries to create the same key.
     */
    shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
    shmseg->shm_perm.key = uap->key;
    shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
    shm_handle = (struct shm_handle *)
        sys_malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
    shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

    /*
     * We make sure that we have allocated a pager before we need
     * to.
     */
    if (shm_use_phys) {
        shm_handle->shm_object =
            vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
    } else {
        shm_handle->shm_object =
            vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
    }
    VM_OBJECT_LOCK(shm_handle->shm_object);
    vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
    vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
    VM_OBJECT_UNLOCK(shm_handle->shm_object);

    shmseg->shm_internal = shm_handle;
#ifdef __CYGWIN__
    shmseg->shm_perm.cuid = shmseg->shm_perm.uid = td->ipcblk->uid;
    shmseg->shm_perm.cgid = shmseg->shm_perm.gid = td->ipcblk->gid;
#else
    shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
    shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
#endif /* __CYGWIN__ */
    shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
        (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
    shmseg->shm_segsz = uap->size;
    shmseg->shm_cpid = td->td_proc->p_pid;
    shmseg->shm_lpid = shmseg->shm_nattch = 0;
    shmseg->shm_atime = shmseg->shm_dtime = 0;
    shmseg->shm_ctime = time (NULL);
    shm_committed += btoc(size);
    shm_nused++;
    if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
        /*
         * Somebody else wanted this key while we were asleep.  Wake
         * them up now.
         */
        shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
        wakeup(shmseg);
    }
    td->td_retval[0] = shmid;
#ifdef __CYGWIN__
    td->td_retval[1] =
        vm_object_duplicate(td, shmseg->shm_internal->shm_object);
#endif /* __CYGWIN__ */
    return (0);
}

/*
 * MPSAFE
 */
int
shmget(struct thread *td, struct shmget_args *uap)
{
    int segnum, mode;
    int error;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    mtx_lock(&Giant);
    mode = uap->shmflg & ACCESSPERMS;
    if (uap->key != IPC_PRIVATE) {
again:
#ifdef __CYGWIN__
        if (uap->shmflg & IPC_KEY_IS_SHMID)
            segnum = shm_find_segment_by_shmid ((int) uap->key) ?
                IPCID_TO_IX((int) uap->key) : -1;
        else
#endif
            segnum = shm_find_segment_by_key(uap->key);
        if (segnum >= 0) {
            error = shmget_existing(td, uap, mode, segnum);
            if (error == EAGAIN)
                goto again;
            goto done2;
        }
        if ((uap->shmflg & IPC_CREAT) == 0) {
            error = ENOENT;
            goto done2;
        }
    }
    error = shmget_allocate_segment(td, uap, mode);
done2:
#ifdef __CYGWIN__
    if (!error)
        ipcexit_creat_hookthread (td);
    else
        td->td_retval[0] = -1;
#endif
    mtx_unlock(&Giant);
    return (error);
}
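/*
 * Illustration (not part of the original source): the canonical client-side
 * lifecycle these handlers implement, as issued through the Cygwin DLL:
 *
 *   key_t key = ftok("/some/path", 42);
 *   int id = shmget(key, 65536, IPC_CREAT | 0600);   -> shmget() above
 *   void *p = shmat(id, NULL, 0);                    -> kern_shmat()
 *   memset(p, 0, 65536);                             -> the shared pages
 *   shmdt(p);                                        -> shmdt()
 *   shmctl(id, IPC_RMID, NULL);                      -> kern_shmctl()
 */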

#ifndef __CYGWIN__
/*
 * MPSAFE
 */
int
shmsys(td, uap)
    struct thread *td;
    /* XXX actually varargs. */
    struct shmsys_args /* {
        int which;
        int a2;
        int a3;
        int a4;
    } */ *uap;
{
    int error;

    if (!jail_sysvipc_allowed && jailed(td->td_ucred))
        return (ENOSYS);
    if (uap->which < 0 ||
        uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
        return (EINVAL);
    mtx_lock(&Giant);
    error = (*shmcalls[uap->which])(td, &uap->a2);
    mtx_unlock(&Giant);
    return (error);
}
#endif /* __CYGWIN__ */

static void
shmfork_myhook(struct proc *p1, struct proc *p2)
{
    struct shmmap_state *shmmap_s;
    size_t size;
    int i;

    size = shminfo.shmseg * sizeof(struct shmmap_state);
    shmmap_s = (struct shmmap_state *) sys_malloc(size, M_SHM, M_WAITOK);
    bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
    p2->p_vmspace->vm_shm = shmmap_s;
    for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
        if (shmmap_s->shmid != -1) {
            shm_nattch++;
            shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
        }
}

#ifdef __CYGWIN__
int
cygwin_shmfork_myhook (struct thread *td, struct proc *parent)
{
    ipcexit_creat_hookthread (td);
    ipc_p_vmspace (td->ipcblk);
    ipc_p_vmspace (parent);
    shmfork_myhook (parent, td->ipcblk);
    return 0;
}
#endif

void
shmexit_myhook(struct vmspace *vm)
{
    struct shmmap_state *base, *shm;
    int i;

    GIANT_REQUIRED;

    if ((base = vm->vm_shm) != NULL) {
        vm->vm_shm = NULL;
        for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
            if (shm->shmid != -1)
                shm_delete_mapping(vm, shm);
        }
        sys_free(base, M_SHM);
    }
}

static void
shmrealloc(void)
{
    int i;
    struct shmid_ds *newsegs;

    if (shmalloced >= shminfo.shmmni)
        return;

    newsegs = (struct shmid_ds *)
        sys_malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
    if (newsegs == NULL)
        return;
    for (i = 0; i < shmalloced; i++)
        bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
    for (; i < shminfo.shmmni; i++) {
        /* Initialize the new slots in the new array, not the old one,
           which only has shmalloced entries. */
        newsegs[i].shm_perm.mode = SHMSEG_FREE;
        newsegs[i].shm_perm.seq = 0;
    }
    sys_free(shmsegs, M_SHM);
    shmsegs = newsegs;
    shmalloced = shminfo.shmmni;
}

void
shminit(void)
{
    int i;

    TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
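    /*
     * Editorial note: this loop derives shmmax from shmall (in pages) by
     * multiplying by the page size, counting the multiplier down from
     * PAGE_SIZE so that an overflowing product (shmmax < shmall) is
     * rejected and a smaller multiplier is tried instead.
     */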
    for (i = PAGE_SIZE; i > 0; i--) {
        shminfo.shmmax = shminfo.shmall * i;
        if (shminfo.shmmax >= shminfo.shmall)
            break;
    }
    TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
    TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
    TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
    TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

    shmalloced = shminfo.shmmni;
    shmsegs = (struct shmid_ds *)
        sys_malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
    if (shmsegs == NULL)
        panic("cannot allocate initial memory for sysvshm");
    for (i = 0; i < shmalloced; i++) {
        shmsegs[i].shm_perm.mode = SHMSEG_FREE;
        shmsegs[i].shm_perm.seq = 0;
    }
    shm_last_free = 0;
    shm_nused = 0;
    shm_committed = 0;
#ifndef __CYGWIN__
    shmexit_hook = &shmexit_myhook;
    shmfork_hook = &shmfork_myhook;
#endif /* __CYGWIN__ */
}

int
shmunload(void)
{

    if (shm_nused > 0)
        return (EBUSY);

    sys_free(shmsegs, M_SHM);
#ifndef __CYGWIN__
    shmexit_hook = NULL;
    shmfork_hook = NULL;
#endif /* __CYGWIN__ */
    return (0);
}

#ifndef __CYGWIN__
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

    return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
    int error = 0;

    switch (cmd) {
    case MOD_LOAD:
        shminit();
        break;
    case MOD_UNLOAD:
        error = shmunload();
        break;
    case MOD_SHUTDOWN:
        break;
    default:
        error = EINVAL;
        break;
    }
    return (error);
}

static moduledata_t sysvshm_mod = {
    "sysvshm",
    &sysvshm_modload,
    NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
    SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
#endif /* __CYGWIN__ */
#endif /* __OUTSIDE_CYGWIN__ */