/* Copyright (C) 1994-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <hurd.h>
#include <hurd/fd.h>

/* Map addresses starting near ADDR and extending for LEN bytes from
   OFFSET into the file that FD describes, according to PROT and FLAGS.
   If ADDR is nonzero, it is the desired mapping address.  If the
   MAP_FIXED bit is set in FLAGS, the mapping will be at ADDR exactly
   (which must be page-aligned); otherwise the system chooses a
   convenient nearby address.  The return value is the actual mapping
   address chosen or (void *) -1 for errors (in which case `errno' is
   set).  A successful `mmap' call deallocates any previous mapping for
   the affected region.  */
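
/* For example (an illustrative sketch, not part of the original file), a
   caller mapping a file read-only would do something like

     void *p = mmap (NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
     if (p == MAP_FAILED)
       perror ("mmap");

   which on the Hurd becomes an __io_map RPC on FD's port followed by a
   __vm_map into the calling task, as implemented below.  */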

void *
__mmap (void *addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot, max_vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr, mask;
  boolean_t copy, anywhere;

  mapaddr = (vm_address_t) addr;

  /* ADDR and OFFSET must be page-aligned.  */
  if ((mapaddr & (__vm_page_size - 1)) || (offset & (__vm_page_size - 1)))
    return (void *) (long int) __hurd_fail (EINVAL);

  if ((flags & MAP_EXCL) && ! (flags & MAP_FIXED))
    return (void *) (long int) __hurd_fail (EINVAL);

  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;
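
  /* COPY selects a private (copy-on-write) mapping rather than one shared
     with the underlying object; it is passed as the COPY and inheritance
     arguments of vm_map below.  ANYWHERE lets the kernel pick the address
     when no fixed placement was requested.  */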
  copy = ! (flags & MAP_SHARED);
  anywhere = ! (flags & MAP_FIXED);

  if ((addr == NULL) && (prot & PROT_EXEC)
      && HAS_ARCH_FEATURE (Prefer_MAP_32BIT_EXEC))
    flags |= MAP_32BIT;

  mask = (flags & MAP_32BIT) ? ~(vm_address_t) 0x7FFFFFFF : 0;
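
  /* MASK, computed above, is handed to vm_map as an address constraint:
     bits set in it must be clear in the chosen address, so with MAP_32BIT
     the mapping lands below 2 GiB.  */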

  switch (flags & MAP_TYPE)
    {
    default:
      return (void *) (long int) __hurd_fail (EINVAL);

    case MAP_ANON:
      memobj = MACH_PORT_NULL;
      max_vmprot = VM_PROT_ALL;
      break;

    case MAP_FILE:
    case 0:		/* Allow, e.g., just MAP_SHARED.  */
      {
        mach_port_t robj, wobj;
        if (err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj)))
          {
            if (err == MIG_BAD_ID || err == EOPNOTSUPP || err == ENOSYS)
              err = ENODEV;	/* File descriptor doesn't support mmap.  */
            return (void *) (long int) __hurd_dfail (fd, err);
          }
        switch (prot & (PROT_READ|PROT_WRITE))
          {
          /* Although it apparently doesn't make sense to map a file with
             protection set to PROT_NONE, it is actually sometimes done.
             In particular, that's how localedef reserves some space for
             the locale archive file, the rationale being that some
             implementations take into account whether the mapping is
             anonymous or not when selecting addresses.  */
          case PROT_NONE:
          case PROT_READ:
            max_vmprot = VM_PROT_READ|VM_PROT_EXECUTE;
            if (wobj == robj)
              max_vmprot |= VM_PROT_WRITE;
            memobj = robj;
            if (MACH_PORT_VALID (wobj))
              __mach_port_deallocate (__mach_task_self (), wobj);
            break;
          case PROT_WRITE:
            max_vmprot = VM_PROT_WRITE;
            if (robj == wobj)
              max_vmprot |= VM_PROT_READ|VM_PROT_EXECUTE;
            memobj = wobj;
            if (MACH_PORT_VALID (robj))
              __mach_port_deallocate (__mach_task_self (), robj);
            break;
          case PROT_READ|PROT_WRITE:
            max_vmprot = VM_PROT_ALL;
            if (robj == wobj)
              {
                memobj = robj;
                /* Remove extra reference.  */
                __mach_port_deallocate (__mach_task_self (), memobj);
              }
            else if (wobj == MACH_PORT_NULL /* Not writable by mapping.  */
                     && copy)
              /* The file can only be mapped for reading.  Since we are
                 making a private mapping, we will never try to write the
                 object anyway, so we don't care.  */
              memobj = robj;
            else
              {
                __mach_port_deallocate (__mach_task_self (), robj);
                __mach_port_deallocate (__mach_task_self (), wobj);
                return (void *) (long int) __hurd_fail (EACCES);
              }
            break;
          default:
            __builtin_unreachable ();
          }
        break;
        /* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */

  if (copy)
    max_vmprot = VM_PROT_ALL;

  /* When ANYWHERE is true but the caller has provided a preferred address,
     try mapping at that address with anywhere = 0 first.  If this fails,
     we'll retry with anywhere = 1 below.  */
  err = __vm_map (__mach_task_self (),
                  &mapaddr, (vm_size_t) len, mask,
                  anywhere && (mapaddr == 0),
                  memobj, (vm_offset_t) offset,
                  copy, vmprot, max_vmprot,
                  copy ? VM_INHERIT_COPY : VM_INHERIT_SHARE);
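
  /* ERR now holds the error code returned by __vm_map; the branches below
     handle an address-space collision at the requested address, for both
     the MAP_FIXED and the non-fixed case.  */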

  if (flags & MAP_FIXED)
    {
      if (err == KERN_NO_SPACE)
        {
          if (flags & MAP_EXCL)
            err = EEXIST;
          else
            {
              /* The region is already allocated; deallocate it first.  */
              /* XXX this is not atomic as it is in unix! */
              err = __vm_deallocate (__mach_task_self (), mapaddr, len);
              if (! err)
                err = __vm_map (__mach_task_self (),
                                &mapaddr, (vm_size_t) len, mask,
                                0, memobj, (vm_offset_t) offset,
                                copy, vmprot, max_vmprot,
                                copy ? VM_INHERIT_COPY : VM_INHERIT_SHARE);
            }
        }
    }
  else
    {
      /* This mmap call is allowed to allocate anywhere,  */
      if (mapaddr != 0 && (err == KERN_NO_SPACE || err == KERN_INVALID_ADDRESS))
        /* ...but above, we tried allocating at the specific address,
           and failed to.  Now try again, with anywhere = 1 this time.  */
        err = __vm_map (__mach_task_self (),
                        &mapaddr, (vm_size_t) len, mask,
                        1, memobj, (vm_offset_t) offset,
                        copy, vmprot, max_vmprot,
                        copy ? VM_INHERIT_COPY : VM_INHERIT_SHARE);
    }
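
  /* vm_map keeps its own reference to the memory object for any mapping it
     established, so the port reference obtained from __io_map can be
     released here whether or not the map calls above succeeded.  */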
  if (MACH_PORT_VALID (memobj))
    __mach_port_deallocate (__mach_task_self (), memobj);

  if (err == KERN_PROTECTION_FAILURE)
    /* The memory object does not allow the access requested in VMPROT;
       POSIX reports this as EACCES.  */
    err = EACCES;

  if (err)
    return (void *) (long int) __hurd_fail (err);

  return (void *) mapaddr;
}

libc_hidden_def (__mmap)
weak_alias (__mmap, mmap)