/* Copyright (C) 1994-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
17 | ||
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <ldsodefs.h>
#include <hurd.h>
#include <hurd/fd.h>

/* Map addresses starting near ADDR and extending for LEN bytes.  from
   OFFSET into the file FD describes according to PROT and FLAGS.  If ADDR
   is nonzero, it is the desired mapping address.  If the MAP_FIXED bit is
   set in FLAGS, the mapping will be at ADDR exactly (which must be
   page-aligned); otherwise the system chooses a convenient nearby address.
   The return value is the actual mapping address chosen or (void *) -1
   for errors (in which case `errno' is set).  A successful `mmap' call
   deallocates any previous mapping for the affected region.  */

void *
__mmap (void *addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot, max_vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr, mask;
  boolean_t copy, anywhere;

  mapaddr = (vm_address_t) addr;

  /* ADDR and OFFSET must be page-aligned.  */
  if ((mapaddr & (__vm_page_size - 1)) || (offset & (__vm_page_size - 1)))
    return (void *) (long int) __hurd_fail (EINVAL);

  /* MAP_EXCL ("fail instead of clobbering an existing mapping") is only
     meaningful together with MAP_FIXED.  */
  if ((flags & MAP_EXCL) && ! (flags & MAP_FIXED))
    return (void *) (long int) __hurd_fail (EINVAL);

  /* Translate the POSIX protection bits into Mach VM protection bits.  */
  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;

  /* A private mapping is expressed as a copy (copy-on-write) in Mach
     terms; a non-MAP_FIXED mapping lets the kernel choose the address.  */
  copy = ! (flags & MAP_SHARED);
  anywhere = ! (flags & MAP_FIXED);

#ifdef __x86_64__
  if ((addr == NULL) && (prot & PROT_EXEC)
      && HAS_ARCH_FEATURE (Prefer_MAP_32BIT_EXEC))
    flags |= MAP_32BIT;
#endif
  /* The vm_map address mask: zero imposes no placement constraint; for
     MAP_32BIT it forces the chosen address into the low 2 GiB.  */
  mask = (flags & MAP_32BIT) ? ~(vm_address_t) 0x7FFFFFFF : 0;

  switch (flags & MAP_TYPE)
    {
    default:
      return (void *) (long int) __hurd_fail (EINVAL);

    case MAP_ANON:
      /* Anonymous memory: no backing memory object; any protection may
	 later be granted.  */
      memobj = MACH_PORT_NULL;
      max_vmprot = VM_PROT_ALL;
      break;

    case MAP_FILE:
    case 0:			/* Allow, e.g., just MAP_SHARED.  */
      {
	/* Ask the file's server for its read-side and write-side memory
	   objects; which one(s) we use — and which we must release —
	   depends on the requested protection.  MAX_VMPROT becomes the
	   vm_map max_protection argument, i.e. the strongest protection
	   the mapping can ever be mprotect'd to.  */
	mach_port_t robj, wobj;
	if (err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj)))
	  {
	    if (err == MIG_BAD_ID || err == EOPNOTSUPP || err == ENOSYS)
	      err = ENODEV;	/* File descriptor doesn't support mmap.  */
	    return (void *) (long int) __hurd_dfail (fd, err);
	  }
	switch (prot & (PROT_READ|PROT_WRITE))
	  {
	  /* Although it apparently doesn't make sense to map a file with
	     protection set to PROT_NONE, it is actually sometimes done.
	     In particular, that's how localedef reserves some space for
	     the locale archive file, the rationale being that some
	     implementations take into account whether the mapping is
	     anonymous or not when selecting addresses.  */
	  case PROT_NONE:
	  case PROT_READ:
	    max_vmprot = VM_PROT_READ|VM_PROT_EXECUTE;
	    if (wobj == robj)
	      max_vmprot |= VM_PROT_WRITE;
	    memobj = robj;
	    /* Drop the unused write-side object reference.  */
	    if (MACH_PORT_VALID (wobj))
	      __mach_port_deallocate (__mach_task_self (), wobj);
	    break;
	  case PROT_WRITE:
	    max_vmprot = VM_PROT_WRITE;
	    if (robj == wobj)
	      max_vmprot |= VM_PROT_READ|VM_PROT_EXECUTE;
	    memobj = wobj;
	    /* Drop the unused read-side object reference.  */
	    if (MACH_PORT_VALID (robj))
	      __mach_port_deallocate (__mach_task_self (), robj);
	    break;
	  case PROT_READ|PROT_WRITE:
	    max_vmprot = VM_PROT_ALL;
	    if (robj == wobj)
	      {
		memobj = wobj;
		/* Remove extra reference.  */
		__mach_port_deallocate (__mach_task_self (), memobj);
	      }
	    else if (wobj == MACH_PORT_NULL /* Not writable by mapping.  */
		     && copy)
	      /* The file can only be mapped for reading.  Since we are
		 making a private mapping, we will never try to write the
		 object anyway, so we don't care.  */
	      memobj = robj;
	    else
	      {
		__mach_port_deallocate (__mach_task_self (), wobj);
		return (void *) (long int) __hurd_fail (EACCES);
	      }
	    break;
	  default:
	    __builtin_unreachable ();
	  }
	break;
	/* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */

  /* For a private (copy-on-write) mapping the object is never written
     through, so any protection may be allowed regardless of how the
     file itself was opened.  */
  if (copy)
    max_vmprot = VM_PROT_ALL;

  /* When ANYWHERE is true but the caller has provided a preferred address,
     try mapping at that address with anywhere = 0 first.  If this fails,
     we'll retry with anywhere = 1 below.  */
  err = __vm_map (__mach_task_self (),
		  &mapaddr, (vm_size_t) len, mask,
		  anywhere && (mapaddr == 0),
		  memobj, (vm_offset_t) offset,
		  copy, vmprot, max_vmprot,
		  copy ? VM_INHERIT_COPY : VM_INHERIT_SHARE);

  if (flags & MAP_FIXED)
    {
      /* KERN_NO_SPACE here means the fixed region is already occupied.  */
      if (err == KERN_NO_SPACE)
	{
	  if (flags & MAP_EXCL)
	    err = EEXIST;
	  else
	    {
	      /* The region is already allocated; deallocate it first.  */
	      /* XXX this is not atomic as it is in unix!  */
	      err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	      if (! err)
		err = __vm_map (__mach_task_self (),
				&mapaddr, (vm_size_t) len, mask,
				0, memobj, (vm_offset_t) offset,
				copy, vmprot, max_vmprot,
				copy ? VM_INHERIT_COPY : VM_INHERIT_SHARE);
	    }
	}
    }
  else
    {
      /* This mmap call is allowed to allocate anywhere, */
      if (mapaddr != 0 && (err == KERN_NO_SPACE || err == KERN_INVALID_ADDRESS))
	/* ...but above, we tried allocating at the specific address,
	   and failed to.  Now try again, with anywhere = 1 this time.  */
	err = __vm_map (__mach_task_self (),
			&mapaddr, (vm_size_t) len, mask,
			1, memobj, (vm_offset_t) offset,
			copy, vmprot, max_vmprot,
			copy ? VM_INHERIT_COPY : VM_INHERIT_SHARE);
    }

  /* vm_map took its own reference on the memory object (if any); release
     ours whether the map succeeded or not.  */
  if (MACH_PORT_VALID (memobj))
    __mach_port_deallocate (__mach_task_self (), memobj);

  /* Map the Mach protection error onto the POSIX errno callers expect.  */
  if (err == KERN_PROTECTION_FAILURE)
    err = EACCES;

  if (err)
    return (void *) (long int) __hurd_fail (err);

  return (void *) mapaddr;
}
202 | ||
/* __mmap is the internal (hidden) name used within libc; export the
   public `mmap' entry point as a weak alias for it.  */
libc_hidden_def (__mmap)
weak_alias (__mmap, mmap)