1 /* mmap.cc
2
3 Copyright 1996, 1997, 1998, 2000, 2001, 2002, 2003 Red Hat, Inc.
4
5 This file is part of Cygwin.
6
7 This software is a copyrighted work licensed under the terms of the
8 Cygwin license. Please consult the file "CYGWIN_LICENSE" for
9 details. */
10
11 #include "winsup.h"
12 #include <unistd.h>
13 #include <stdlib.h>
14 #include <stddef.h>
15 #include <sys/mman.h>
16 #include "security.h"
17 #include "fhandler.h"
18 #include "path.h"
19 #include "dtable.h"
20 #include "cygerrno.h"
21 #include "cygheap.h"
22 #include "pinfo.h"
23 #include "sys/cygwin.h"
24
25 #define PAGE_CNT(bytes) howmany((bytes),getpagesize())
26
27 #define PGBITS (sizeof (DWORD)*8)
28 #define MAPSIZE(pages) howmany ((pages), PGBITS)
29
30 #define MAP_SET(n) (page_map_[(n)/PGBITS] |= (1L << ((n) % PGBITS)))
31 #define MAP_CLR(n) (page_map_[(n)/PGBITS] &= ~(1L << ((n) % PGBITS)))
32 #define MAP_ISSET(n) (page_map_[(n)/PGBITS] & (1L << ((n) % PGBITS)))
33
34 /* Used for accessing the page file (anonymous mmaps). */
35 static fhandler_disk_file fh_paging_file;
36
37 /* Class structure used to keep a record of all current mmap areas
38    in a process, needed both for bookkeeping and for duplicating all
39    mmaps after fork(), since mmaps are not propagated to child
40    processes by Windows. All information must be duplicated by hand,
41    see fixup_mmaps_after_fork().
42
43 The class structure:
44
45 One member of class map per process, global variable mmapped_areas.
46    Contains a dynamic class list array. Each list entry represents all
47    mappings of one file, keyed by file descriptor and file name hash.
48    Each list entry contains a dynamic class mmap_record array. Each
49    mmap_record represents exactly one mapping. For each mapping, there's
50    an additional so-called `page_map'. It's an array of bits, one bit
51    per mapped memory page. The bit is set if the page is accessible,
52    unset otherwise. */
53
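/* Illustrative sketch of the page_map bookkeeping described above (not part
   of the implementation; assumes PGBITS == 32, i.e. a 32-bit DWORD, and a
   4K page size).  A 3-page mapping needs MAPSIZE (3) == 1 DWORD:

     MAP_SET (0);          // page 0 accessible: word 0, bit 0 -> 0x00000001
     MAP_SET (2);          // page 2 accessible: word 0, bit 2 -> 0x00000005
     MAP_CLR (0);          // page 0 revoked                   -> 0x00000004
     if (MAP_ISSET (2))    // true, page 2 is still accessible
       ...

   In general, page `n' is tracked in page_map_[n / PGBITS],
   bit (n % PGBITS). */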
54 class mmap_record
55 {
56 private:
57 int fdesc_;
58 HANDLE mapping_handle_;
59 int devtype_;
60 DWORD access_mode_;
61 _off64_t offset_;
62 DWORD size_to_map_;
63 caddr_t base_address_;
64 DWORD *page_map_;
65
66 public:
67 mmap_record (int fd, HANDLE h, DWORD ac, _off64_t o, DWORD s, caddr_t b) :
68 fdesc_ (fd),
69 mapping_handle_ (h),
70 devtype_ (0),
71 access_mode_ (ac),
72 offset_ (o),
73 size_to_map_ (s),
74 base_address_ (b),
75 page_map_ (NULL)
76 {
77 if (fd >= 0 && !cygheap->fdtab.not_open (fd))
78 devtype_ = cygheap->fdtab[fd]->get_device ();
79 }
80
81 int get_fd () const { return fdesc_; }
82 HANDLE get_handle () const { return mapping_handle_; }
83 DWORD get_device () const { return devtype_; }
84 DWORD get_access () const { return access_mode_; }
85   _off64_t get_offset () const { return offset_; }
86 DWORD get_size () const { return size_to_map_; }
87 caddr_t get_address () const { return base_address_; }
88
89 bool alloc_page_map (_off64_t off, DWORD len);
90 void free_page_map () { if (page_map_) cfree (page_map_); }
91 void fixup_page_map (void);
92
93 DWORD find_unused_pages (DWORD pages);
94 _off64_t map_pages (_off64_t off, DWORD len);
95 BOOL unmap_pages (caddr_t addr, DWORD len);
96 int access (caddr_t address);
97
98 fhandler_base *alloc_fh ();
99 void free_fh (fhandler_base *fh);
100 };
101
102 class list
103 {
104 private:
105 mmap_record *recs;
106 int nrecs, maxrecs;
107 int fd;
108 DWORD hash;
109
110 public:
111 int get_fd () const { return fd; }
112 DWORD get_hash () const { return hash; }
113 mmap_record *get_record (int i) { return i >= nrecs ? NULL : recs + i; }
114
115 void set (int nfd);
116 mmap_record *add_record (mmap_record r, _off64_t off, DWORD len);
117 bool del_record (int i);
118 void free_recs () { if (recs) cfree (recs); }
119 mmap_record *search_record (_off64_t off, DWORD len);
120 long search_record (caddr_t addr, DWORD len, caddr_t &m_addr, DWORD &m_len,
121 long start);
122 };
123
124 class map
125 {
126 private:
127 list *lists;
128 int nlists, maxlists;
129
130 public:
131 list *get_list (int i) { return i >= nlists ? NULL : lists + i; }
132 list *get_list_by_fd (int fd);
133 list *add_list (int fd);
134 void del_list (int i);
135 };
136
137 /* This is the global map structure pointer. It's allocated once on the
138 first call to mmap64(). */
139 static map *mmapped_areas;
140
141 DWORD
142 mmap_record::find_unused_pages (DWORD pages)
143 {
144 DWORD mapped_pages = PAGE_CNT (size_to_map_);
145 DWORD start;
146
147 if (pages > mapped_pages)
148 return (DWORD)-1;
149 for (start = 0; start <= mapped_pages - pages; ++start)
150 if (!MAP_ISSET (start))
151 {
152 DWORD cnt;
153 for (cnt = 0; cnt < pages; ++cnt)
154 if (MAP_ISSET (start + cnt))
155 break;
156 if (cnt >= pages)
157 return start;
158 }
159 return (DWORD)-1;
160 }
161
162 bool
163 mmap_record::alloc_page_map (_off64_t off, DWORD len)
164 {
165 /* Allocate one bit per page */
166 if (!(page_map_ = (DWORD *) ccalloc (HEAP_MMAP,
167 MAPSIZE (PAGE_CNT (size_to_map_)),
168 sizeof (DWORD))))
169 return false;
170
171 if (wincap.virtual_protect_works_on_shared_pages ())
172 {
173 DWORD old_prot;
174
175 off -= offset_;
176 len = PAGE_CNT (len) * getpagesize ();
177 if (off > 0 &&
178 !VirtualProtect (base_address_, off, PAGE_NOACCESS, &old_prot))
179 syscall_printf ("VirtualProtect(%x,%D) failed: %E", base_address_, off);
180 if (off + len < size_to_map_
181 && !VirtualProtect (base_address_ + off + len,
182 size_to_map_ - len - off,
183 PAGE_NOACCESS, &old_prot))
184 syscall_printf ("VirtualProtect(%x,%D) failed: %E",
185 base_address_ + off + len, size_to_map_ - len - off);
186 off /= getpagesize ();
187 len /= getpagesize ();
188 while (len-- > 0)
189 MAP_SET (off + len);
190 }
191 return true;
192 }
193
194 _off64_t
195 mmap_record::map_pages (_off64_t off, DWORD len)
196 {
197   /* Used ONLY if this mapping fits into the chunk of another already
198      performed mapping, in the special case of MAP_ANON|MAP_PRIVATE.
199
200      Otherwise its job is done by alloc_page_map(). */
201 DWORD prot, old_prot;
202 switch (access_mode_)
203 {
204 case FILE_MAP_WRITE:
205 prot = PAGE_READWRITE;
206 break;
207 case FILE_MAP_READ:
208 prot = PAGE_READONLY;
209 break;
210 default:
211 prot = PAGE_WRITECOPY;
212 break;
213 }
214
215 debug_printf ("map_pages (fd=%d, off=%D, len=%u)", fdesc_, off, len);
216 len = PAGE_CNT (len);
217
218 if ((off = find_unused_pages (len)) == (DWORD)-1)
219 return 0L;
220 if (wincap.virtual_protect_works_on_shared_pages ()
221 && !VirtualProtect (base_address_ + off * getpagesize (),
222 len * getpagesize (), prot, &old_prot))
223 {
224 __seterrno ();
225 return (_off64_t)-1;
226 }
227
228 while (len-- > 0)
229 MAP_SET (off + len);
230 return off * getpagesize ();
231 }
232
233 BOOL
234 mmap_record::unmap_pages (caddr_t addr, DWORD len)
235 {
236 DWORD old_prot;
237 DWORD off = addr - base_address_;
238 off /= getpagesize ();
239 len = PAGE_CNT (len);
240 if (wincap.virtual_protect_works_on_shared_pages ()
241 && !VirtualProtect (base_address_ + off * getpagesize (),
242 len * getpagesize (), PAGE_NOACCESS, &old_prot))
243 syscall_printf ("-1 = unmap_pages (): %E");
244
245 for (; len-- > 0; ++off)
246 MAP_CLR (off);
247   /* Return TRUE if all pages are freed, which may result in unmapping
248      the whole chunk. */
249 for (len = MAPSIZE (PAGE_CNT (size_to_map_)); len > 0; )
250 if (page_map_[--len])
251 return FALSE;
252 return TRUE;
253 }
254
255 void
256 mmap_record::fixup_page_map ()
257 {
258 if (!wincap.virtual_protect_works_on_shared_pages ())
259 return;
260
261 DWORD prot, old_prot;
262 switch (access_mode_)
263 {
264 case FILE_MAP_WRITE:
265 prot = PAGE_READWRITE;
266 break;
267 case FILE_MAP_READ:
268 prot = PAGE_READONLY;
269 break;
270 default:
271 prot = PAGE_WRITECOPY;
272 break;
273 }
274
275 for (DWORD off = PAGE_CNT (size_to_map_); off > 0; --off)
276     VirtualProtect (base_address_ + (off - 1) * getpagesize (), getpagesize (),
277 MAP_ISSET (off - 1) ? prot : PAGE_NOACCESS, &old_prot);
278 }
279
280 int
281 mmap_record::access (caddr_t address)
282 {
283 if (address < base_address_ || address >= base_address_ + size_to_map_)
284 return 0;
285 DWORD off = (address - base_address_) / getpagesize ();
286 return MAP_ISSET (off);
287 }
288
289 fhandler_base *
290 mmap_record::alloc_fh ()
291 {
292 if (get_fd () == -1)
293 {
294 fh_paging_file.set_io_handle (INVALID_HANDLE_VALUE);
295 return &fh_paging_file;
296 }
297
298 /* The file descriptor could have been closed or, even
299 worse, could have been reused for another file before
300 the call to fork(). This requires creating a fhandler
301 of the correct type to be sure to call the method of the
302 correct class. */
303 return cygheap->fdtab.build_fhandler (-1, get_device ());
304 }
305
306 void
307 mmap_record::free_fh (fhandler_base *fh)
308 {
309 if (get_fd () != -1)
310 cfree (fh);
311 }
312
313 mmap_record *
314 list::add_record (mmap_record r, _off64_t off, DWORD len)
315 {
316 if (nrecs == maxrecs)
317 {
318 mmap_record *new_recs;
319 if (maxrecs == 0)
320 new_recs = (mmap_record *)
321 cmalloc (HEAP_MMAP, 5 * sizeof (mmap_record));
322 else
323 new_recs = (mmap_record *)
324 crealloc (recs, (maxrecs + 5) * sizeof (mmap_record));
325 if (!new_recs)
326 return NULL;
327 maxrecs += 5;
328 recs = new_recs;
329 }
330 recs[nrecs] = r;
331 if (!recs[nrecs].alloc_page_map (off, len))
332 return NULL;
333 return recs + nrecs++;
334 }
335
336 /* Used in mmap() */
337 mmap_record *
338 list::search_record (_off64_t off, DWORD len)
339 {
340 if (fd == -1 && !off)
341 {
342 len = PAGE_CNT (len);
343 for (int i = 0; i < nrecs; ++i)
344 if (recs[i].find_unused_pages (len) != (DWORD)-1)
345 return recs + i;
346 }
347 else
348 {
349 for (int i = 0; i < nrecs; ++i)
350 if (off >= recs[i].get_offset ()
351 && off + len <= recs[i].get_offset ()
352 + (PAGE_CNT (recs[i].get_size ()) * getpagesize ()))
353 return recs + i;
354 }
355 return NULL;
356 }
357
358 /* Used in munmap() */
359 long
360 list::search_record (caddr_t addr, DWORD len, caddr_t &m_addr, DWORD &m_len,
361 _off_t start)
362 {
363 caddr_t low, high;
364
365 for (int i = start + 1; i < nrecs; ++i)
366 {
367 low = (addr >= recs[i].get_address ()) ? addr : recs[i].get_address ();
368 high = recs[i].get_address ()
369 + (PAGE_CNT (recs[i].get_size ()) * getpagesize ());
370 high = (addr + len < high) ? addr + len : high;
371 if (low < high)
372 {
373 m_addr = low;
374 m_len = high - low;
375 return i;
376 }
377 }
378 return -1;
379 }
380
381 void
382 list::set (int nfd)
383 {
384 if ((fd = nfd) != -1)
385 hash = cygheap->fdtab[fd]->get_namehash ();
386 nrecs = maxrecs = 0;
387 recs = NULL;
388 }
389
390 bool
391 list::del_record (int i)
392 {
393 if (i < nrecs)
394 {
395 recs[i].free_page_map ();
396 for (; i < nrecs - 1; i++)
397 recs[i] = recs[i + 1];
398 nrecs--;
399 }
400 /* Return true if the list is empty which allows the caller to remove
401 this list from the list array. */
402 return !nrecs;
403 }
404
405 list *
406 map::get_list_by_fd (int fd)
407 {
408 int i;
409 for (i=0; i<nlists; i++)
410 /* The fd isn't sufficient since it could already be the fd of another
411 file. So we use the name hash value to identify the file unless
412 it's an anonymous mapping in which case the fd (-1) is sufficient. */
413 if ((fd == -1 && lists[i].get_fd () == -1)
414 || (fd != -1
415 && lists[i].get_hash () == cygheap->fdtab[fd]->get_namehash ()))
416 return lists + i;
417 return 0;
418 }
419
420 list *
421 map::add_list (int fd)
422 {
423 if (nlists == maxlists)
424 {
425 list *new_lists;
426 if (maxlists == 0)
427 new_lists = (list *) cmalloc (HEAP_MMAP, 5 * sizeof (list));
428 else
429 new_lists = (list *) crealloc (lists, (maxlists + 5) * sizeof (list));
430 if (!new_lists)
431 return NULL;
432 maxlists += 5;
433 lists = new_lists;
434 }
435 lists[nlists].set (fd);
436 return lists + nlists++;
437 }
438
439 void
440 map::del_list (int i)
441 {
442 if (i < nlists)
443 {
444 lists[i].free_recs ();
445 for (; i < nlists - 1; i++)
446 lists[i] = lists[i + 1];
447 nlists--;
448 }
449 }
450
451 extern "C" void *
452 mmap64 (void *addr, size_t len, int prot, int flags, int fd, _off64_t off)
453 {
454 syscall_printf ("addr %x, len %u, prot %x, flags %x, fd %d, off %D",
455 addr, len, prot, flags, fd, off);
456
457 static DWORD granularity;
458 if (!granularity)
459 {
460 SYSTEM_INFO si;
461 GetSystemInfo (&si);
462 granularity = si.dwAllocationGranularity;
463 }
464
465 /* Error conditions according to SUSv2 */
466 if (off % getpagesize ()
467 || (!(flags & MAP_SHARED) && !(flags & MAP_PRIVATE))
468 || ((flags & MAP_SHARED) && (flags & MAP_PRIVATE))
469 || ((flags & MAP_FIXED) && ((DWORD)addr % getpagesize ()))
470 || !len)
471 {
472 set_errno (EINVAL);
473 syscall_printf ("-1 = mmap(): EINVAL");
474 return MAP_FAILED;
475 }
476
477 SetResourceLock (LOCK_MMAP_LIST, READ_LOCK | WRITE_LOCK, "mmap");
478
479 if (mmapped_areas == NULL)
480 {
481       /* First mmap call, create the global map structure */
482 mmapped_areas = (map *) ccalloc (HEAP_MMAP, 1, sizeof (map));
483 if (mmapped_areas == NULL)
484 {
485 set_errno (ENOMEM);
486 syscall_printf ("-1 = mmap(): ENOMEM");
487 ReleaseResourceLock (LOCK_MMAP_LIST, READ_LOCK | WRITE_LOCK, "mmap");
488 return MAP_FAILED;
489 }
490 }
491
492 if (flags & MAP_ANONYMOUS)
493 fd = -1;
494
495   /* Always map in multiples of `granularity'-sized chunks. */
496 _off64_t gran_off = off & ~(granularity - 1);
497 DWORD gran_len = howmany (off + len, granularity) * granularity - gran_off;
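  /* Worked example of the rounding above (illustrative only, assuming the
     usual 64K allocation granularity):  off = 70000, len = 10000
       gran_off = 70000 & ~(65536 - 1)                    = 65536
       gran_len = howmany (80000, 65536) * 65536 - 65536  = 65536
     so the underlying Windows mapping covers [65536, 131072) and therefore
     contains the caller's range [70000, 80000); the difference off - gran_off
     is re-added to the address returned at the end of mmap64(). */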
498
499 fhandler_base *fh;
500
501 if (fd != -1)
502 {
503 /* Ensure that fd is open */
504 cygheap_fdget cfd (fd);
505 if (cfd < 0)
506 {
507 syscall_printf ("-1 = mmap(): EBADF");
508 ReleaseResourceLock (LOCK_MMAP_LIST, READ_LOCK | WRITE_LOCK, "mmap");
509 return MAP_FAILED;
510 }
511 fh = cfd;
512 if (fh->get_device () == FH_DISK)
513 {
514 DWORD high;
515 DWORD low = GetFileSize (fh->get_handle (), &high);
516 _off64_t fsiz = ((_off64_t)high << 32) + low;
517          /* Don't allow mappings beginning beyond EOF since Windows can't
518             handle that the way POSIX requires it. FIXME: Still looking for
519             a good idea to allow it nevertheless. */
520 if (gran_off >= fsiz)
521 {
522 set_errno (ENXIO);
523 ReleaseResourceLock (LOCK_MMAP_LIST, READ_LOCK | WRITE_LOCK,
524 "mmap");
525 return MAP_FAILED;
526 }
527 fsiz -= gran_off;
528 if (gran_len > fsiz)
529 gran_len = fsiz;
530 }
531 else if (fh->get_device () == FH_ZERO)
532 /* mmap /dev/zero is like MAP_ANONYMOUS. */
533 fd = -1;
534 }
535 if (fd == -1)
536 {
537 fh_paging_file.set_io_handle (INVALID_HANDLE_VALUE);
538 fh = &fh_paging_file;
539 }
540
541 list *map_list = mmapped_areas->get_list_by_fd (fd);
542
543   /* First check if this mapping fits into the chunk of another
544      already performed mapping. Only valid for MAP_ANON in the special
545      case of MAP_PRIVATE. */
546 if (map_list && fd == -1 && off == 0 && !(flags & MAP_FIXED))
547 {
548 mmap_record *rec;
549 if ((rec = map_list->search_record (off, len)) != NULL)
550 {
551 if ((off = rec->map_pages (off, len)) == (_off64_t)-1)
552 {
553 syscall_printf ("-1 = mmap()");
554 ReleaseResourceLock (LOCK_MMAP_LIST, READ_LOCK|WRITE_LOCK, "mmap");
555 return MAP_FAILED;
556 }
557 caddr_t ret = rec->get_address () + off;
558 syscall_printf ("%x = mmap() succeeded", ret);
559 ReleaseResourceLock (LOCK_MMAP_LIST, READ_LOCK | WRITE_LOCK, "mmap");
560 return ret;
561 }
562 }
563
564 DWORD access = (prot & PROT_WRITE) ? FILE_MAP_WRITE : FILE_MAP_READ;
565   /* Copy-on-write doesn't work at all on 9x when using anonymous maps.
566      Workaround: Anonymous mappings always use normal READ or WRITE
567      access and don't use named file mapping.
568      Copy-on-write also doesn't work properly on 9x with real files.
569      While the changes are not propagated to the file, they are
570      visible to other processes sharing the same file mapping object.
571      Workaround: Don't use named file mapping. That should work since
572      sharing file mappings only works reliably using named
573      file mapping on 9x.
574   */
575 if ((flags & MAP_PRIVATE)
576 && (wincap.has_working_copy_on_write () || fd != -1))
577 access = FILE_MAP_COPY;
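  /* Summary of the resulting access selection (illustrative only, for the
     common NT case where copy-on-write works):

       PROT_WRITE, MAP_SHARED   -> FILE_MAP_WRITE -> PAGE_READWRITE view
       read-only,  MAP_SHARED   -> FILE_MAP_READ  -> PAGE_READONLY view
       MAP_PRIVATE (any prot)   -> FILE_MAP_COPY  -> PAGE_WRITECOPY view

     The PAGE_* protection is chosen from the FILE_MAP_* value in
     fhandler_disk_file::mmap() below. */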
578
579 caddr_t base = (caddr_t)addr;
580 /* This shifts the base address to the next lower 64K boundary.
581 The offset is re-added when evaluating the return value. */
582 if (base)
583 base -= off - gran_off;
584
585 HANDLE h = fh->mmap (&base, gran_len, access, flags, gran_off);
586
587 if (h == INVALID_HANDLE_VALUE)
588 {
589 ReleaseResourceLock (LOCK_MMAP_LIST, READ_LOCK | WRITE_LOCK, "mmap");
590 return MAP_FAILED;
591 }
592
593 /* At this point we should have a successfully mmapped area.
594 Now it's time for bookkeeping stuff. */
595 if (fd == -1)
596 gran_len = PAGE_CNT (gran_len) * getpagesize ();
597 mmap_record mmap_rec (fd, h, access, gran_off, gran_len, base);
598
599 /* Get list of mmapped areas for this fd, create a new one if
600 one does not exist yet.
601 */
602 if (!map_list)
603 {
604 /* Create a new one */
605 map_list = mmapped_areas->add_list (fd);
606 if (!map_list)
607 {
608 fh->munmap (h, base, gran_len);
609 set_errno (ENOMEM);
610 syscall_printf ("-1 = mmap(): ENOMEM");
611 ReleaseResourceLock (LOCK_MMAP_LIST, READ_LOCK | WRITE_LOCK, "mmap");
612 return MAP_FAILED;
613 }
614 }
615
616 /* Insert into the list */
617 mmap_record *rec = map_list->add_record (mmap_rec, off,
618 len > gran_len ? gran_len : len);
619 if (!rec)
620 {
621 fh->munmap (h, base, gran_len);
622 set_errno (ENOMEM);
623 syscall_printf ("-1 = mmap(): ENOMEM");
624 ReleaseResourceLock (LOCK_MMAP_LIST, READ_LOCK | WRITE_LOCK, "mmap");
625 return MAP_FAILED;
626 }
627
628 caddr_t ret = rec->get_address () + (off - gran_off);
629 syscall_printf ("%x = mmap() succeeded", ret);
630 ReleaseResourceLock (LOCK_MMAP_LIST, READ_LOCK | WRITE_LOCK, "mmap");
631 return ret;
632 }
633
634 extern "C" void *
635 mmap (void *addr, size_t len, int prot, int flags, int fd, _off_t off)
636 {
637 return mmap64 (addr, len, prot, flags, fd, (_off64_t)off);
638 }
639
640 /* munmap () removes all mmapped pages between addr and addr+len. */
641
642 extern "C" int
643 munmap (void *addr, size_t len)
644 {
645 syscall_printf ("munmap (addr %x, len %u)", addr, len);
646
647 /* Error conditions according to SUSv3 */
648 if (!addr || ((DWORD)addr % getpagesize ()) || !len
649 || IsBadReadPtr (addr, len))
650 {
651 set_errno (EINVAL);
652 syscall_printf ("-1 = munmap(): Invalid parameters");
653 return -1;
654 }
655
656 SetResourceLock (LOCK_MMAP_LIST, WRITE_LOCK | READ_LOCK, "munmap");
657 if (mmapped_areas == NULL)
658 {
659 syscall_printf ("-1 = munmap(): mmapped_areas == NULL");
660 ReleaseResourceLock (LOCK_MMAP_LIST, WRITE_LOCK | READ_LOCK, "munmap");
661 return 0;
662 }
663
664   /* Iterate through the map, unmapping pages between addr and addr+len
665      in all lists. */
666 list *map_list;
667 for (int list_idx = 0;
668 (map_list = mmapped_areas->get_list (list_idx));
669 ++list_idx)
670 {
671 long record_idx = -1;
672 caddr_t u_addr;
673 DWORD u_len;
674
675 while ((record_idx = map_list->search_record((caddr_t)addr, len, u_addr,
676 u_len, record_idx)) >= 0)
677 {
678 mmap_record *rec = map_list->get_record (record_idx);
679 if (rec->unmap_pages (u_addr, u_len))
680 {
681 /* The whole record has been unmapped, so... */
682 fhandler_base *fh = rec->alloc_fh ();
683 fh->munmap (rec->get_handle (), (caddr_t)addr, len);
684 rec->free_fh (fh);
685
686 /* ...delete the record. */
687 if (map_list->del_record (record_idx--))
688 {
689 /* Yay, the last record has been removed from the list,
690 we can remove the list now, too. */
691 mmapped_areas->del_list (list_idx--);
692 break;
693 }
694 }
695 }
696 }
697
698 ReleaseResourceLock (LOCK_MMAP_LIST, WRITE_LOCK | READ_LOCK, "munmap");
699 syscall_printf ("0 = munmap(): %x", addr);
700 return 0;
701 }
702
703 /* Sync file with memory. Ignore flags for now. */
704
705 extern "C" int
706 msync (void *addr, size_t len, int flags)
707 {
708 syscall_printf ("addr = %x, len = %u, flags = %x",
709 addr, len, flags);
710
711 /* However, check flags for validity. */
712 if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE))
713 || ((flags & MS_ASYNC) && (flags & MS_SYNC)))
714 {
715 syscall_printf ("-1 = msync(): Invalid flags");
716 set_errno (EINVAL);
717 return -1;
718 }
719
720 SetResourceLock (LOCK_MMAP_LIST, WRITE_LOCK | READ_LOCK, "msync");
721 /* Check if a mmap'ed area was ever created */
722 if (mmapped_areas == NULL)
723 {
724 syscall_printf ("-1 = msync(): mmapped_areas == NULL");
725 set_errno (EINVAL);
726 ReleaseResourceLock (LOCK_MMAP_LIST, WRITE_LOCK | READ_LOCK, "msync");
727 return -1;
728 }
729
730 /* Iterate through the map, looking for the mmapped area.
731 Error if not found. */
732
733 list *map_list;
734 for (int list_idx = 0;
735 (map_list = mmapped_areas->get_list (list_idx));
736 ++list_idx)
737 {
738 mmap_record *rec;
739 for (int record_idx = 0;
740 (rec = map_list->get_record (record_idx));
741 ++record_idx)
742 {
743 if (rec->access ((caddr_t)addr))
744 {
745 /* Check whole area given by len. */
746 for (DWORD i = getpagesize (); i < len; ++i)
747 if (!rec->access ((caddr_t)addr + i))
748 goto invalid_address_range;
749 fhandler_base *fh = rec->alloc_fh ();
750 int ret = fh->msync (rec->get_handle (), (caddr_t)addr, len,
751 flags);
752 rec->free_fh (fh);
753
754 if (ret)
755 syscall_printf ("%d = msync(): %E", ret);
756 else
757 syscall_printf ("0 = msync()");
758
759 ReleaseResourceLock (LOCK_MMAP_LIST, WRITE_LOCK | READ_LOCK,
760 "msync");
761 return 0;
762 }
763 }
764 }
765
766 invalid_address_range:
767 /* SUSv2: Return code if indicated memory was not mapped is ENOMEM. */
768 set_errno (ENOMEM);
769 syscall_printf ("-1 = msync(): ENOMEM");
770
771 ReleaseResourceLock (LOCK_MMAP_LIST, WRITE_LOCK | READ_LOCK, "msync");
772 return -1;
773 }
774
775 /* Set memory protection */
776
777 extern "C" int
778 mprotect (void *addr, size_t len, int prot)
779 {
780 DWORD old_prot;
781 DWORD new_prot = 0;
782
783 syscall_printf ("mprotect (addr %x, len %u, prot %x)", addr, len, prot);
784
785 if (!wincap.virtual_protect_works_on_shared_pages ()
786 && addr >= (caddr_t)0x80000000 && addr <= (caddr_t)0xBFFFFFFF)
787 {
788 syscall_printf ("0 = mprotect (9x: No VirtualProtect on shared memory)");
789 return 0;
790 }
791
792 switch (prot)
793 {
794 case PROT_READ | PROT_WRITE | PROT_EXEC:
795 case PROT_WRITE | PROT_EXEC:
796 new_prot = PAGE_EXECUTE_READWRITE;
797 break;
798 case PROT_READ | PROT_WRITE:
799 case PROT_WRITE:
800 new_prot = PAGE_READWRITE;
801 break;
802 case PROT_READ | PROT_EXEC:
803 new_prot = PAGE_EXECUTE_READ;
804 break;
805 case PROT_READ:
806 new_prot = PAGE_READONLY;
807 break;
808 case PROT_EXEC:
809 new_prot = PAGE_EXECUTE;
810 break;
811 case PROT_NONE:
812 new_prot = PAGE_NOACCESS;
813 break;
814 default:
815 syscall_printf ("-1 = mprotect (): invalid prot value");
816 set_errno (EINVAL);
817 return -1;
818 }
819
820 if (VirtualProtect (addr, len, new_prot, &old_prot) == 0)
821 {
822 __seterrno ();
823 syscall_printf ("-1 = mprotect (): %E");
824 return -1;
825 }
826
827 syscall_printf ("0 = mprotect ()");
828 return 0;
829 }
830
831 /*
832 * Base implementation:
833 *
834 * `mmap' returns ENODEV as documented in SUSv2.
835 * In contrast to the global function implementation, the member function
836 * `mmap' has to return the mapped base address in `addr' and the handle to
837 * the mapping object as return value. In case of failure, the fhandler
838 * mmap has to close that handle by itself and return INVALID_HANDLE_VALUE.
839 *
840  * `munmap' and `msync' additionally get the handle to the mapping
841  * object as their first parameter.
842 */
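/* A minimal sketch of that calling convention, as exercised by mmap64()
   above (illustrative only; `fh', `base', `gran_len', `access', `flags'
   and `gran_off' stand for the caller's locals):

     caddr_t base = requested_addr;                  // or NULL
     HANDLE h = fh->mmap (&base, gran_len, access, flags, gran_off);
     if (h == INVALID_HANDLE_VALUE)
       return MAP_FAILED;         // fhandler has already cleaned up
     ...
     fh->munmap (h, base, gran_len);                 // handle passed back in
*/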
843 HANDLE
844 fhandler_base::mmap (caddr_t *addr, size_t len, DWORD access,
845 int flags, _off64_t off)
846 {
847 set_errno (ENODEV);
848 return INVALID_HANDLE_VALUE;
849 }
850
851 int
852 fhandler_base::munmap (HANDLE h, caddr_t addr, size_t len)
853 {
854 set_errno (ENODEV);
855 return -1;
856 }
857
858 int
859 fhandler_base::msync (HANDLE h, caddr_t addr, size_t len, int flags)
860 {
861 set_errno (ENODEV);
862 return -1;
863 }
864
865 BOOL
866 fhandler_base::fixup_mmap_after_fork (HANDLE h, DWORD access, DWORD offset,
867 DWORD size, void *address)
868 {
869 set_errno (ENODEV);
870 return -1;
871 }
872
873 /* Implementation for disk files. */
874 HANDLE
875 fhandler_disk_file::mmap (caddr_t *addr, size_t len, DWORD access,
876 int flags, _off64_t off)
877 {
878 DWORD protect;
879
880 switch (access)
881 {
882 case FILE_MAP_WRITE:
883 protect = PAGE_READWRITE;
884 break;
885 case FILE_MAP_READ:
886 protect = PAGE_READONLY;
887 break;
888 default:
889 protect = PAGE_WRITECOPY;
890 break;
891 }
892
893 HANDLE h;
894
895 /* On 9x/ME try first to open the mapping by name when opening a
896 shared file object. This is needed since 9x/ME only shares
897 objects between processes by name. What a mess... */
898 if (wincap.share_mmaps_only_by_name ()
899 && get_handle () != INVALID_HANDLE_VALUE
900 && !(access & FILE_MAP_COPY))
901 {
902       /* Grrr, all this is just needed to try to get a reliable
903          mapping of the same file. Even that isn't bullet-proof, but
904          it does its best... */
905 char namebuf[MAX_PATH];
906 cygwin_conv_to_full_posix_path (get_name (), namebuf);
907 for (int i = strlen (namebuf) - 1; i >= 0; --i)
908 namebuf[i] = cyg_tolower (namebuf [i]);
909
910 debug_printf ("named sharing");
911 if (!(h = OpenFileMapping (access, TRUE, namebuf)))
912 h = CreateFileMapping (get_handle (), &sec_none, protect, 0, 0, namebuf);
913 }
914 else
915 h = CreateFileMapping (get_handle (), &sec_none, protect, 0,
916 get_handle () == INVALID_HANDLE_VALUE ? len : 0,
917 NULL);
918 if (!h)
919 {
920 __seterrno ();
921 syscall_printf ("-1 = mmap(): CreateFileMapping failed with %E");
922 return INVALID_HANDLE_VALUE;
923 }
924
925 DWORD high = off >> 32, low = off & 0xffffffff;
926 void *base = NULL;
927   /* If a non-zero address is given, try mapping at that address first.
928      If that fails and MAP_FIXED isn't set, try again with a NULL address. */
929 if (*addr)
930 base = MapViewOfFileEx (h, access, high, low, len, *addr);
931 if (!base && !(flags & MAP_FIXED))
932 base = MapViewOfFileEx (h, access, high, low, len, NULL);
933 debug_printf ("%x = MapViewOfFileEx (h:%x, access:%x, 0, off:%D, "
934 "len:%u, addr:%x)", base, h, access, off, len, *addr);
935 if (!base || ((flags & MAP_FIXED) && base != *addr))
936 {
937 if (!base)
938 {
939 __seterrno ();
940 syscall_printf ("-1 = mmap(): MapViewOfFileEx failed with %E");
941 }
942 else
943 {
944 set_errno (EINVAL);
945 syscall_printf ("-1 = mmap(): address shift with MAP_FIXED given");
946 }
947 CloseHandle (h);
948 return INVALID_HANDLE_VALUE;
949 }
950
951 *addr = (caddr_t) base;
952 return h;
953 }
954
955 int
956 fhandler_disk_file::munmap (HANDLE h, caddr_t addr, size_t len)
957 {
958 UnmapViewOfFile (addr);
959 CloseHandle (h);
960 return 0;
961 }
962
963 int
964 fhandler_disk_file::msync (HANDLE h, caddr_t addr, size_t len, int flags)
965 {
966 if (FlushViewOfFile (addr, len) == 0)
967 {
968 __seterrno ();
969 return -1;
970 }
971 return 0;
972 }
973
974 BOOL
975 fhandler_disk_file::fixup_mmap_after_fork (HANDLE h, DWORD access, DWORD offset,
976 DWORD size, void *address)
977 {
978 /* Re-create the MapViewOfFileEx call */
979 void *base = MapViewOfFileEx (h, access, 0, offset, size, address);
980 if (base != address)
981 {
982 MEMORY_BASIC_INFORMATION m;
983 (void) VirtualQuery (address, &m, sizeof (m));
984 system_printf ("requested %p != %p mem alloc base %p, state %p, size %d, %E",
985 address, base, m.AllocationBase, m.State, m.RegionSize);
986 }
987 return base == address;
988 }
989
990 /*
991  * Called to re-create all the file mappings in a forked
992  * child. Called from the child during initialization. At this
993  * point we are passed a valid mmapped_areas map, and all the
994  * HANDLEs are valid for the child, but none of the
995  * mapped areas are in our address space. We need to iterate
996  * through the map, doing the MapViewOfFile calls.
997  */
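/*
 * For FILE_MAP_COPY (MAP_PRIVATE) records the view's contents are private
 * to the parent, so after re-mapping, the child also copies every accessible
 * page from the parent.  Outline of the recovery path used below when the
 * straight copy fails (descriptive sketch, not additional code):
 *
 *   if (!ReadProcessMemory (parent, address, address, pagesize, NULL))
 *     {
 *       // the parent page is probably write-protected copy-on-write storage
 *       VirtualProtectEx (parent, address, pagesize, PAGE_READONLY, &old);
 *       ReadProcessMemory (parent, address, address, pagesize, NULL);
 *       VirtualProtectEx (parent, address, pagesize, old, &dummy); // restore
 *       VirtualProtect (address, pagesize, old, &dummy);   // mirror in child
 *     }
 */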
998
999 int __stdcall
1000 fixup_mmaps_after_fork (HANDLE parent)
1001 {
1002
1003 debug_printf ("recreate_mmaps_after_fork, mmapped_areas %p", mmapped_areas);
1004
1005 /* Check if a mmapped area was ever created */
1006 if (mmapped_areas == NULL)
1007 return 0;
1008
1009 /* Iterate through the map */
1010 list *map_list;
1011 for (int list_idx = 0;
1012 (map_list = mmapped_areas->get_list (list_idx));
1013 ++list_idx)
1014 {
1015 mmap_record *rec;
1016 for (int record_idx = 0;
1017 (rec = map_list->get_record (record_idx));
1018 ++record_idx)
1019 {
1020
1021 debug_printf ("fd %d, h %x, access %x, offset %D, size %u, address %p",
1022 rec->get_fd (), rec->get_handle (), rec->get_access (),
1023 rec->get_offset (), rec->get_size (), rec->get_address ());
1024
1025 fhandler_base *fh = rec->alloc_fh ();
1026 BOOL ret = fh->fixup_mmap_after_fork (rec->get_handle (),
1027 rec->get_access (),
1028 rec->get_offset (),
1029 rec->get_size (),
1030 rec->get_address ());
1031 rec->free_fh (fh);
1032
1033 if (!ret)
1034 return -1;
1035 if (rec->get_access () == FILE_MAP_COPY)
1036 {
1037 for (char *address = rec->get_address ();
1038 address < rec->get_address () + rec->get_size ();
1039 address += getpagesize ())
1040 if (rec->access (address)
1041 && !ReadProcessMemory (parent, address, address,
1042 getpagesize (), NULL))
1043 {
1044 DWORD old_prot;
1045 DWORD last_error = GetLastError ();
1046
1047 if (last_error != ERROR_PARTIAL_COPY
1048 && last_error != ERROR_NOACCESS
1049 || !wincap.virtual_protect_works_on_shared_pages ())
1050 {
1051 system_printf ("ReadProcessMemory failed for "
1052 "MAP_PRIVATE address %p, %E",
1053 rec->get_address ());
1054 return -1;
1055 }
1056 if (!VirtualProtectEx (parent,
1057 address, getpagesize (),
1058 PAGE_READONLY, &old_prot))
1059 {
1060 system_printf ("VirtualProtectEx failed for "
1061 "MAP_PRIVATE address %p, %E",
1062 rec->get_address ());
1063 return -1;
1064 }
1065 else
1066 {
1067 BOOL ret;
1068 DWORD dummy_prot;
1069
1070 ret = ReadProcessMemory (parent, address, address,
1071 getpagesize (), NULL);
1072 if (!VirtualProtectEx(parent,
1073 address, getpagesize (),
1074 old_prot, &dummy_prot))
1075 system_printf ("WARNING: VirtualProtectEx to "
1076 "return to previous state "
1077 "in parent failed for "
1078 "MAP_PRIVATE address %p, %E",
1079 rec->get_address ());
1080 if (!VirtualProtect (address, getpagesize (),
1081 old_prot, &dummy_prot))
1082 system_printf ("WARNING: VirtualProtect to copy "
1083 "protection to child failed for"
1084 "MAP_PRIVATE address %p, %E",
1085 rec->get_address ());
1086 if (!ret)
1087 {
1088 system_printf ("ReadProcessMemory (2nd try) "
1089 "failed for "
1090 "MAP_PRIVATE address %p, %E",
1091 rec->get_address ());
1092 return -1;
1093 }
1094 }
1095 }
1096 }
1097 rec->fixup_page_map ();
1098 }
1099 }
1100
1101 debug_printf ("succeeded");
1102 return 0;
1103 }