3 Copyright 1996, 1997, 1998, 2000, 2001, 2002, 2003 Red Hat, Inc.
5 This file is part of Cygwin.
7 This software is a copyrighted work licensed under the terms of the
8 Cygwin license. Please consult the file "CYGWIN_LICENSE" for
23 #include "sys/cygwin.h"
/* Page-bitmap helpers.  PAGE_CNT converts a byte count into a page count
   (howmany = divide, rounding up).  PGBITS is the number of bits per
   page_map_ word (sizeof (DWORD) * 8); MAPSIZE yields how many DWORDs a
   bitmap of `pages' bits needs.  MAP_SET / MAP_CLR / MAP_ISSET manipulate
   the one-bit-per-mapped-page bitmap (page_map_) kept in each mmap_record.
   NOTE(review): this listing is incomplete — the embedded original line
   numbers skip, so surrounding lines are missing from this view. */
25 #define PAGE_CNT(bytes) howmany((bytes),getpagesize())
27 #define PGBITS (sizeof (DWORD)*8)
28 #define MAPSIZE(pages) howmany ((pages), PGBITS)
30 #define MAP_SET(n) (page_map_[(n)/PGBITS] |= (1L << ((n) % PGBITS)))
31 #define MAP_CLR(n) (page_map_[(n)/PGBITS] &= ~(1L << ((n) % PGBITS)))
32 #define MAP_ISSET(n) (page_map_[(n)/PGBITS] & (1L << ((n) % PGBITS)))
34 /* Used for accessing the page file (anonymous mmaps). */
/* Shared dummy fhandler handed out by mmap_record::alloc_fh () for
   anonymous mappings; its io handle is reset to INVALID_HANDLE_VALUE
   before each use (see alloc_fh below). */
35 static fhandler_disk_file fh_paging_file
;
37 /* Class structure used to keep a record of all current mmap areas
38 in a process. Needed for bookkeeping all mmaps in a process and
39 for duplicating all mmaps after fork() since mmaps are not propagated
40 to child processes by Windows. All information must be duplicated
41 by hand, see fixup_mmaps_after_fork().
45 One member of class map per process, global variable mmapped_areas.
46 Contains a dynamic class list array. Each list entry represents all
47 mapping to a file, keyed by file descriptor and file name hash.
48 Each list entry contains a dynamic class mmap_record array. Each
49 mmap_record represents exactly one mapping. For each mapping, there's
50 an additional so called `page_map'. It's an array of bits, one bit
51 per mapped memory page. The bit is set if the page is accessible,
/* NOTE(review): class mmap_record — one record per mapping.  The class
   header and several members (original lines 52-57, 59-62, 64-66, 68-76,
   79-80, 88, 92, 97, 100 ff.) are missing from this listing; only the
   fragments below are visible.  Visible members: mapping_handle_ (the
   Windows file-mapping HANDLE), base_address_ (mapped base), plus — via
   the accessors — fdesc_, devtype_, access_mode_, offset_, size_to_map_
   and page_map_ declared in the missing portion. */
58 HANDLE mapping_handle_
;
63 caddr_t base_address_
;
/* Constructor: captures fd, mapping handle, access mode, file offset,
   size and base address (initializer list is in the missing lines).
   The body then records the underlying device type when fd refers to an
   open descriptor, so a correctly-typed fhandler can be rebuilt after
   fork even if the fd was closed/reused (see alloc_fh). */
67 mmap_record (int fd
, HANDLE h
, DWORD ac
, _off64_t o
, DWORD s
, caddr_t b
) :
77 if (fd
>= 0 && !cygheap
->fdtab
.not_open (fd
))
78 devtype_
= cygheap
->fdtab
[fd
]->get_device ();
/* Trivial accessors for the record's bookkeeping fields. */
81 int get_fd () const { return fdesc_
; }
82 HANDLE
get_handle () const { return mapping_handle_
; }
83 DWORD
get_device () const { return devtype_
; }
84 DWORD
get_access () const { return access_mode_
; }
85 DWORD
get_offset () const { return offset_
; }
86 DWORD
get_size () const { return size_to_map_
; }
87 caddr_t
get_address () const { return base_address_
; }
/* Page-map management: allocate/free the one-bit-per-page bitmap and
   re-apply per-page protections after fork (see definitions below). */
89 bool alloc_page_map (_off64_t off
, DWORD len
);
90 void free_page_map () { if (page_map_
) cfree (page_map_
); }
91 void fixup_page_map (void);
93 DWORD
find_unused_pages (DWORD pages
);
94 _off64_t
map_pages (_off64_t off
, DWORD len
);
95 BOOL
unmap_pages (caddr_t addr
, DWORD len
);
96 int access (caddr_t address
);
/* alloc_fh/free_fh: obtain a (possibly temporary) fhandler suitable for
   calling the device-specific mmap/munmap/msync methods. */
98 fhandler_base
*alloc_fh ();
99 void free_fh (fhandler_base
*fh
);
/* NOTE(review): class list — all mmap_records for one file, keyed by fd
   and name hash.  The class header and several members (original lines
   100-110, 114-115, 121-127) are missing from this listing; `fd', `hash',
   `recs', `nrecs' and `maxrecs' are declared in the missing portion. */
111 int get_fd () const { return fd
; }
112 DWORD
get_hash () const { return hash
; }
/* get_record: bounds-checked access into the dynamic recs array;
   returns NULL past the end so callers can iterate until NULL. */
113 mmap_record
*get_record (int i
) { return i
>= nrecs
? NULL
: recs
+ i
; }
116 mmap_record
*add_record (mmap_record r
, _off64_t off
, DWORD len
);
117 bool del_record (int i
);
118 void free_recs () { if (recs
) cfree (recs
); }
/* Two search overloads: by (offset, len) — used by mmap64 for the
   MAP_ANON|MAP_PRIVATE reuse case — and by (addr, len, …) — used by
   munmap (the rest of this declaration is in the missing lines). */
119 mmap_record
*search_record (_off64_t off
, DWORD len
);
120 long search_record (caddr_t addr
, DWORD len
, caddr_t
&m_addr
, DWORD
&m_len
,
/* NOTE(review): class map — per-process container of `list' entries
   (one per mapped file).  The class header and the `lists' member are in
   lines missing from this listing. */
128 int nlists
, maxlists
;
/* get_list: bounds-checked access; NULL past the end, mirroring
   list::get_record above. */
131 list
*get_list (int i
) { return i
>= nlists
? NULL
: lists
+ i
; }
132 list
*get_list_by_fd (int fd
);
133 list
*add_list (int fd
);
134 void del_list (int i
);
137 /* This is the global map structure pointer. It's allocated once on the
138 first call to mmap64(). */
139 static map
*mmapped_areas
;
/* find_unused_pages: scan page_map_ for a run of `pages' consecutive
   clear (unused) bits inside this record's mapped range.  Visible logic:
   bail out if the request exceeds the mapped page count, then for each
   candidate start position verify all `pages' bits are clear.
   NOTE(review): the return statements and braces (original lines 143,
   145-146, 148, 151-152, 155-161) are missing from this listing —
   presumably it returns the found start index or (DWORD)-1; verify
   against the full source. */
142 mmap_record::find_unused_pages (DWORD pages
)
144 DWORD mapped_pages
= PAGE_CNT (size_to_map_
);
147 if (pages
> mapped_pages
)
149 for (start
= 0; start
<= mapped_pages
- pages
; ++start
)
150 if (!MAP_ISSET (start
))
153 for (cnt
= 0; cnt
< pages
; ++cnt
)
154 if (MAP_ISSET (start
+ cnt
))
/* alloc_page_map: allocate the one-bit-per-page bitmap for this record
   and, where VirtualProtect works on shared pages (NT, not 9x), fence
   off the parts of the 64K-granular mapping that lie outside the
   [off, off+len) region the caller actually requested by marking them
   PAGE_NOACCESS.  Finally converts off/len from bytes to pages
   (the bit-setting loop that follows is in lines missing from this
   listing, as are several statements — original lines 164, 168-170,
   172-175, 177, 188 ff.). */
163 mmap_record::alloc_page_map (_off64_t off
, DWORD len
)
165 /* Allocate one bit per page */
166 if (!(page_map_
= (DWORD
*) ccalloc (HEAP_MMAP
,
167 MAPSIZE (PAGE_CNT (size_to_map_
)),
171 if (wincap
.virtual_protect_works_on_shared_pages ())
/* Round len up to a whole number of pages. */
176 len
= PAGE_CNT (len
) * getpagesize ();
/* Protect the leading [0, off) slice of the mapping (condition head on
   the missing original line 177). */
178 !VirtualProtect (base_address_
, off
, PAGE_NOACCESS
, &old_prot
))
179 syscall_printf ("VirtualProtect(%x,%D) failed: %E", base_address_
, off
);
/* Protect the trailing slice beyond off+len, if any. */
180 if (off
+ len
< size_to_map_
181 && !VirtualProtect (base_address_
+ off
+ len
,
182 size_to_map_
- len
- off
,
183 PAGE_NOACCESS
, &old_prot
))
184 syscall_printf ("VirtualProtect(%x,%D) failed: %E",
185 base_address_
+ off
+ len
, size_to_map_
- len
- off
);
/* From here on off and len are in pages, not bytes. */
186 off
/= getpagesize ();
187 len
/= getpagesize ();
/* map_pages: satisfy a new MAP_ANON|MAP_PRIVATE request out of the free
   pages of this existing record.  Picks a Windows protection matching
   access_mode_, finds a free run of pages, re-enables access on it and
   returns the byte offset of the run.  Case labels, braces, the
   bit-setting loop and the error returns (original lines 196, 199,
   203-204, 206-207, 209-214, 217, 219, 223-229) are missing from this
   listing. */
195 mmap_record::map_pages (_off64_t off
, DWORD len
)
197 /* Used ONLY if this mapping matches into the chunk of another already
198 performed mapping in a special case of MAP_ANON|MAP_PRIVATE.
200 Otherwise it's job is now done by alloc_page_map(). */
201 DWORD prot
, old_prot
;
/* Translate the record's FILE_MAP_* access mode into a PAGE_*
   protection (the case labels themselves are in the missing lines). */
202 switch (access_mode_
)
205 prot
= PAGE_READWRITE
;
208 prot
= PAGE_READONLY
;
211 prot
= PAGE_WRITECOPY
;
215 debug_printf ("map_pages (fd=%d, off=%D, len=%u)", fdesc_
, off
, len
);
216 len
= PAGE_CNT (len
);
/* off is reused: now the page index of the free run (or -1 = no room). */
218 if ((off
= find_unused_pages (len
)) == (DWORD
)-1)
220 if (wincap
.virtual_protect_works_on_shared_pages ()
221 && !VirtualProtect (base_address_
+ off
* getpagesize (),
222 len
* getpagesize (), prot
, &old_prot
))
/* Return the byte offset of the newly mapped run. */
230 return off
* getpagesize ();
/* unmap_pages: mark the pages covering [addr, addr+len) unused again —
   revoke access via VirtualProtect (where supported) and clear their
   page_map_ bits — then report whether the whole record is now free so
   the caller can unmap the underlying view.  The MAP_CLR loop body, the
   old_prot declaration and the return statements (original lines
   235-236, 244, 246, 248, 251-253) are missing from this listing. */
234 mmap_record::unmap_pages (caddr_t addr
, DWORD len
)
237 DWORD off
= addr
- base_address_
;
238 off
/= getpagesize ();
239 len
= PAGE_CNT (len
);
240 if (wincap
.virtual_protect_works_on_shared_pages ()
241 && !VirtualProtect (base_address_
+ off
* getpagesize (),
242 len
* getpagesize (), PAGE_NOACCESS
, &old_prot
))
243 syscall_printf ("-1 = unmap_pages (): %E");
/* Clear the page_map_ bit for each unmapped page (loop body missing). */
245 for (; len
-- > 0; ++off
)
247 /* Return TRUE if all pages are free'd which may result in unmapping
/* Scan every bitmap word; any nonzero word means pages remain mapped. */
249 for (len
= MAPSIZE (PAGE_CNT (size_to_map_
)); len
> 0; )
250 if (page_map_
[--len
])
/* fixup_page_map: after fork, re-apply per-page protections in the
   child — used pages (bit set) get the protection matching access_mode_,
   unused pages get PAGE_NOACCESS.  No-op on systems where
   VirtualProtect doesn't work on shared pages (9x).  Case labels and
   the early return (original lines 257, 259-260, 263-264, 266-267,
   269-270, 272-274) are missing from this listing.
   NOTE(review): the loop protects the page at byte offset
   off * getpagesize () while testing bit (off - 1) — with off running
   from PAGE_CNT(size) down to 1 this looks like an off-by-one (the
   first page is never protected and one page past the end is touched);
   verify against the full source / later revisions. */
256 mmap_record::fixup_page_map ()
258 if (!wincap
.virtual_protect_works_on_shared_pages ())
261 DWORD prot
, old_prot
;
262 switch (access_mode_
)
265 prot
= PAGE_READWRITE
;
268 prot
= PAGE_READONLY
;
271 prot
= PAGE_WRITECOPY
;
275 for (DWORD off
= PAGE_CNT (size_to_map_
); off
> 0; --off
)
276 VirtualProtect (base_address_
+ off
* getpagesize (), getpagesize (),
277 MAP_ISSET (off
- 1) ? prot
: PAGE_NOACCESS
, &old_prot
);
/* access: report whether `address' falls on a currently-mapped page of
   this record — range check first, then test the page's bitmap bit.
   The out-of-range return (original line 284) is missing from this
   listing. */
281 mmap_record::access (caddr_t address
)
283 if (address
< base_address_
|| address
>= base_address_
+ size_to_map_
)
285 DWORD off
= (address
- base_address_
) / getpagesize ();
286 return MAP_ISSET (off
);
/* alloc_fh: return an fhandler usable for the device-specific
   mmap/munmap/msync calls.  For anonymous mappings the shared static
   fh_paging_file is reused (its io handle reset each time); otherwise a
   fresh fhandler of the recorded device type is built — see the comment
   below.  The anonymous-check branch (original lines 291-293) is
   missing from this listing. */
290 mmap_record::alloc_fh ()
294 fh_paging_file
.set_io_handle (INVALID_HANDLE_VALUE
);
295 return &fh_paging_file
;
298 /* The file descriptor could have been closed or, even
299 worse, could have been reused for another file before
300 the call to fork(). This requires creating a fhandler
301 of the correct type to be sure to call the method of the
303 return cygheap
->fdtab
.build_fhandler (-1, get_device ());
/* free_fh: release an fhandler obtained from alloc_fh (body is in lines
   missing from this listing; presumably frees only non-anonymous
   fhandlers — verify against the full source). */
307 mmap_record::free_fh (fhandler_base
*fh
)
/* add_record: append a copy of `r' to the dynamic recs array, growing
   it by 5 entries at a time (cmalloc for the first allocation, crealloc
   afterwards), then allocate the new record's page map.  Returns a
   pointer to the stored record.  The branch structure, failure returns
   and the copy into recs[nrecs] (original lines 315, 317, 319, 322,
   325-330, 332) are missing from this listing. */
314 list::add_record (mmap_record r
, _off64_t off
, DWORD len
)
316 if (nrecs
== maxrecs
)
318 mmap_record
*new_recs
;
/* First allocation: room for 5 records. */
320 new_recs
= (mmap_record
*)
321 cmalloc (HEAP_MMAP
, 5 * sizeof (mmap_record
));
/* Subsequent growth: extend by 5 more records. */
323 new_recs
= (mmap_record
*)
324 crealloc (recs
, (maxrecs
+ 5) * sizeof (mmap_record
));
331 if (!recs
[nrecs
].alloc_page_map (off
, len
))
333 return recs
+ nrecs
++;
/* search_record (off, len): find an existing record this request can be
   satisfied from.  For anonymous lists (fd == -1, off == 0) a record
   with enough unused pages qualifies; otherwise the request must lie
   entirely within a record's rounded-up mapped range.  Return
   statements and braces (original lines 339, 341, 345-348, 353-355)
   are missing from this listing. */
338 list::search_record (_off64_t off
, DWORD len
)
340 if (fd
== -1 && !off
)
342 len
= PAGE_CNT (len
);
343 for (int i
= 0; i
< nrecs
; ++i
)
344 if (recs
[i
].find_unused_pages (len
) != (DWORD
)-1)
349 for (int i
= 0; i
< nrecs
; ++i
)
350 if (off
>= recs
[i
].get_offset ()
351 && off
+ len
<= recs
[i
].get_offset ()
352 + (PAGE_CNT (recs
[i
].get_size ()) * getpagesize ()))
358 /* Used in munmap() */
/* search_record (addr, len, m_addr, m_len, start): starting after index
   `start', find the next record whose mapped range overlaps
   [addr, addr+len); the visible lines clip the overlap into low/high,
   which presumably come back via m_addr/m_len.  The `start' parameter,
   return statements and the overlap test (original lines 361-364, 366,
   371-380) are missing from this listing. */
360 list::search_record (caddr_t addr
, DWORD len
, caddr_t
&m_addr
, DWORD
&m_len
,
365 for (int i
= start
+ 1; i
< nrecs
; ++i
)
/* low = max (addr, record base); high = min (addr+len, record end). */
367 low
= (addr
>= recs
[i
].get_address ()) ? addr
: recs
[i
].get_address ();
368 high
= recs
[i
].get_address ()
369 + (PAGE_CNT (recs
[i
].get_size ()) * getpagesize ());
370 high
= (addr
+ len
< high
) ? addr
+ len
: high
;
/* list::set fragment: key the list by fd and — for non-anonymous
   lists — by the file's name hash, so a reused fd number can't be
   mistaken for the same file (see map::get_list_by_fd).  The signature
   (original lines 381-383) is missing from this listing. */
384 if ((fd
= nfd
) != -1)
385 hash
= cygheap
->fdtab
[fd
]->get_namehash ();
/* del_record: free record i's page map and close the gap by shifting
   the tail of the array down one slot (nrecs decrement and the
   empty-list return are in lines missing from this listing —
   original lines 392-394, 398-399, 402 ff.). */
391 list::del_record (int i
)
395 recs
[i
].free_page_map ();
396 for (; i
< nrecs
- 1; i
++)
397 recs
[i
] = recs
[i
+ 1];
400 /* Return true if the list is empty which allows the caller to remove
401 this list from the list array. */
/* get_list_by_fd: locate the list for `fd'.  Anonymous lookups match on
   fd == -1 alone; file lookups additionally compare the stored name
   hash against the current fd's name hash (see the comment below).
   The fd-equality half of the condition, the return statements and
   braces (original lines 407-408, 414, 416-420) are missing from this
   listing. */
406 map::get_list_by_fd (int fd
)
409 for (i
=0; i
<nlists
; i
++)
410 /* The fd isn't sufficient since it could already be the fd of another
411 file. So we use the name hash value to identify the file unless
412 it's an anonymous mapping in which case the fd (-1) is sufficient. */
413 if ((fd
== -1 && lists
[i
].get_fd () == -1)
415 && lists
[i
].get_hash () == cygheap
->fdtab
[fd
]->get_namehash ()))
/* add_list: append a new list for `fd', growing the lists array by 5
   entries at a time — same growth pattern as list::add_record above.
   The new_lists declaration, branch structure and failure handling
   (original lines 422, 424-426, 428, 430-434) are missing from this
   listing. */
421 map::add_list (int fd
)
423 if (nlists
== maxlists
)
427 new_lists
= (list
*) cmalloc (HEAP_MMAP
, 5 * sizeof (list
));
429 new_lists
= (list
*) crealloc (lists
, (maxlists
+ 5) * sizeof (list
));
435 lists
[nlists
].set (fd
);
436 return lists
+ nlists
++;
/* del_list: free list i's records and shift the tail of the lists array
   down one slot (nlists decrement is in lines missing from this
   listing — original lines 441-443, 447 ff.). */
440 map::del_list (int i
)
444 lists
[i
].free_recs ();
445 for (; i
< nlists
- 1; i
++)
446 lists
[i
] = lists
[i
+ 1];
/* mmap64: the POSIX mmap entry point (64-bit offset variant).  Visible
   flow: validate arguments per SUSv2, take the mmap list lock, lazily
   allocate the global mmapped_areas map, normalize the request to the
   Windows allocation granularity, special-case MAP_ANON|MAP_PRIVATE
   reuse of an existing record, perform the device-specific fh->mmap,
   and record the result for fork propagation.  Many statements —
   return-value assignments via set_errno, braces, the early MAP_ANON
   handling and the EBADF/ENODEV branches (gaps throughout the embedded
   original numbering, e.g. 453, 456, 458-461, 463-464, 470-476, 478,
   484-491, 493-494, 498-502, 505-506, 509-511, 521-530, 533-536, 539-540,
   542, 547-548, 550, 552, 555-556, 560-563, 573-574, 578, 584, 586, 588,
   590-592, 595, 598, 601-603, 606-607, 609, 612-615, 619-620, 622,
   625-627, 631-634) — are missing from this listing. */
452 mmap64 (void *addr
, size_t len
, int prot
, int flags
, int fd
, _off64_t off
)
454 syscall_printf ("addr %x, len %u, prot %x, flags %x, fd %d, off %D",
455 addr
, len
, prot
, flags
, fd
, off
);
/* Allocation granularity (64K on Win32) fetched once via GetSystemInfo
   (the GetSystemInfo call is in the missing lines). */
457 static DWORD granularity
;
462 granularity
= si
.dwAllocationGranularity
;
465 /* Error conditions according to SUSv2 */
466 if (off
% getpagesize ()
467 || (!(flags
& MAP_SHARED
) && !(flags
& MAP_PRIVATE
))
468 || ((flags
& MAP_SHARED
) && (flags
& MAP_PRIVATE
))
469 || ((flags
& MAP_FIXED
) && ((DWORD
)addr
% getpagesize ()))
473 syscall_printf ("-1 = mmap(): EINVAL");
477 SetResourceLock (LOCK_MMAP_LIST
, READ_LOCK
| WRITE_LOCK
, "mmap");
479 if (mmapped_areas
== NULL
)
481 /* First mmap call, create STL map */
482 mmapped_areas
= (map
*) ccalloc (HEAP_MMAP
, 1, sizeof (map
))
;
483 if (mmapped_areas
== NULL
)
486 syscall_printf ("-1 = mmap(): ENOMEM");
487 ReleaseResourceLock (LOCK_MMAP_LIST
, READ_LOCK
| WRITE_LOCK
, "mmap");
492 if (flags
& MAP_ANONYMOUS
)
495 /* Map always in multipliers of `granularity'-sized chunks. */
/* gran_off/gran_len: the request rounded out to 64K boundaries; the
   caller's exact offset is re-added to the return value below. */
496 _off64_t gran_off
= off
& ~(granularity
- 1);
497 DWORD gran_len
= howmany (off
+ len
, granularity
) * granularity
- gran_off
;
503 /* Ensure that fd is open */
504 cygheap_fdget
cfd (fd
);
507 syscall_printf ("-1 = mmap(): EBADF");
508 ReleaseResourceLock (LOCK_MMAP_LIST
, READ_LOCK
| WRITE_LOCK
, "mmap");
/* Disk files: reject mappings starting beyond EOF (see FIXME below). */
512 if (fh
->get_device () == FH_DISK
)
515 DWORD low
= GetFileSize (fh
->get_handle (), &high
);
516 _off64_t fsiz
= ((_off64_t
)high
<< 32) + low
;
517 /* Don't allow mappings beginning beyond EOF since Windows can't
518 handle that POSIX like. FIXME: Still looking for a good idea
519 to allow that nevertheless. */
520 if (gran_off
>= fsiz
)
523 ReleaseResourceLock (LOCK_MMAP_LIST
, READ_LOCK
| WRITE_LOCK
,
531 else if (fh
->get_device () == FH_ZERO
)
532 /* mmap /dev/zero is like MAP_ANONYMOUS. */
/* Anonymous (and /dev/zero) mappings go through the shared paging-file
   fhandler rather than the caller's fd. */
537 fh_paging_file
.set_io_handle (INVALID_HANDLE_VALUE
);
538 fh
= &fh_paging_file
;
541 list
*map_list
= mmapped_areas
->get_list_by_fd (fd
);
543 /* First check if this mapping matches into the chunk of another
544 already performed mapping. Only valid for MAP_ANON in a special
545 case of MAP_PRIVATE. */
546 if (map_list
&& fd
== -1 && off
== 0 && !(flags
& MAP_FIXED
))
549 if ((rec
= map_list
->search_record (off
, len
)) != NULL
)
551 if ((off
= rec
->map_pages (off
, len
)) == (_off64_t
)-1)
553 syscall_printf ("-1 = mmap()");
554 ReleaseResourceLock (LOCK_MMAP_LIST
, READ_LOCK
|WRITE_LOCK
, "mmap");
/* Reuse succeeded: return an address inside the existing mapping. */
557 caddr_t ret
= rec
->get_address () + off
;
558 syscall_printf ("%x = mmap() succeeded", ret
);
559 ReleaseResourceLock (LOCK_MMAP_LIST
, READ_LOCK
| WRITE_LOCK
, "mmap");
/* Translate PROT_* into a FILE_MAP_* access mode. */
564 DWORD access
= (prot
& PROT_WRITE
) ? FILE_MAP_WRITE
: FILE_MAP_READ
;
565 /* copy-on-write doesn't work at all on 9x using anonymous maps.
566 Workaround: Anonymous mappings always use normal READ or WRITE
567 access and don't use named file mapping.
568 copy-on-write doesn't also work properly on 9x with real files.
569 While the changes are not propagated to the file, they are
570 visible to other processes sharing the same file mapping object.
571 Workaround: Don't use named file mapping. That should work since
572 sharing file mappings only works reliable using named
575 if ((flags
& MAP_PRIVATE
)
576 && (wincap
.has_working_copy_on_write () || fd
!= -1))
577 access
= FILE_MAP_COPY
;
579 caddr_t base
= (caddr_t
)addr
;
580 /* This shifts the base address to the next lower 64K boundary.
581 The offset is re-added when evaluating the return value. */
583 base
-= off
- gran_off
;
/* Delegate to the device-specific mmap (disk file, /dev/zero, ...). */
585 HANDLE h
= fh
->mmap (&base
, gran_len
, access
, flags
, gran_off
);
587 if (h
== INVALID_HANDLE_VALUE
)
589 ReleaseResourceLock (LOCK_MMAP_LIST
, READ_LOCK
| WRITE_LOCK
, "mmap");
593 /* At this point we should have a successfully mmapped area.
594 Now it's time for bookkeeping stuff. */
596 gran_len
= PAGE_CNT (gran_len
) * getpagesize ();
597 mmap_record
mmap_rec (fd
, h
, access
, gran_off
, gran_len
, base
);
599 /* Get list of mmapped areas for this fd, create a new one if
600 one does not exist yet.
604 /* Create a new one */
605 map_list
= mmapped_areas
->add_list (fd
);
/* On bookkeeping failure roll the Windows mapping back. */
608 fh
->munmap (h
, base
, gran_len
);
610 syscall_printf ("-1 = mmap(): ENOMEM");
611 ReleaseResourceLock (LOCK_MMAP_LIST
, READ_LOCK
| WRITE_LOCK
, "mmap");
616 /* Insert into the list */
617 mmap_record
*rec
= map_list
->add_record (mmap_rec
, off
,
618 len
> gran_len
? gran_len
: len
);
621 fh
->munmap (h
, base
, gran_len
);
623 syscall_printf ("-1 = mmap(): ENOMEM");
624 ReleaseResourceLock (LOCK_MMAP_LIST
, READ_LOCK
| WRITE_LOCK
, "mmap");
/* Success: re-add the sub-64K offset that was shifted out above. */
628 caddr_t ret
= rec
->get_address () + (off
- gran_off
);
629 syscall_printf ("%x = mmap() succeeded", ret
);
630 ReleaseResourceLock (LOCK_MMAP_LIST
, READ_LOCK
| WRITE_LOCK
, "mmap");
/* mmap: 32-bit-offset wrapper — simply widens off and forwards to
   mmap64 (return type and braces are in lines missing from this
   listing — original lines 634, 636, 638). */
635 mmap (void *addr
, size_t len
, int prot
, int flags
, int fd
, _off_t off
)
637 return mmap64 (addr
, len
, prot
, flags
, fd
, (_off64_t
)off
);
640 /* munmap () removes all mmapped pages between addr and addr+len. */
/* Visible flow: validate arguments per SUSv3, take the list lock, then
   walk every list and — via the overlap-finding search_record overload —
   unmap each overlapping record's pages; when a record becomes fully
   free, unmap the underlying view through a freshly allocated fhandler
   and delete the record (and, if the list empties, the list).  errno
   setting, returns and braces (original lines 641-642, 644, 646,
   650-651, 653-655, 658, 661-663, 665-666, 669-670, 672-674, 677, 680,
   684-685, 688, 692-697, 700-701) are missing from this listing. */
643 munmap (void *addr
, size_t len
)
645 syscall_printf ("munmap (addr %x, len %u)", addr
, len
);
647 /* Error conditions according to SUSv3 */
648 if (!addr
|| ((DWORD
)addr
% getpagesize ()) || !len
649 || IsBadReadPtr (addr
, len
))
652 syscall_printf ("-1 = munmap(): Invalid parameters");
656 SetResourceLock (LOCK_MMAP_LIST
, WRITE_LOCK
| READ_LOCK
, "munmap");
657 if (mmapped_areas
== NULL
)
659 syscall_printf ("-1 = munmap(): mmapped_areas == NULL");
660 ReleaseResourceLock (LOCK_MMAP_LIST
, WRITE_LOCK
| READ_LOCK
, "munmap");
664 /* Iterate through the map, unmap pages between addr and addr+len
667 for (int list_idx
= 0;
668 (map_list
= mmapped_areas
->get_list (list_idx
));
/* record_idx starts at -1: search_record scans from start + 1. */
671 long record_idx
= -1;
675 while ((record_idx
= map_list
->search_record((caddr_t
)addr
, len
, u_addr
,
676 u_len
, record_idx
)) >= 0)
678 mmap_record
*rec
= map_list
->get_record (record_idx
);
679 if (rec
->unmap_pages (u_addr
, u_len
))
681 /* The whole record has been unmapped, so... */
682 fhandler_base
*fh
= rec
->alloc_fh ();
683 fh
->munmap (rec
->get_handle (), (caddr_t
)addr
, len
);
686 /* ...delete the record. */
/* Post-decrement compensates for the deleted slot so the next
   search_record call starts at the right index; same trick for
   list_idx below. */
687 if (map_list
->del_record (record_idx
--))
689 /* Yay, the last record has been removed from the list,
690 we can remove the list now, too. */
691 mmapped_areas
->del_list (list_idx
--);
698 ReleaseResourceLock (LOCK_MMAP_LIST
, WRITE_LOCK
| READ_LOCK
, "munmap");
699 syscall_printf ("0 = munmap(): %x", addr
);
703 /* Sync file with memory. Ignore flags for now. */
/* Visible flow: validate the flag combination, take the list lock, then
   search all records for one containing `addr'; verify every page of
   [addr, addr+len) is mapped, and flush via the fhandler's msync.
   Otherwise fall through to the ENOMEM label.  errno setting, returns
   and braces (original lines 704-705, 707, 709-710, 714, 716-719, 723,
   725, 727-729, 732-733, 736-738, 741-742, 744, 751-754, 756, 758,
   760-765, 768, 770, 772-774) are missing from this listing.
   NOTE(review): the verification loop steps `++i' (byte-wise) from
   getpagesize () to len — presumably a page stride (i += getpagesize ())
   was intended; verify against the full source / later revisions. */
706 msync (void *addr
, size_t len
, int flags
)
708 syscall_printf ("addr = %x, len = %u, flags = %x",
711 /* However, check flags for validity. */
712 if ((flags
& ~(MS_ASYNC
| MS_SYNC
| MS_INVALIDATE
))
713 || ((flags
& MS_ASYNC
) && (flags
& MS_SYNC
)))
715 syscall_printf ("-1 = msync(): Invalid flags");
720 SetResourceLock (LOCK_MMAP_LIST
, WRITE_LOCK
| READ_LOCK
, "msync");
721 /* Check if a mmap'ed area was ever created */
722 if (mmapped_areas
== NULL
)
724 syscall_printf ("-1 = msync(): mmapped_areas == NULL");
726 ReleaseResourceLock (LOCK_MMAP_LIST
, WRITE_LOCK
| READ_LOCK
, "msync");
730 /* Iterate through the map, looking for the mmapped area.
731 Error if not found. */
734 for (int list_idx
= 0;
735 (map_list
= mmapped_areas
->get_list (list_idx
));
739 for (int record_idx
= 0;
740 (rec
= map_list
->get_record (record_idx
));
743 if (rec
->access ((caddr_t
)addr
))
745 /* Check whole area given by len. */
746 for (DWORD i
= getpagesize (); i
< len
; ++i
)
747 if (!rec
->access ((caddr_t
)addr
+ i
))
748 goto invalid_address_range
;
749 fhandler_base
*fh
= rec
->alloc_fh ();
750 int ret
= fh
->msync (rec
->get_handle (), (caddr_t
)addr
, len
,
755 syscall_printf ("%d = msync(): %E", ret
);
757 syscall_printf ("0 = msync()");
759 ReleaseResourceLock (LOCK_MMAP_LIST
, WRITE_LOCK
| READ_LOCK
,
766 invalid_address_range
:
767 /* SUSv2: Return code if indicated memory was not mapped is ENOMEM. */
769 syscall_printf ("-1 = msync(): ENOMEM");
771 ReleaseResourceLock (LOCK_MMAP_LIST
, WRITE_LOCK
| READ_LOCK
, "msync");
775 /* Set memory protection */
/* Visible flow: on 9x (no VirtualProtect on shared memory, i.e. the
   0x80000000-0xBFFFFFFF shared arena) silently succeed; otherwise map
   the PROT_* combination onto a PAGE_* constant via a switch (the
   switch head, case labels for the remaining combinations, breaks,
   returns and errno setting — original lines 776-777, 779-782, 784,
   787, 789-793, 797, 799, 801, 804-805, 807-808, 810-811, 813-814,
   816-819, 821-822, 824-826, 828-829 — are missing from this listing)
   and apply it with VirtualProtect. */
778 mprotect (void *addr
, size_t len
, int prot
)
783 syscall_printf ("mprotect (addr %x, len %u, prot %x)", addr
, len
, prot
);
785 if (!wincap
.virtual_protect_works_on_shared_pages ()
786 && addr
>= (caddr_t
)0x80000000 && addr
<= (caddr_t
)0xBFFFFFFF)
788 syscall_printf ("0 = mprotect (9x: No VirtualProtect on shared memory)");
794 case PROT_READ
| PROT_WRITE
| PROT_EXEC
:
795 case PROT_WRITE
| PROT_EXEC
:
796 new_prot
= PAGE_EXECUTE_READWRITE
;
798 case PROT_READ
| PROT_WRITE
:
800 new_prot
= PAGE_READWRITE
;
802 case PROT_READ
| PROT_EXEC
:
803 new_prot
= PAGE_EXECUTE_READ
;
806 new_prot
= PAGE_READONLY
;
809 new_prot
= PAGE_EXECUTE
;
812 new_prot
= PAGE_NOACCESS
;
815 syscall_printf ("-1 = mprotect (): invalid prot value");
820 if (VirtualProtect (addr
, len
, new_prot
, &old_prot
) == 0)
823 syscall_printf ("-1 = mprotect (): %E");
827 syscall_printf ("0 = mprotect ()");
/* fhandler_base default implementations — stubs for devices that don't
   support mmap.  Bodies other than the visible return (which set errno
   to ENODEV per the comment below) are in lines missing from this
   listing (original lines 830-831, 833, 839, 841-843, 846-847, 849-851,
   853-858, 860-865, 868-872). */
832 * Base implementation:
834 * `mmap' returns ENODEV as documented in SUSv2.
835 * In contrast to the global function implementation, the member function
836 * `mmap' has to return the mapped base address in `addr' and the handle to
837 * the mapping object as return value. In case of failure, the fhandler
838 * mmap has to close that handle by itself and return INVALID_HANDLE_VALUE.
840 * `munmap' and `msync' get the handle to the mapping object as first parameter
844 fhandler_base::mmap (caddr_t
*addr
, size_t len
, DWORD access
,
845 int flags
, _off64_t off
)
848 return INVALID_HANDLE_VALUE
;
852 fhandler_base::munmap (HANDLE h
, caddr_t addr
, size_t len
)
859 fhandler_base::msync (HANDLE h
, caddr_t addr
, size_t len
, int flags
)
866 fhandler_base::fixup_mmap_after_fork (HANDLE h
, DWORD access
, DWORD offset
,
867 DWORD size
, void *address
)
873 /* Implementation for disk files. */
/* Visible flow: translate FILE_MAP_* access back into a PAGE_*
   protection, create (or, on 9x/ME, open-by-name or create) the
   file-mapping object, then MapViewOfFileEx at the requested address —
   retrying at a system-chosen address unless MAP_FIXED — and hand the
   mapped base back through *addr with the mapping HANDLE as return
   value.  The switch head, case labels, braces, the `h' and `base'
   declarations, CloseHandle on the failure paths and errno setting
   (original lines 874, 877-882, 884-885, 887-888, 890-894, 901, 909,
   913-914, 917-920, 923-924, 926, 929, 936-939, 941-944, 946-947,
   949-950, 952-953) are missing from this listing. */
875 fhandler_disk_file::mmap (caddr_t
*addr
, size_t len
, DWORD access
,
876 int flags
, _off64_t off
)
883 protect
= PAGE_READWRITE
;
886 protect
= PAGE_READONLY
;
889 protect
= PAGE_WRITECOPY
;
895 /* On 9x/ME try first to open the mapping by name when opening a
896 shared file object. This is needed since 9x/ME only shares
897 objects between processes by name. What a mess... */
898 if (wincap
.share_mmaps_only_by_name ()
899 && get_handle () != INVALID_HANDLE_VALUE
900 && !(access
& FILE_MAP_COPY
))
902 /* Grrr, the whole stuff is just needed to try to get a reliable
903 mapping of the same file. Even that uprising isn't bullet
904 proof but it does it's best... */
/* Build a canonical (full POSIX path, lower-cased) mapping name so all
   processes mapping the same file derive the same object name. */
905 char namebuf
[MAX_PATH
];
906 cygwin_conv_to_full_posix_path (get_name (), namebuf
);
907 for (int i
= strlen (namebuf
) - 1; i
>= 0; --i
)
908 namebuf
[i
] = cyg_tolower (namebuf
[i
]);
910 debug_printf ("named sharing");
911 if (!(h
= OpenFileMapping (access
, TRUE
, namebuf
)))
912 h
= CreateFileMapping (get_handle (), &sec_none
, protect
, 0, 0, namebuf
);
/* Non-9x (or copy-on-write) path: anonymous mapping object.  For an
   INVALID_HANDLE_VALUE file handle (anonymous mmap backed by the page
   file) the explicit len sizes the object. */
915 h
= CreateFileMapping (get_handle (), &sec_none
, protect
, 0,
916 get_handle () == INVALID_HANDLE_VALUE
? len
: 0,
921 syscall_printf ("-1 = mmap(): CreateFileMapping failed with %E");
922 return INVALID_HANDLE_VALUE
;
925 DWORD high
= off
>> 32, low
= off
& 0xffffffff;
927 /* If a non-zero address is given, try mapping using the given address first.
928 If it fails and flags is not MAP_FIXED, try again with NULL address. */
930 base
= MapViewOfFileEx (h
, access
, high
, low
, len
, *addr
);
931 if (!base
&& !(flags
& MAP_FIXED
))
932 base
= MapViewOfFileEx (h
, access
, high
, low
, len
, NULL
);
933 debug_printf ("%x = MapViewOfFileEx (h:%x, access:%x, 0, off:%D, "
934 "len:%u, addr:%x)", base
, h
, access
, off
, len
, *addr
);
935 if (!base
|| ((flags
& MAP_FIXED
) && base
!= *addr
))
940 syscall_printf ("-1 = mmap(): MapViewOfFileEx failed with %E");
945 syscall_printf ("-1 = mmap(): address shift with MAP_FIXED given");
948 return INVALID_HANDLE_VALUE
;
951 *addr
= (caddr_t
) base
;
/* munmap: drop the view and (in lines missing from this listing —
   original lines 954-955, 957, 959-961) presumably close the mapping
   handle — verify against the full source. */
956 fhandler_disk_file::munmap (HANDLE h
, caddr_t addr
, size_t len
)
958 UnmapViewOfFile (addr
);
/* msync: flush dirty pages of the view back to the file; the error and
   success returns (original lines 962-963, 965, 967-971) are missing
   from this listing. */
964 fhandler_disk_file::msync (HANDLE h
, caddr_t addr
, size_t len
, int flags
)
966 if (FlushViewOfFile (addr
, len
) == 0)
/* fixup_mmap_after_fork: in the forked child, re-map the inherited
   mapping handle at the exact address the parent used.  On mismatch,
   dump diagnostics via VirtualQuery; success means the view landed at
   the requested address.  The if (base != address) guard around the
   diagnostics (original lines 973-974, 977, 980-981, 986, 988-989) is
   in lines missing from this listing. */
975 fhandler_disk_file::fixup_mmap_after_fork (HANDLE h
, DWORD access
, DWORD offset
,
976 DWORD size
, void *address
)
978 /* Re-create the MapViewOfFileEx call */
979 void *base
= MapViewOfFileEx (h
, access
, 0, offset
, size
, address
);
982 MEMORY_BASIC_INFORMATION m
;
983 (void) VirtualQuery (address
, &m
, sizeof (m
));
984 system_printf ("requested %p != %p mem alloc base %p, state %p, size %d, %E",
985 address
, base
, m
.AllocationBase
, m
.State
, m
.RegionSize
);
987 return base
== address
;
/* fixup_mmaps_after_fork — see the original comment below.  Visible
   flow: walk every list and record, re-map each area at its original
   address via the fhandler's fixup_mmap_after_fork, and for
   FILE_MAP_COPY (MAP_PRIVATE) records copy the parent's page contents
   page-by-page with ReadProcessMemory, temporarily flipping the
   parent's pages to PAGE_READONLY when the first read fails with
   ERROR_PARTIAL_COPY/ERROR_NOACCESS (write-copy pages aren't readable
   cross-process), then restoring protections on both sides.  Braces,
   early returns, failure returns, the remaining fixup_mmap_after_fork
   arguments and the old_prot/dummy_prot declarations (gaps throughout
   the embedded numbering, e.g. 997-999, 1001-1002, 1004, 1007-1008,
   1010, 1013-1015, 1018-1020, 1024, 1027-1029, 1031-1034, 1036,
   1043-1044, 1046, 1050, 1054-1055, 1059, 1063-1069, 1086-1087, 1089,
   1092-1096, 1098-1100) are missing from this listing. */
991 * Call to re-create all the file mappings in a forked
992 * child. Called from the child in initialization. At this
993 * point we are passed a valid mmapped_areas map, and all the
994 * HANDLE's are valid for the child, but none of the
995 * mapped areas are in our address space. We need to iterate
996 * through the map, doing the MapViewOfFile calls.
1000 fixup_mmaps_after_fork (HANDLE parent
)
1003 debug_printf ("recreate_mmaps_after_fork, mmapped_areas %p", mmapped_areas
);
1005 /* Check if a mmapped area was ever created */
1006 if (mmapped_areas
== NULL
)
1009 /* Iterate through the map */
1011 for (int list_idx
= 0;
1012 (map_list
= mmapped_areas
->get_list (list_idx
));
1016 for (int record_idx
= 0;
1017 (rec
= map_list
->get_record (record_idx
));
1021 debug_printf ("fd %d, h %x, access %x, offset %D, size %u, address %p",
1022 rec
->get_fd (), rec
->get_handle (), rec
->get_access (),
1023 rec
->get_offset (), rec
->get_size (), rec
->get_address ());
/* Re-map this record through a device-appropriate fhandler. */
1025 fhandler_base
*fh
= rec
->alloc_fh ();
1026 BOOL ret
= fh
->fixup_mmap_after_fork (rec
->get_handle (),
1030 rec
->get_address ());
/* MAP_PRIVATE records don't share content with the parent — copy the
   parent's accessible pages into the child's freshly mapped view. */
1035 if (rec
->get_access () == FILE_MAP_COPY
)
1037 for (char *address
= rec
->get_address ();
1038 address
< rec
->get_address () + rec
->get_size ();
1039 address
+= getpagesize ())
1040 if (rec
->access (address
)
1041 && !ReadProcessMemory (parent
, address
, address
,
1042 getpagesize (), NULL
))
1045 DWORD last_error
= GetLastError ();
/* Only retry for the errors a write-copy page can cause, and only
   where VirtualProtect works on shared pages. */
1047 if (last_error
!= ERROR_PARTIAL_COPY
1048 && last_error
!= ERROR_NOACCESS
1049 || !wincap
.virtual_protect_works_on_shared_pages ())
1051 system_printf ("ReadProcessMemory failed for "
1052 "MAP_PRIVATE address %p, %E",
1053 rec
->get_address ());
/* Make the parent's page readable for the retry. */
1056 if (!VirtualProtectEx (parent
,
1057 address
, getpagesize (),
1058 PAGE_READONLY
, &old_prot
))
1060 system_printf ("VirtualProtectEx failed for "
1061 "MAP_PRIVATE address %p, %E",
1062 rec
->get_address ());
1070 ret
= ReadProcessMemory (parent
, address
, address
,
1071 getpagesize (), NULL
);
/* Restore the parent's original protection... */
1072 if (!VirtualProtectEx(parent
,
1073 address
, getpagesize (),
1074 old_prot
, &dummy_prot
))
1075 system_printf ("WARNING: VirtualProtectEx to "
1076 "return to previous state "
1077 "in parent failed for "
1078 "MAP_PRIVATE address %p, %E",
1079 rec
->get_address ());
/* ...and mirror it onto the child's copy of the page. */
1080 if (!VirtualProtect (address
, getpagesize (),
1081 old_prot
, &dummy_prot
))
1082 system_printf ("WARNING: VirtualProtect to copy "
1083 "protection to child failed for"
1084 "MAP_PRIVATE address %p, %E",
1085 rec
->get_address ());
1088 system_printf ("ReadProcessMemory (2nd try) "
1090 "MAP_PRIVATE address %p, %E",
1091 rec
->get_address ());
/* Re-apply per-page protections recorded in the page map. */
1097 rec
->fixup_page_map ();
1101 debug_printf ("succeeded");