//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MAPPED_REGION_HPP
#define BOOST_INTERPROCESS_MAPPED_REGION_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/move/utility_core.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/os_file_functions.hpp>
#include <string>
#include <boost/cstdint.hpp>
#include <boost/assert.hpp>
#include <boost/move/adl_move_swap.hpp>

//Some Unixes use caddr_t instead of void * in madvise
//              SunOS                                 Tru64                               HP-UX                    AIX
#if defined(sun) || defined(__sun) || defined(__osf__) || defined(__osf) || defined(_hpux) || defined(hpux) || defined(_AIX)
#define BOOST_INTERPROCESS_MADVISE_USES_CADDR_T
#include <sys/types.h>
#endif

//Many UNIXes have destructive semantics for MADV_DONTNEED, so
//we only enable it on platforms known to have non-destructive semantics.
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__)
#define BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS
#endif

#if defined (BOOST_INTERPROCESS_WINDOWS)
#  include <boost/interprocess/detail/win32_api.hpp>
#  include <boost/interprocess/sync/windows/sync_utils.hpp>
#else
#  ifdef BOOST_HAS_UNISTD_H
#    include <fcntl.h>
#    include <sys/mman.h>     //mmap
#    include <unistd.h>
#    include <sys/stat.h>
#    include <sys/types.h>
#    if defined(BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS)
#      include <sys/shm.h>      //System V shared memory...
#    endif
#    include <boost/assert.hpp>
#  else
#    error Unknown platform
#  endif

#endif   //#if defined (BOOST_INTERPROCESS_WINDOWS)

//!\file
//!Describes mapped region class

namespace boost {
namespace interprocess {

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

//Solaris declares madvise only in some configurations but defines MADV_XXX, a bit confusing.
//Predeclare it here to avoid any compilation error
#if (defined(sun) || defined(__sun)) && defined(MADV_NORMAL)
extern "C" int madvise(caddr_t, size_t, int);
#endif

namespace ipcdetail{ class interprocess_tester; }
namespace ipcdetail{ class raw_mapped_region_creator; }

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

//!The mapped_region class represents a portion or region created from a
//!memory_mappable object.
//!
//!The OS can map a region bigger than the requested one, as the region must
//!be a multiple of the page size, but mapped_region will always refer to
//!the region specified by the user.
class mapped_region
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable
   BOOST_MOVABLE_BUT_NOT_COPYABLE(mapped_region)
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:

   //!Creates a mapping region of the mapped memory "mapping", starting at
   //!offset "offset", and the mapping's size will be "size". The mapping
   //!can be opened for read-only, read-write or copy-on-write.
   //!
   //!If an address is specified, both the offset and the address must be
   //!multiples of the page size.
   //!
   //!The map is created using "default_map_options". This flag is OS
   //!dependent and it should not be changed unless the user needs to
   //!specify special options.
   //!
   //!In Windows systems "map_options" is a DWORD value passed as
   //!"dwDesiredAccess" to "MapViewOfFileEx". If "default_map_options" is passed
   //!it's initialized to zero. "map_options" is ORed with FILE_MAP_[COPY|READ|WRITE].
   //!
   //!In UNIX systems and POSIX mappings "map_options" is an int value passed as "flags"
   //!to "mmap". If "default_map_options" is specified it's initialized to MAP_NOSYNC
   //!if that option exists and to zero otherwise. "map_options" is ORed with MAP_PRIVATE or MAP_SHARED.
   //!
   //!In UNIX systems and XSI mappings "map_options" is an int value passed as "shmflg"
   //!to "shmat". If "default_map_options" is specified it's initialized to zero.
   //!"map_options" is ORed with SHM_RDONLY if needed.
   //!
   //!The OS could allocate more pages than size/page_size(), but get_address()
   //!will always return the address passed to this function (if not null) and
   //!get_size() will return the specified size.
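   //!
   //!Illustrative usage sketch (not taken from this header; it assumes a
   //!file_mapping object created elsewhere over a hypothetical file "file.bin"):
   //! \code
   //!   file_mapping file("file.bin", read_write);
   //!   //Map the first 4096 bytes of the file in read-write mode
   //!   mapped_region region(file, read_write, 0, 4096);
   //!   void       *addr = region.get_address();
   //!   std::size_t size = region.get_size();
   //! \endcode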
   template<class MemoryMappable>
   mapped_region(const MemoryMappable& mapping
                ,mode_t mode
                ,offset_t offset = 0
                ,std::size_t size = 0
                ,const void *address = 0
                ,map_options_t map_options = default_map_options);

   //!Default constructor. Address will be 0 (nullptr).
   //!Size will be 0.
   //!Does not throw
   mapped_region();

   //!Move constructor. *this will be constructed taking ownership of "other"'s
   //!region and "other" will be left in default-constructed state.
   mapped_region(BOOST_RV_REF(mapped_region) other)
   #if defined (BOOST_INTERPROCESS_WINDOWS)
   :  m_base(0), m_size(0)
   ,  m_page_offset(0)
   ,  m_mode(read_only)
   ,  m_file_or_mapping_hnd(ipcdetail::invalid_file())
   #else
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
   #endif
   {  this->swap(other);   }

   //!Destroys the mapped region.
   //!Does not throw
   ~mapped_region();

   //!Move assignment. If *this owns a memory mapped region, it will be
   //!destroyed and it will take ownership of "other"'s memory mapped region.
   mapped_region &operator=(BOOST_RV_REF(mapped_region) other)
   {
      mapped_region tmp(boost::move(other));
      this->swap(tmp);
      return *this;
   }

   //!Swaps the mapped_region with another
   //!mapped region
   void swap(mapped_region &other);

   //!Returns the size of the mapping. Never throws.
   std::size_t get_size() const;

   //!Returns the base address of the mapping.
   //!Never throws.
   void*       get_address() const;

   //!Returns the mode of the mapping used to construct the mapped region.
   //!Never throws.
   mode_t get_mode() const;

   //!Flushes to the disk a byte range within the mapped memory.
   //!If 'async' is true, the function will return before the flushing operation is completed.
   //!If 'async' is false, the function will return once data has been written into the underlying
   //!device (i.e., in mapped files OS cached information is written to disk).
   //!Never throws. Returns false if the operation could not be performed.
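   //!
   //!Illustrative sketch (not taken from this header; assumes "region" is a
   //!writable mapped_region over a file and <cstring> is included):
   //! \code
   //!   std::memset(region.get_address(), 0, region.get_size());
   //!   //Synchronously flush the whole region to the underlying device
   //!   bool flushed = region.flush(0u, 0u, false);
   //! \endcode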
   bool flush(std::size_t mapping_offset = 0, std::size_t numbytes = 0, bool async = true);

   //!Shrinks the current mapped region. If after shrinking a previously mapped memory page
   //!is no longer needed, accessing that page can trigger a segmentation fault.
   //!Depending on the OS, this operation might fail (XSI shared memory), it can decommit storage
   //!and free a portion of the virtual address space (e.g. POSIX) or this
   //!function can release some physical memory without freeing any virtual address space (Windows).
   //!Returns true on success. Never throws.
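   //!
   //!Illustrative sketch (not taken from this header; assumes "region" maps at
   //!least two OS pages):
   //! \code
   //!   //Discard the last page of the mapping, keeping the front part usable
   //!   bool ok = region.shrink_by(mapped_region::get_page_size(), true);
   //! \endcode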
   bool shrink_by(std::size_t bytes, bool from_back = true);

   //!This enum specifies region usage behaviors that an application can specify
   //!to the mapped region implementation.
   enum advice_types{
      //!Specifies that the application has no advice to give on its behavior with respect to
      //!the region. It is the default characteristic if no advice is given for a range of memory.
      advice_normal,
      //!Specifies that the application expects to access the region sequentially from
      //!lower addresses to higher addresses. The implementation can lower the priority of
      //!preceding pages within the region once a page has been accessed.
      advice_sequential,
      //!Specifies that the application expects to access the region in a random order,
      //!and prefetching is likely not advantageous.
      advice_random,
      //!Specifies that the application expects to access the region in the near future.
      //!The implementation can prefetch pages of the region.
      advice_willneed,
      //!Specifies that the application expects that it will not access the region in the near future.
      //!The implementation can unload pages within the range to save system resources.
      advice_dontneed
   };

   //!Advises the implementation on the expected behavior of the application with respect to the data
   //!in the region. The implementation may use this information to optimize handling of the region data.
   //!This function has no effect on the semantics of access to memory in the region, although it may affect
   //!the performance of access.
   //!If the advice type is not known to the implementation, the function returns false; true otherwise.
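   //!
   //!Illustrative sketch (not taken from this header; assumes "region" is an
   //!existing mapped_region):
   //! \code
   //!   //Hint that the region will be read sequentially; the result can be
   //!   //ignored since the hint may be unsupported (it always fails on Windows)
   //!   (void)region.advise(mapped_region::advice_sequential);
   //! \endcode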
   bool advise(advice_types advise);

   //!Returns the size of the page. This size is the minimum memory that
   //!will be used by the system when mapping a memory mappable source and
   //!it restricts the addresses and offsets that can be mapped.
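   //!
   //!Illustrative sketch (not taken from this header; "desired_offset" is a
   //!hypothetical value chosen by the caller):
   //! \code
   //!   //Round an arbitrary offset down to the previous page boundary, as required
   //!   //when an explicit mapping address is used
   //!   const std::size_t page_size = mapped_region::get_page_size();
   //!   offset_t aligned_offset = (desired_offset/page_size)*page_size;
   //! \endcode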
   static std::size_t get_page_size();

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   //!Closes a previously opened memory mapping. Never throws
   void priv_close();

   void* priv_map_address()  const;
   std::size_t priv_map_size()  const;
   bool priv_flush_param_check(std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const;
   bool priv_shrink_param_check(std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes);
   static void priv_size_from_mapping_size
      (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size);
   static offset_t priv_page_offset_addr_fixup(offset_t page_offset, const void *&addr);

   template<int dummy>
   struct page_size_holder
   {
      static const std::size_t PageSize;
      static std::size_t get_page_size();
   };

   void*             m_base;
   std::size_t       m_size;
   std::size_t       m_page_offset;
   mode_t            m_mode;
   #if defined(BOOST_INTERPROCESS_WINDOWS)
   file_handle_t     m_file_or_mapping_hnd;
   #else
   bool              m_is_xsi;
   #endif

   friend class ipcdetail::interprocess_tester;
   friend class ipcdetail::raw_mapped_region_creator;
   void dont_close_on_destruction();
   #if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
   template<int Dummy>
   static void destroy_syncs_in_range(const void *addr, std::size_t size);
   #endif
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
};
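
//Illustrative usage sketch (not part of this header): mapping a shared memory
//object requires including <boost/interprocess/shared_memory_object.hpp>; the
//object name "MySharedMemory" is a hypothetical example.
//
//   shared_memory_object shm(create_only, "MySharedMemory", read_write);
//   shm.truncate(1000);                       //reserve 1000 bytes
//   mapped_region region(shm, read_write);    //map the whole object
//   std::memset(region.get_address(), 1, region.get_size());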

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

inline void swap(mapped_region &x, mapped_region &y)
{  x.swap(y);  }

inline mapped_region::~mapped_region()
{  this->priv_close(); }

inline std::size_t mapped_region::get_size()  const
{  return m_size; }

inline mode_t mapped_region::get_mode()  const
{  return m_mode;   }

inline void*    mapped_region::get_address()  const
{  return m_base; }

inline void*    mapped_region::priv_map_address()  const
{  return static_cast<char*>(m_base) - m_page_offset; }

inline std::size_t mapped_region::priv_map_size()  const
{  return m_size + m_page_offset; }

inline bool mapped_region::priv_flush_param_check
   (std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const
{
   //Check some errors
   if(m_base == 0)
      return false;

   if(mapping_offset >= m_size || numbytes > (m_size - size_t(mapping_offset))){
      return false;
   }

   //Update flush size if the user does not provide it
   if(numbytes == 0){
      numbytes = m_size - mapping_offset;
   }
   addr = (char*)this->priv_map_address() + mapping_offset;
   numbytes += m_page_offset;
   return true;
}

inline bool mapped_region::priv_shrink_param_check
   (std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes)
{
   //Check some errors
   if(m_base == 0 || bytes > m_size){
      return false;
   }
   else if(bytes == m_size){
      this->priv_close();
      return true;
   }
   else{
      const std::size_t page_size = mapped_region::get_page_size();
      if(from_back){
         const std::size_t new_pages = (m_size + m_page_offset - bytes - 1)/page_size + 1;
         shrink_page_start = static_cast<char*>(this->priv_map_address()) + new_pages*page_size;
         shrink_page_bytes = m_page_offset + m_size - new_pages*page_size;
         m_size -= bytes;
      }
      else{
         shrink_page_start = this->priv_map_address();
         m_page_offset += bytes;
         shrink_page_bytes = (m_page_offset/page_size)*page_size;
         m_page_offset = m_page_offset % page_size;
         m_size -= bytes;
         m_base  = static_cast<char *>(m_base) + bytes;
         BOOST_ASSERT(shrink_page_bytes%page_size == 0);
      }
      return true;
   }
}

inline void mapped_region::priv_size_from_mapping_size
   (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size)
{
   //Check if mapping size fits in the user address space
   //as offset_t is the maximum file size and it's signed.
   if(mapping_size < offset ||
      boost::uintmax_t(mapping_size - (offset - page_offset)) >
         boost::uintmax_t(std::size_t(-1))){
      error_info err(size_error);
      throw interprocess_exception(err);
   }
   size = static_cast<std::size_t>(mapping_size - offset);
}

inline offset_t mapped_region::priv_page_offset_addr_fixup(offset_t offset, const void *&address)
{
   //We can't map any offset so we have to obtain system's
   //memory granularity
   const std::size_t page_size  = mapped_region::get_page_size();

   //We calculate the difference between demanded and valid offset
   //(always less than a page in std::size_t, thus, representable by std::size_t)
   const std::size_t page_offset =
      static_cast<std::size_t>(offset - (offset / page_size) * page_size);
   //Update the mapping address
   if(address){
      address = static_cast<const char*>(address) - page_offset;
   }
   return page_offset;
}
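
//Worked example (illustrative, assuming a 4096-byte page size): a requested
//offset of 10000 gives page_offset = 10000 - (10000/4096)*4096 = 1808, so the
//real mapping starts at file offset 8192 and the user-visible address is
//advanced by 1808 bytes over the start of the mapped pages.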

#if defined (BOOST_INTERPROCESS_WINDOWS)

inline mapped_region::mapped_region()
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(read_only)
   ,  m_file_or_mapping_hnd(ipcdetail::invalid_file())
{}

template<int dummy>
inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
{
   winapi::interprocess_system_info info;
   winapi::get_system_info(&info);
   return std::size_t(info.dwAllocationGranularity);
}

template<class MemoryMappable>
inline mapped_region::mapped_region
   (const MemoryMappable &mapping
   ,mode_t mode
   ,offset_t offset
   ,std::size_t size
   ,const void *address
   ,map_options_t map_options)
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(mode)
   ,  m_file_or_mapping_hnd(ipcdetail::invalid_file())
{
   mapping_handle_t mhandle = mapping.get_mapping_handle();
   {
      file_handle_t native_mapping_handle = 0;

      //Set accesses
      //For "create_file_mapping"
      unsigned long protection = 0;
      //For "mapviewoffile"
      unsigned long map_access = map_options == default_map_options ? 0 : map_options;

      switch(mode)
      {
         case read_only:
         case read_private:
            protection   |= winapi::page_readonly;
            map_access   |= winapi::file_map_read;
         break;
         case read_write:
            protection   |= winapi::page_readwrite;
            map_access   |= winapi::file_map_write;
         break;
         case copy_on_write:
            protection   |= winapi::page_writecopy;
            map_access   |= winapi::file_map_copy;
         break;
         default:
            {
               error_info err(mode_error);
               throw interprocess_exception(err);
            }
         break;
      }

      //For file mapping (including emulated shared memory through temporary files),
      //the device is a file handle, so we need to obtain the file's size and call create_file_mapping
      //to obtain the mapping handle.
      //For files we don't need the file mapping after mapping the memory, as the file is still there,
      //so we schedule the mapping handle to be closed.
      void * handle_to_close = winapi::invalid_handle_value;
      if(!mhandle.is_shm){
         //Create mapping handle
         native_mapping_handle = winapi::create_file_mapping
            ( ipcdetail::file_handle_from_mapping_handle(mapping.get_mapping_handle())
            , protection, 0, 0, 0);

         //Check if all is correct
         if(!native_mapping_handle){
            error_info err = winapi::get_last_error();
            throw interprocess_exception(err);
         }
         handle_to_close = native_mapping_handle;
      }
      else{
         //For windows_shared_memory the device handle is already a mapping handle
         //and we need to maintain it
         native_mapping_handle = mhandle.handle;
      }
      //RAII handle close on scope exit
      const winapi::handle_closer close_handle(handle_to_close);
      (void)close_handle;

      const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);

      //Obtain mapping size if user provides 0 size
      if(size == 0){
         offset_t mapping_size;
         if(!winapi::get_file_mapping_size(native_mapping_handle, mapping_size)){
            error_info err = winapi::get_last_error();
            throw interprocess_exception(err);
         }
         //This can throw
         priv_size_from_mapping_size(mapping_size, offset, page_offset, size);
      }

      //Map with new offsets and size
      void *base = winapi::map_view_of_file_ex
                                 (native_mapping_handle,
                                 map_access,
                                 offset - page_offset,
                                 static_cast<std::size_t>(page_offset + size),
                                 const_cast<void*>(address));
      //Check error
      if(!base){
         error_info err = winapi::get_last_error();
         throw interprocess_exception(err);
      }

      //Calculate new base for the user
      m_base = static_cast<char*>(base) + page_offset;
      m_page_offset = page_offset;
      m_size = size;
   }
   //Windows shared memory needs the duplication of the handle if we want to
   //make mapped_region independent from the mappable device
   //
   //For mapped files, we duplicate the file handle to be able to FlushFileBuffers
   if(!winapi::duplicate_current_process_handle(mhandle.handle, &m_file_or_mapping_hnd)){
      error_info err = winapi::get_last_error();
      this->priv_close();
      throw interprocess_exception(err);
   }
}

inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
{
   void *addr;
   if(!this->priv_flush_param_check(mapping_offset, addr, numbytes)){
      return false;
   }
   //Flush it all
   if(!winapi::flush_view_of_file(addr, numbytes)){
      return false;
   }
   //m_file_or_mapping_hnd can be a file handle or a mapping handle,
   //so flushing file buffers only makes sense for files...
   else if(!async && m_file_or_mapping_hnd != winapi::invalid_handle_value &&
           winapi::get_file_type(m_file_or_mapping_hnd) == winapi::file_type_disk){
      return winapi::flush_file_buffers(m_file_or_mapping_hnd);
   }
   return true;
}

inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
{
   void *shrink_page_start = 0;
   std::size_t shrink_page_bytes = 0;
   if(!this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
      return false;
   }
   else if(shrink_page_bytes){
      //In Windows, we can't decommit the storage or release the virtual address space;
      //the best we can do is try to remove some memory from the process working set.
      //With a bit of luck we can free some physical memory.
      unsigned long old_protect_ignored;
      bool b_ret = winapi::virtual_unlock(shrink_page_start, shrink_page_bytes)
                           || (winapi::get_last_error() == winapi::error_not_locked);
      (void)old_protect_ignored;
      //Change page protection to forbid any further access
      b_ret = b_ret && winapi::virtual_protect
         (shrink_page_start, shrink_page_bytes, winapi::page_noaccess, old_protect_ignored);
      return b_ret;
   }
   else{
      return true;
   }
}

inline bool mapped_region::advise(advice_types)
{
   //Windows has no madvise/posix_madvise equivalent
   return false;
}

inline void mapped_region::priv_close()
{
   if(m_base){
      void *addr = this->priv_map_address();
      #if !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
      mapped_region::destroy_syncs_in_range<0>(addr, m_size);
      #endif
      winapi::unmap_view_of_file(addr);
      m_base = 0;
   }
   if(m_file_or_mapping_hnd != ipcdetail::invalid_file()){
      winapi::close_handle(m_file_or_mapping_hnd);
      m_file_or_mapping_hnd = ipcdetail::invalid_file();
   }
}

inline void mapped_region::dont_close_on_destruction()
{}

#else    //#if defined (BOOST_INTERPROCESS_WINDOWS)

inline mapped_region::mapped_region()
   :  m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
{}

template<int dummy>
inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
{  return std::size_t(sysconf(_SC_PAGESIZE)); }

template<class MemoryMappable>
inline mapped_region::mapped_region
   ( const MemoryMappable &mapping
   , mode_t mode
   , offset_t offset
   , std::size_t size
   , const void *address
   , map_options_t map_options)
   : m_base(0), m_size(0), m_page_offset(0), m_mode(mode), m_is_xsi(false)
{
   mapping_handle_t map_hnd = mapping.get_mapping_handle();

   //Some systems don't support XSI shared memory
   #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
   if(map_hnd.is_xsi){
      //Get the size
      ::shmid_ds xsi_ds;
      int ret = ::shmctl(map_hnd.handle, IPC_STAT, &xsi_ds);
      if(ret == -1){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //Compare sizes
      if(size == 0){
         size = (std::size_t)xsi_ds.shm_segsz;
      }
      else if(size != (std::size_t)xsi_ds.shm_segsz){
         error_info err(size_error);
         throw interprocess_exception(err);
      }
      //Calculate flag
      int flag = map_options == default_map_options ? 0 : map_options;
      if(m_mode == read_only){
         flag |= SHM_RDONLY;
      }
      else if(m_mode != read_write){
         error_info err(mode_error);
         throw interprocess_exception(err);
      }
      //Attach memory
      //Some old shmat implementations take the address as a non-const void pointer,
      //so cast away constness to keep the code portable.
      void *const final_address = const_cast<void *>(address);
      void *base = ::shmat(map_hnd.handle, final_address, flag);
      if(base == (void*)-1){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //Update members
      m_base   = base;
      m_size   = size;
      m_mode   = mode;
      m_page_offset = 0;
      m_is_xsi = true;
      return;
   }
   #endif   //ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS

   //We calculate the difference between demanded and valid offset
   const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);

   if(size == 0){
      struct ::stat buf;
      if(0 != fstat(map_hnd.handle, &buf)){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //This can throw
      priv_size_from_mapping_size(buf.st_size, offset, page_offset, size);
   }

   #ifdef MAP_NOSYNC
      #define BOOST_INTERPROCESS_MAP_NOSYNC MAP_NOSYNC
   #else
      #define BOOST_INTERPROCESS_MAP_NOSYNC 0
   #endif   //MAP_NOSYNC

   //Create new mapping
   int prot    = 0;
   int flags   = map_options == default_map_options ? BOOST_INTERPROCESS_MAP_NOSYNC : map_options;

   #undef BOOST_INTERPROCESS_MAP_NOSYNC

   switch(mode)
   {
      case read_only:
         prot  |= PROT_READ;
         flags |= MAP_SHARED;
      break;

      case read_private:
         prot  |= (PROT_READ);
         flags |= MAP_PRIVATE;
      break;

      case read_write:
         prot  |= (PROT_WRITE | PROT_READ);
         flags |= MAP_SHARED;
      break;

      case copy_on_write:
         prot  |= (PROT_WRITE | PROT_READ);
         flags |= MAP_PRIVATE;
      break;

      default:
         {
            error_info err(mode_error);
            throw interprocess_exception(err);
         }
      break;
   }

   //Map it to the address space
   void* base = mmap ( const_cast<void*>(address)
                     , static_cast<std::size_t>(page_offset + size)
                     , prot
                     , flags
                     , mapping.get_mapping_handle().handle
                     , offset - page_offset);

   //Check if mapping was successful
   if(base == MAP_FAILED){
      error_info err = system_error_code();
      throw interprocess_exception(err);
   }

   //Calculate new base for the user
   m_base = static_cast<char*>(base) + page_offset;
   m_page_offset = page_offset;
   m_size   = size;

   //Check for fixed mapping error
   if(address && (base != address)){
      error_info err(busy_error);
      this->priv_close();
      throw interprocess_exception(err);
   }
}

inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
{
   void *shrink_page_start = 0;
   std::size_t shrink_page_bytes = 0;
   if(m_is_xsi || !this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
      return false;
   }
   else if(shrink_page_bytes){
      //In UNIX we can decommit and free virtual address space.
      return 0 == munmap(shrink_page_start, shrink_page_bytes);
   }
   else{
      return true;
   }
}

inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
{
   void *addr;
   if(m_is_xsi || !this->priv_flush_param_check(mapping_offset, addr, numbytes)){
      return false;
   }
   //Flush it all
   return msync(addr, numbytes, async ? MS_ASYNC : MS_SYNC) == 0;
}

inline bool mapped_region::advise(advice_types advice)
{
   int unix_advice = 0;
   //Modes: 0: none, 1: posix_madvise, 2: madvise
   const unsigned int mode_none = 0;
   const unsigned int mode_padv = 1;
   const unsigned int mode_madv = 2;
   // Suppress "unused variable" warnings
   (void)mode_padv;
   (void)mode_madv;
   unsigned int mode = mode_none;
   //Choose advice either from POSIX (preferred) or native Unix
   switch(advice){
      case advice_normal:
         #if defined(POSIX_MADV_NORMAL)
         unix_advice = POSIX_MADV_NORMAL;
         mode = mode_padv;
         #elif defined(MADV_NORMAL)
         unix_advice = MADV_NORMAL;
         mode = mode_madv;
         #endif
      break;
      case advice_sequential:
         #if defined(POSIX_MADV_SEQUENTIAL)
         unix_advice = POSIX_MADV_SEQUENTIAL;
         mode = mode_padv;
         #elif defined(MADV_SEQUENTIAL)
         unix_advice = MADV_SEQUENTIAL;
         mode = mode_madv;
         #endif
      break;
      case advice_random:
         #if defined(POSIX_MADV_RANDOM)
         unix_advice = POSIX_MADV_RANDOM;
         mode = mode_padv;
         #elif defined(MADV_RANDOM)
         unix_advice = MADV_RANDOM;
         mode = mode_madv;
         #endif
      break;
      case advice_willneed:
         #if defined(POSIX_MADV_WILLNEED)
         unix_advice = POSIX_MADV_WILLNEED;
         mode = mode_padv;
         #elif defined(MADV_WILLNEED)
         unix_advice = MADV_WILLNEED;
         mode = mode_madv;
         #endif
      break;
      case advice_dontneed:
         #if defined(POSIX_MADV_DONTNEED)
         unix_advice = POSIX_MADV_DONTNEED;
         mode = mode_padv;
         #elif defined(MADV_DONTNEED) && defined(BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS)
         unix_advice = MADV_DONTNEED;
         mode = mode_madv;
         #endif
      break;
      default:
      return false;
   }
   switch(mode){
      #if defined(POSIX_MADV_NORMAL)
         case mode_padv:
         return 0 == posix_madvise(this->priv_map_address(), this->priv_map_size(), unix_advice);
      #endif
      #if defined(MADV_NORMAL)
         case mode_madv:
         return 0 == madvise(
            #if defined(BOOST_INTERPROCESS_MADVISE_USES_CADDR_T)
            (caddr_t)
            #endif
            this->priv_map_address(), this->priv_map_size(), unix_advice);
      #endif
      default:
      return false;

   }
}

inline void mapped_region::priv_close()
{
   if(m_base != 0){
      #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
      if(m_is_xsi){
         int ret = ::shmdt(m_base);
         BOOST_ASSERT(ret == 0);
         (void)ret;
         return;
      }
      #endif //#ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
      munmap(this->priv_map_address(), this->priv_map_size());
      m_base = 0;
   }
}

inline void mapped_region::dont_close_on_destruction()
{  m_base = 0;   }

#endif   //#if defined (BOOST_INTERPROCESS_WINDOWS)

template<int dummy>
const std::size_t mapped_region::page_size_holder<dummy>::PageSize
   = mapped_region::page_size_holder<dummy>::get_page_size();

inline std::size_t mapped_region::get_page_size()
{
   if(!page_size_holder<0>::PageSize)
      return page_size_holder<0>::get_page_size();
   else
      return page_size_holder<0>::PageSize;
}

inline void mapped_region::swap(mapped_region &other)
{
   ::boost::adl_move_swap(this->m_base, other.m_base);
   ::boost::adl_move_swap(this->m_size, other.m_size);
   ::boost::adl_move_swap(this->m_page_offset, other.m_page_offset);
   ::boost::adl_move_swap(this->m_mode,  other.m_mode);
   #if defined (BOOST_INTERPROCESS_WINDOWS)
   ::boost::adl_move_swap(this->m_file_or_mapping_hnd, other.m_file_or_mapping_hnd);
   #else
   ::boost::adl_move_swap(this->m_is_xsi, other.m_is_xsi);
   #endif
}

//!No-op functor
struct null_mapped_region_function
{
   bool operator()(void *, std::size_t , bool) const
      {   return true;   }

   static std::size_t get_min_size()
   {  return 0;  }
};

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //BOOST_INTERPROCESS_MAPPED_REGION_HPP

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

#ifndef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
#define BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP

#if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
#  include <boost/interprocess/sync/windows/sync_utils.hpp>
#  include <boost/interprocess/detail/windows_intermodule_singleton.hpp>

namespace boost {
namespace interprocess {

template<int Dummy>
inline void mapped_region::destroy_syncs_in_range(const void *addr, std::size_t size)
{
   ipcdetail::sync_handles &handles =
      ipcdetail::windows_intermodule_singleton<ipcdetail::sync_handles>::get();
   handles.destroy_syncs_in_range(addr, size);
}

}  //namespace interprocess {
}  //namespace boost {

#endif   //defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)

#endif   //#ifdef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP

#endif   //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)