/*
 * Copyright 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_memory_region.h"

#include <fcntl.h>
#include <unistd.h>

#include <android-base/unique_fd.h>
#include <log/log.h>
#include "base/bit_utils.h"  // For RoundDown, RoundUp.
#include "base/globals.h"
#include "base/logging.h"  // For VLOG.
#include "base/membarrier.h"
#include "base/memfd.h"
#include "base/systrace.h"
#include "gc/allocator/art-dlmalloc.h"
#include "jit/jit_scoped_code_cache_write.h"
#include "oat/oat_quick_method_header.h"
#include "palette/palette.h"

using android::base::unique_fd;

namespace art HIDDEN {
namespace jit {

// Data cache will be half of the capacity.
// Code cache will be the other half of the capacity.
// TODO: Make this adjustable. Currently must be 2. JitCodeCache relies on that.
static constexpr size_t kCodeAndDataCapacityDivider = 2;
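// For illustration (capacity value assumed): with a 64 MiB max capacity, the
// data cache gets 64 MiB / kCodeAndDataCapacityDivider = 32 MiB and the code
// cache the remaining 32 MiB.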

bool JitMemoryRegion::Initialize(size_t initial_capacity,
                                 size_t max_capacity,
                                 bool rwx_memory_allowed,
                                 bool is_zygote,
                                 std::string* error_msg) {
  ScopedTrace trace(__PRETTY_FUNCTION__);

  CHECK_GE(max_capacity, initial_capacity);
  CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
  // Align both capacities to page size, as that's the unit mspaces use.
  initial_capacity_ = RoundDown(initial_capacity, 2 * gPageSize);
  max_capacity_ = RoundDown(max_capacity, 2 * gPageSize);
  current_capacity_ = initial_capacity;
  data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
  exec_end_ = initial_capacity - data_end_;
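
  // Worked example (values assumed for illustration): with gPageSize == 4 KiB
  // and initial_capacity == 64 KiB (already a multiple of 2 * gPageSize),
  // initial_capacity_ == 64 KiB, data_end_ == 32 KiB and exec_end_ == 32 KiB.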

  const size_t capacity = max_capacity_;
  const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
  const size_t exec_capacity = capacity - data_capacity;

  // File descriptor enabling dual-view mapping of code section.
  unique_fd mem_fd;

  // The memory mappings we are going to create.
  MemMap data_pages;
  MemMap exec_pages;
  MemMap non_exec_pages;
  MemMap writable_data_pages;

  if (is_zygote) {
    android_errorWriteLog(0x534e4554, "200284993");  // Report to SafetyNet.
    // Because we are not going to GC code generated by the zygote, just use all available.
    current_capacity_ = max_capacity;
    mem_fd = unique_fd(CreateZygoteMemory(capacity, error_msg));
    if (mem_fd.get() < 0) {
      return false;
    }
  } else {
    // Bionic supports memfd_create, but the call may fail on older kernels.
    mem_fd = unique_fd(art::memfd_create("jit-cache", /* flags= */ 0));
    if (mem_fd.get() < 0) {
      std::ostringstream oss;
      oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
      if (!rwx_memory_allowed) {
        // Without RWX page permissions, the JIT cannot fall back to a single mapping, as it
        // requires transitioning the code pages to RWX for updates.
        *error_msg = oss.str();
        return false;
      }
      VLOG(jit) << oss.str();
    } else if (ftruncate(mem_fd, capacity) != 0) {
      std::ostringstream oss;
      oss << "Failed to initialize memory file: " << strerror(errno);
      *error_msg = oss.str();
      return false;
    }
  }

  // Map name specific for android_os_Debug.cpp accounting.
  std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
  std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";

  std::string error_str;
  int base_flags;
  if (mem_fd.get() >= 0) {
    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
    // enable dual mapping - we'll create a second mapping using the descriptor below. The
    // mappings will look like:
    //
    //       VA                  PA
    //
    //       +---------------+
    //       | non exec code |\
    //       +---------------+ \
    //       | writable data |\ \
    //       +---------------+ \ \
    //       :               :\ \ \
    //       +---------------+.\.\.+---------------+
    //       |  exec code    |  \ \|     code      |
    //       +---------------+...\.+---------------+
    //       | readonly data |    \|     data      |
    //       +---------------+.....+---------------+
    //
    // In this configuration code updates are written to the non-executable view of the code
    // cache, and the executable view of the code cache has fixed RX memory protections.
    //
    // This memory needs to be mapped shared as the code portions will have two mappings.
    //
    // Additionally, the zygote will create a dual view of the data portion of
    // the cache. This mapping will be read-only, whereas the second mapping
    // will be writable.

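    // A sketch of the address translation this layout enables, assuming both
    // views map the same file pages in the same order (see the translation
    // helpers used later in this file, e.g. GetNonExecutableAddress()):
    //
    //   writable_alias = non_exec_pages.Begin() + (x_addr - exec_pages.Begin());
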
    base_flags = MAP_SHARED;

    // Create the writable mappings now, so that in case of the zygote, we can
    // prevent any future writable mappings through sealing.
    if (exec_capacity > 0) {
      // For dual view, create the secondary view of code memory used for updating code. This view
      // is never executable.
      std::string name = exec_cache_name + "-rw";
      non_exec_pages = MemMap::MapFile(exec_capacity,
                                       kIsDebugBuild ? kProtR : kProtRW,
                                       base_flags,
                                       mem_fd,
                                       /* start= */ data_capacity,
                                       /* low_4gb= */ false,
                                       name.c_str(),
                                       &error_str);
      if (!non_exec_pages.IsValid()) {
        // This is unexpected.
        *error_msg = "Failed to map non-executable view of JIT code cache";
        return false;
      }
      // Create a dual view of the data cache.
      name = data_cache_name + "-rw";
      writable_data_pages = MemMap::MapFile(data_capacity,
                                            kProtRW,
                                            base_flags,
                                            mem_fd,
                                            /* start= */ 0,
                                            /* low_4gb= */ false,
                                            name.c_str(),
                                            &error_str);
      if (!writable_data_pages.IsValid()) {
        std::ostringstream oss;
        oss << "Failed to create dual data view: " << error_str;
        *error_msg = oss.str();
        return false;
      }
      if (writable_data_pages.MadviseDontFork() != 0) {
        *error_msg = "Failed to MadviseDontFork the writable data view";
        return false;
      }
      if (non_exec_pages.MadviseDontFork() != 0) {
        *error_msg = "Failed to MadviseDontFork the writable code view";
        return false;
      }
      // Now that we have created the writable and executable mappings, prevent creating any new
      // ones.
      if (is_zygote && !ProtectZygoteMemory(mem_fd.get(), error_msg)) {
        return false;
      }
    }

    // Map in low 4gb to simplify accessing root tables for x86_64.
    // We could do PC-relative addressing to avoid this problem, but that
    // would require reserving code and data area before submitting, which
    // means more windows for the code memory to be RWX.
    data_pages = MemMap::MapFile(
        data_capacity + exec_capacity,
        kProtR,
        base_flags,
        mem_fd,
        /* start= */ 0,
        /* low_4gb= */ true,
        data_cache_name.c_str(),
        &error_str);
  } else {
    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and JIT code pages. The mappings will look like:
    //
    //       VA                  PA
    //
    //       +---------------+...+---------------+
    //       |  exec code    |   |     code      |
    //       +---------------+...+---------------+
    //       |      data     |   |     data      |
    //       +---------------+...+---------------+
    //
    // In this configuration code updates are written to the executable view of the code cache,
    // and the executable view of the code cache transitions RX to RWX for the update and then
    // back to RX after the update.
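    // (ScopedCodeCacheWrite, included above from jit_scoped_code_cache_write.h,
    // is what performs those RX <-> RWX transitions around each update in this
    // single-view configuration.)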
    base_flags = MAP_PRIVATE | MAP_ANON;
    data_pages = MemMap::MapAnonymous(
        data_cache_name.c_str(),
        data_capacity + exec_capacity,
        kProtRW,
        /* low_4gb= */ true,
        &error_str);
  }

  if (!data_pages.IsValid()) {
    std::ostringstream oss;
    oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return false;
  }

  if (exec_capacity > 0) {
    uint8_t* const divider = data_pages.Begin() + data_capacity;
    // Set initial permission for executable view to catch any SELinux permission problems early
    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
    // executable as there is no code in the cache yet.
    exec_pages = data_pages.RemapAtEnd(divider,
                                       exec_cache_name.c_str(),
                                       kProtRX,
                                       base_flags | MAP_FIXED,
                                       mem_fd.get(),
                                       (mem_fd.get() >= 0) ? data_capacity : 0,
                                       &error_str);
    if (!exec_pages.IsValid()) {
      std::ostringstream oss;
      oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
      *error_msg = oss.str();
      return false;
    }
  } else {
    // Profiling only. No memory for code required.
  }

  data_pages_ = std::move(data_pages);
  exec_pages_ = std::move(exec_pages);
  non_exec_pages_ = std::move(non_exec_pages);
  writable_data_pages_ = std::move(writable_data_pages);

  VLOG(jit) << "Created JitMemoryRegion"
            << ": data_pages=" << reinterpret_cast<void*>(data_pages_.Begin())
            << ", exec_pages=" << reinterpret_cast<void*>(exec_pages_.Begin())
            << ", non_exec_pages=" << reinterpret_cast<void*>(non_exec_pages_.Begin())
            << ", writable_data_pages=" << reinterpret_cast<void*>(writable_data_pages_.Begin());

  // Now that the pages are initialized, initialize the spaces.

  // Initialize the data heap.
  data_mspace_ = create_mspace_with_base(
      HasDualDataMapping() ? writable_data_pages_.Begin() : data_pages_.Begin(),
      data_end_,
      /* locked= */ false);
  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";

  // Allow mspace to use the full data capacity.
  // It will still only use as little memory as possible and ask for MoreCore as needed.
  CHECK(IsAlignedParam(data_capacity, gPageSize));
  mspace_set_footprint_limit(data_mspace_, data_capacity);

  // Initialize the code heap.
  MemMap* code_heap = nullptr;
  if (non_exec_pages_.IsValid()) {
    code_heap = &non_exec_pages_;
  } else if (exec_pages_.IsValid()) {
    code_heap = &exec_pages_;
  }
  if (code_heap != nullptr) {
    // Make all pages reserved for the code heap writable. The mspace allocator, which manages
    // the heap, will take and initialize pages in create_mspace_with_base().
    {
      ScopedCodeCacheWrite scc(*this);
      exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
    }
    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
    SetFootprintLimit(current_capacity_);
  } else {
    exec_mspace_ = nullptr;
    SetFootprintLimit(current_capacity_);
  }
  return true;
}
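
// A minimal caller sketch (capacities assumed for illustration; in practice
// JitCodeCache drives this):
//
//   std::string error_msg;
//   if (!region->Initialize(/* initial_capacity= */ 64 * KB,
//                           /* max_capacity= */ 64 * MB,
//                           /* rwx_memory_allowed= */ true,
//                           /* is_zygote= */ false,
//                           &error_msg)) {
//     LOG(ERROR) << "Failed to initialize JIT memory region: " << error_msg;
//   }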

void JitMemoryRegion::SetFootprintLimit(size_t new_footprint) {
  size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
  DCHECK(IsAlignedParam(data_space_footprint, gPageSize));
  DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
  if (HasCodeMapping()) {
    ScopedCodeCacheWrite scc(*this);
    mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
  }
}
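
// Worked example (value assumed for illustration): new_footprint == 128 KiB
// gives a 64 KiB data share and, when a code mapping exists, leaves the exec
// mspace with the remaining 64 KiB as its footprint limit.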

bool JitMemoryRegion::IncreaseCodeCacheCapacity() {
  if (current_capacity_ == max_capacity_) {
    return false;
  }

  // Double the capacity if we're below 1MB, or increase it by 1MB if
  // we're above.
  if (current_capacity_ < 1 * MB) {
    current_capacity_ *= 2;
  } else {
    current_capacity_ += 1 * MB;
  }
  if (current_capacity_ > max_capacity_) {
    current_capacity_ = max_capacity_;
  }

  VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);

  SetFootprintLimit(current_capacity_);

  return true;
}
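
// Example growth sequence (a 64 KiB starting capacity is assumed here):
// 64 KiB -> 128 KiB -> 256 KiB -> 512 KiB -> 1 MiB -> 2 MiB -> 3 MiB -> ...
// capped at max_capacity_.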

// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
void* JitMemoryRegion::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
  if (mspace == exec_mspace_) {
    CHECK(exec_mspace_ != nullptr);
    const MemMap* const code_pages = GetUpdatableCodeMapping();
    void* result = code_pages->Begin() + exec_end_;
    exec_end_ += increment;
    return result;
  } else {
    CHECK_EQ(data_mspace_, mspace);
    const MemMap* const writable_data_pages = GetWritableDataMapping();
    void* result = writable_data_pages->Begin() + data_end_;
    data_end_ += increment;
    return result;
  }
}
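
// A note on the contract (as dlmalloc defines its MORECORE hook): a positive
// increment requests the next `increment` bytes past the current heap end, so
// the cursor (exec_end_ or data_end_) is bumped and the previous end is
// returned, sbrk-style.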

const uint8_t* JitMemoryRegion::CommitCode(ArrayRef<const uint8_t> reserved_code,
                                           ArrayRef<const uint8_t> code,
                                           const uint8_t* stack_map) {
  DCHECK(IsInExecSpace(reserved_code.data()));
  ScopedCodeCacheWrite scc(*this);

  size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
  size_t header_size = OatQuickMethodHeader::InstructionAlignedSize();
  size_t total_size = header_size + code.size();

  // Each allocation should be on its own set of cache lines.
  // `total_size` covers the OatQuickMethodHeader, the JIT generated machine code,
  // and any alignment padding.
  DCHECK_GT(total_size, header_size);
  DCHECK_LE(total_size, reserved_code.size());
  uint8_t* x_memory = const_cast<uint8_t*>(reserved_code.data());
  uint8_t* w_memory = const_cast<uint8_t*>(GetNonExecutableAddress(x_memory));
  // Ensure the header ends up at expected instruction alignment.
  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(w_memory + header_size), alignment);
  const uint8_t* result = x_memory + header_size;

  // Write the code.
  std::copy(code.begin(), code.end(), w_memory + header_size);

  // Write the header.
  OatQuickMethodHeader* method_header =
      OatQuickMethodHeader::FromCodePointer(w_memory + header_size);
  new (method_header) OatQuickMethodHeader((stack_map != nullptr) ? result - stack_map : 0u);

  // Both instruction and data caches need flushing to the point of unification where both share
  // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
  // newly added code are written out to the point of unification. Flushing the instruction
  // cache ensures the newly written code will be fetched from the point of unification before
  // use. Memory in the code cache is recycled as code is added and removed. The flushes
  // prevent stale code from residing in the instruction cache.
  //
  // Caches are flushed before write permission is removed because some ARMv8 Qualcomm kernels
  // may trigger a segfault if a page fault occurs when requesting a cache maintenance
  // operation. This is a kernel bug that we need to work around until affected devices
  // (e.g. Nexus 5X and 6P) stop being supported or their kernels are fixed.
  //
  // For reference, this behavior is caused by this commit:
  // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
  //
  bool cache_flush_success = true;
  if (HasDualCodeMapping()) {
    // Flush d-cache for the non-executable mapping.
    cache_flush_success = FlushCpuCaches(w_memory, w_memory + total_size);
  }

  // Invalidate i-cache for the executable mapping.
  if (cache_flush_success) {
    cache_flush_success = FlushCpuCaches(x_memory, x_memory + total_size);
  }

  // If flushing the cache has failed, reject the allocation because we can't guarantee
  // correctness of the instructions present in the processor caches.
  if (!cache_flush_success) {
    PLOG(ERROR) << "Cache flush failed triggering code allocation failure";
    return nullptr;
  }

  // Ensure CPU instruction pipelines are flushed for all cores. This is necessary for
  // correctness as code may still be in instruction pipelines despite the i-cache flush. It is
  // not safe to assume that changing permissions with mprotect (RX->RWX->RX) will cause a TLB
  // shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
  // notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
  // hardware support that broadcasts TLB invalidations and so their kernels have no software
  // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
  // address this (see membarrier(2)). The membarrier here will fail on prior kernels and on
  // platforms lacking the appropriate support.
  art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);

  return result;
}
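
// Resulting layout of the committed region (a sketch; any alignment padding
// is folded into header_size by InstructionAlignedSize()):
//
//   [x_memory, x_memory + header_size)   padding + OatQuickMethodHeader
//   [result,   x_memory + total_size)    the JITed machine code
//
// where result == x_memory + header_size is the entry point handed back to
// the caller.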

static void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
    REQUIRES(Locks::jit_lock_)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
  const uint32_t length = roots.size();
  // Put all roots in `roots_data`.
  for (uint32_t i = 0; i < length; ++i) {
    ObjPtr<mirror::Object> object = roots[i].Get();
    gc_roots[i] = GcRoot<mirror::Object>(object);
  }
  // Store the length of the table at the end. This will allow fetching it from a stack_map
  // pointer.
  reinterpret_cast<uint32_t*>(roots_data)[length] = length;
}
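
// Layout of `roots_data` after the fill (a sketch for N roots; a GcRoot is a
// 32-bit compressed reference, which is why the uint32_t indexing above lands
// just past the table):
//
//   [GcRoot #0][GcRoot #1]...[GcRoot #N-1][uint32_t N]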

bool JitMemoryRegion::CommitData(ArrayRef<const uint8_t> reserved_data,
                                 const std::vector<Handle<mirror::Object>>& roots,
                                 ArrayRef<const uint8_t> stack_map) {
  DCHECK(IsInDataSpace(reserved_data.data()));
  uint8_t* roots_data = GetWritableDataAddress(reserved_data.data());
  size_t root_table_size = ComputeRootTableSize(roots.size());
  uint8_t* stack_map_data = roots_data + root_table_size;
  DCHECK_LE(root_table_size + stack_map.size(), reserved_data.size());
  FillRootTable(roots_data, roots);
  memcpy(stack_map_data, stack_map.data(), stack_map.size());
  // Flush data cache, as compiled code references literals in it.
  // TODO(oth): establish whether this is necessary.
  if (UNLIKELY(!FlushCpuCaches(roots_data, roots_data + root_table_size + stack_map.size()))) {
    VLOG(jit) << "Failed to flush data in CommitData";
    return false;
  }
  return true;
}
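
// Layout of `reserved_data` after a successful commit (a sketch):
//
//   [root table: ComputeRootTableSize(roots.size()) bytes][stack map]
//
// The stack map immediately follows the root table, which is what lets the
// table length be recovered from a stack_map pointer (see FillRootTable()).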

const uint8_t* JitMemoryRegion::AllocateCode(size_t size) {
  size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
  void* result = mspace_memalign(exec_mspace_, alignment, size);
  if (UNLIKELY(result == nullptr)) {
    return nullptr;
  }
  used_memory_for_code_ += mspace_usable_size(result);
  return reinterpret_cast<uint8_t*>(GetExecutableAddress(result));
}

void JitMemoryRegion::FreeCode(const uint8_t* code) {
  code = GetNonExecutableAddress(code);
  used_memory_for_code_ -= mspace_usable_size(code);
  mspace_free(exec_mspace_, const_cast<uint8_t*>(code));
}

const uint8_t* JitMemoryRegion::AllocateData(size_t data_size) {
  void* result = mspace_malloc(data_mspace_, data_size);
  if (UNLIKELY(result == nullptr)) {
    return nullptr;
  }
  used_memory_for_data_ += mspace_usable_size(result);
  return reinterpret_cast<uint8_t*>(GetNonWritableDataAddress(result));
}

void JitMemoryRegion::FreeData(const uint8_t* data) {
  FreeWritableData(GetWritableDataAddress(data));
}

void JitMemoryRegion::FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) {
  used_memory_for_data_ -= mspace_usable_size(writable_data);
  mspace_free(data_mspace_, writable_data);
}

#if defined(__BIONIC__) && defined(ART_TARGET)
// The code below only works on bionic on target.

int JitMemoryRegion::CreateZygoteMemory(size_t capacity, std::string* error_msg) {
  if (CacheOperationsMaySegFault()) {
    // Zygote JIT requires dual code mappings by design. We can only do this if the cache flush
    // and invalidate instructions work without raising faults.
    *error_msg = "Zygote memory only works with dual mappings";
    return -1;
  }
  // Check if kernel support exists, otherwise fall back to ashmem.
  static const char* kRegionName = "jit-zygote-cache";
  if (art::IsSealFutureWriteSupported()) {
    int fd = art::memfd_create(kRegionName, MFD_ALLOW_SEALING);
    if (fd == -1) {
      std::ostringstream oss;
      oss << "Failed to create zygote mapping: " << strerror(errno);
      *error_msg = oss.str();
      return -1;
    }

    if (ftruncate(fd, capacity) != 0) {
      std::ostringstream oss;
      oss << "Failed to create zygote mapping: " << strerror(errno);
      *error_msg = oss.str();
      return -1;
    }

    return fd;
  }

  LOG(INFO) << "Falling back to ashmem implementation for JIT zygote mapping";

  int fd;
  palette_status_t status = PaletteAshmemCreateRegion(kRegionName, capacity, &fd);
  if (status != PALETTE_STATUS_OK) {
    CHECK_EQ(status, PALETTE_STATUS_CHECK_ERRNO);
    std::ostringstream oss;
    oss << "Failed to create zygote mapping: " << strerror(errno);
    *error_msg = oss.str();
    return -1;
  }
  return fd;
}

bool JitMemoryRegion::ProtectZygoteMemory(int fd, std::string* error_msg) {
  if (art::IsSealFutureWriteSupported()) {
    if (fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL | F_SEAL_FUTURE_WRITE)
            == -1) {
      std::ostringstream oss;
      oss << "Failed to protect zygote mapping: " << strerror(errno);
      *error_msg = oss.str();
      return false;
    }
  } else {
    palette_status_t status = PaletteAshmemSetProtRegion(fd, PROT_READ | PROT_EXEC);
    if (status != PALETTE_STATUS_OK) {
      CHECK_EQ(status, PALETTE_STATUS_CHECK_ERRNO);
      std::ostringstream oss;
      oss << "Failed to protect zygote mapping: " << strerror(errno);
      *error_msg = oss.str();
      return false;
    }
  }
  return true;
}
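
// A note on the seals applied above (standard Linux memfd semantics, stated
// here for context): F_SEAL_FUTURE_WRITE leaves already-established writable
// mappings usable but makes any later PROT_WRITE mapping of the fd fail,
// which is why Initialize() creates the writable views before sealing.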

#else

int JitMemoryRegion::CreateZygoteMemory(size_t capacity, std::string* error_msg) {
  // To simplify host building, we don't rely on the latest memfd features.
  LOG(WARNING) << "Returning un-sealable region on non-bionic";
  static const char* kRegionName = "/jit-zygote-cache";
  int fd = art::memfd_create(kRegionName, 0);
  if (fd == -1) {
    std::ostringstream oss;
    oss << "Failed to create zygote mapping: " << strerror(errno);
    *error_msg = oss.str();
    return -1;
  }
  if (ftruncate(fd, capacity) != 0) {
    std::ostringstream oss;
    oss << "Failed to create zygote mapping: " << strerror(errno);
    *error_msg = oss.str();
    return -1;
  }
  return fd;
}

bool JitMemoryRegion::ProtectZygoteMemory([[maybe_unused]] int fd,
                                          [[maybe_unused]] std::string* error_msg) {
  return true;
}

#endif

}  // namespace jit
}  // namespace art