//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "MachVMRegion.h"
#include "DNBLog.h"
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>
#include <dlfcn.h>

MachVMMemory::MachVMMemory() :
    m_page_size (kInvalidPageSize),
    m_err (0)
{
}

MachVMMemory::~MachVMMemory()
{
}

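// Return the page size for the given task, caching the result. When the
// kernel supports TASK_VM_INFO, ask for the task's own page size (it may
// differ from the host's); otherwise fall back to host_page_size().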
nub_size_t
MachVMMemory::PageSize(task_t task)
{
    if (m_page_size == kInvalidPageSize)
    {
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                m_page_size = vm_info.page_size;
                return m_page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}

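// Clamp "count" so that a transfer starting at "addr" will not cross a
// page boundary in the target task.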
nub_size_t
MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
{
    const nub_size_t page_size = PageSize(task);
    if (page_size > 0)
    {
        nub_size_t page_offset = (addr % page_size);
        nub_size_t bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page)
            count = bytes_left_in_page;
    }
    return count;
}

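// Fill in "region_info" for the region containing "address". If the address
// is not in a mapped region, describe the invalid region instead, inferring
// its size from the start of the next mapped region when possible.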
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that "address"
            // wasn't in a valid region, but the "vmRegion" info was successfully
            // read from the task, which means the info describes the next valid
            // region from which we can infer the size of this invalid region.
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't get any info about the size from the next region, just fill
        // 1 in as the byte size.
        if (region_info->size == 0)
            region_info->size = 1;

        // Not readable, writable or executable
        region_info->permissions = 0;
    }
    return true;
}

// For integrated graphics chips, this makes the accounting info for 'wired'
// memory more like top's.
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

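    // Resolve and cache the sysctl MIBs for the machdep.memmap.* values the
    // first time through; the names never change, so look them up only once.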
    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if(0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }


        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if(-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if(mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if(-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
                        &reserved_len, NULL, 0))
        {
            return 0;
        }

        if(-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
                        &unusable_len, NULL, 0))
        {
            return 0;
        }

        if(-1 == sysctl(mib_other, mib_other_len, &other,
                        &other_len, NULL, 0))
        {
            return 0;
        }

        if(reserved_len == sizeof(reserved)
           && unusable_len == sizeof(unusable)
           && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if(stolen >= mb128)
            {
                stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
                stolenPages = stolen / PageSize (task);
            }
        }
    }

    calculated = true;
    return stolenPages;
}

static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    sysctl(mib, 2, &physical_memory, &len, NULL, 0);
    calculated = true;
    return physical_memory;
}

// Unlike vmmap, rsize and dirty_size are not adjusted for the dyld shared
// cache or for multiple __LINKEDIT segments. In practice dirty_size doesn't
// differ much, but rsize may. Since the adjustment carries a performance
// penalty, only dirty_size is used right now.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    task_vm_info_data_t vm_info;
    mach_msg_type_number_t info_count;
    kern_return_t kr;

    info_count = TASK_VM_INFO_COUNT;
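    // Prefer the purgeable-aware task_info flavor when the SDK defines it.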
#ifdef TASK_VM_INFO_PURGEABLE
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
#else
    kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
#endif
    if (kr == KERN_SUCCESS)
        dirty_size = vm_info.internal;

#else
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

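    // Walk the task's address space with mach_vm_region_recurse(), descending
    // into submaps and summing resident and dirtied pages for each region.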
    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // It seems like this is a good break too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break; // reached last region
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            address = address+size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    vm_size_t pagesize = PageSize (task);
    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;

#endif
}

// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch(type) {
        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default: {
            // Log error about unknown CPU type
            break;
        }
    }

    return(addr >= base && addr < (base + size));
}

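// Compute the task's resident private (rprvt) and virtual private (vprvt)
// sizes by walking its regions with VM_REGION_TOP_INFO, accounting for each
// share mode the way top does.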
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;
    vm_size_t pagesize = PageSize (task);

    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data
            // regions mapped in. If so, set global_shared_text_data_mapped
            // to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region,
            // since that's the only region type we care about within the
            // current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}

#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
#ifndef TASK_VM_INFO_PURGEABLE
// cribbed from sysmond
static uint64_t
SumVMPurgeableInfo(const vm_purgeable_info_t info)
{
    uint64_t sum = 0;
    int i;

    for (i = 0; i < 8; i++)
    {
        sum += info->fifo_data[i].size;
    }
    sum += info->obsolete_data.size;
    for (i = 0; i < 8; i++)
    {
        sum += info->lifo_data[i].size;
    }

    return sum;
}
#endif /* !TASK_VM_INFO_PURGEABLE */
#endif

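// Fill in the task's purgeable and anonymous memory usage. With the
// TASK_VM_INFO_PURGEABLE flavor the kernel reports both directly; otherwise
// fall back to task_purgable_info() (looked up dynamically via dlsym, since
// it isn't available everywhere) and derive anonymous memory from the
// internal byte count.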
static void
GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable, uint64_t &anonymous)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    kern_return_t kr;
#ifndef TASK_VM_INFO_PURGEABLE
    task_purgable_info_t purgeable_info;
    uint64_t purgeable_sum = 0;
#endif /* !TASK_VM_INFO_PURGEABLE */
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

#ifndef TASK_VM_INFO_PURGEABLE
    typedef kern_return_t (*task_purgable_info_type) (task_t, task_purgable_info_t *);
    task_purgable_info_type task_purgable_info_ptr = NULL;
    task_purgable_info_ptr = (task_purgable_info_type)dlsym(RTLD_NEXT, "task_purgable_info");
    if (task_purgable_info_ptr != NULL)
    {
        kr = (*task_purgable_info_ptr)(task, &purgeable_info);
        if (kr == KERN_SUCCESS) {
            purgeable_sum = SumVMPurgeableInfo(&purgeable_info);
            purgeable = purgeable_sum;
        }
    }
#endif /* !TASK_VM_INFO_PURGEABLE */

    info_count = TASK_VM_INFO_COUNT;
#ifdef TASK_VM_INFO_PURGEABLE
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
#else
    kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
#endif
    if (kr == KERN_SUCCESS)
    {
#ifdef TASK_VM_INFO_PURGEABLE
        purgeable = vm_info.purgeable_volatile_resident;
        anonymous = vm_info.internal - vm_info.purgeable_volatile_pmap;
#else
        if (purgeable_sum < vm_info.internal)
        {
            anonymous = vm_info.internal - purgeable_sum;
        }
        else
        {
            anonymous = 0;
        }
#endif
    }

#endif
}

nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
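        // System-wide VM statistics; fold the stolen pages into the wired
        // count so the numbers line up with top's.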
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap's strategy. We don't use the returned rsize
            // for now; we prefer to match top's version since that's what
            // we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }

        if (scanType & eProfileMemoryAnonymous)
        {
            GetPurgeableAndAnonymous(task, purgeable, anonymous);
        }
    }

    return true;
}

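// Read up to "data_count" bytes from "address" in the task, one page-sized
// chunk at a time via mach_vm_read(). Returns the number of bytes actually
// read, which may be short if part of the range is unmapped or unreadable.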
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = 0;
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Success())
        {
            if (curr_bytes_read != curr_size)
            {
                if (DNBLogCheckLogBit(LOG_MEMORY))
                    m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
            }
            ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
            ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
            total_bytes_read += curr_bytes_read;
            curr_addr += curr_bytes_read;
            curr_data += curr_bytes_read;
        }
        else
        {
            break;
        }
    }
    return total_bytes_read;
}


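// Write "data_count" bytes to "address" in the task. The write is split up
// region by region so that page protections can be set to read/write with
// SetProtections() before each chunk is written.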
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;


    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // Error should have already been posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}


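// Write within a single region, one page-sized chunk at a time via
// mach_vm_write(). On non-x86 targets the caches are flushed with
// vm_machine_attribute() so any freshly written code is picked up.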
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}