/mm/kasan/
  quarantine.c
      44  size_t bytes;  member
      57  q->bytes = 0;  in qlist_init()
      69  q->bytes += size;  in qlist_put()
      85  to->bytes += from->bytes;  in qlist_move_all()
     190  if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {  in quarantine_put()
     194  WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);  in quarantine_put()
     196  if (global_quarantine[quarantine_tail].bytes >=  in quarantine_put()
     251  WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);  in quarantine_reduce()
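
The quarantine.c hits show KASAN's quarantine keeping a running byte count per list: each qlist records the total size of the freed objects queued on it, a per-CPU list is flushed once its bytes exceed QUARANTINE_PERCPU_SIZE, and splicing a list into the global quarantine carries the count along. Below is a minimal userspace sketch of that accounting pattern; the qnode/qlist types, the sizes, and the function bodies are illustrative, mirroring the kernel's qlist_* names but not its implementation.

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-ins; not the kernel's definitions. */
    struct qnode {
        struct qnode *next;
    };

    struct qlist {
        struct qnode *head;
        struct qnode *tail;
        size_t bytes;               /* total payload bytes queued on this list */
    };

    static void qlist_init(struct qlist *q)
    {
        q->head = q->tail = NULL;
        q->bytes = 0;
    }

    static void qlist_put(struct qlist *q, struct qnode *n, size_t size)
    {
        n->next = NULL;
        if (q->tail)
            q->tail->next = n;
        else
            q->head = n;
        q->tail = n;
        q->bytes += size;           /* same accounting as qlist_put() above */
    }

    /* Splice everything from 'from' onto 'to', carrying the byte count along. */
    static void qlist_move_all(struct qlist *from, struct qlist *to)
    {
        if (!from->head)
            return;
        if (to->tail)
            to->tail->next = from->head;
        else
            to->head = from->head;
        to->tail = from->tail;
        to->bytes += from->bytes;
        qlist_init(from);
    }

    int main(void)
    {
        struct qlist percpu, global;
        struct qnode a, b;

        qlist_init(&percpu);
        qlist_init(&global);
        qlist_put(&percpu, &a, 64);     /* pretend object a occupies 64 bytes  */
        qlist_put(&percpu, &b, 128);    /* pretend object b occupies 128 bytes */
        qlist_move_all(&percpu, &global);
        printf("global quarantine holds %zu bytes\n", global.bytes);    /* 192 */
        return 0;
    }
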
/mm/
  process_vm_access.c
     103  size_t bytes;  in process_vm_rw_single_vec()  local
     115  bytes = pages * PAGE_SIZE - start_offset;  in process_vm_rw_single_vec()
     116  if (bytes > len)  in process_vm_rw_single_vec()
     117  bytes = len;  in process_vm_rw_single_vec()
     120  start_offset, bytes, iter,  in process_vm_rw_single_vec()
     122  len -= bytes;  in process_vm_rw_single_vec()
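
In process_vm_rw_single_vec() the chunk size is the number of bytes covered by the pinned pages minus the offset into the first page, clamped to what the caller still wants. A minimal sketch of just that clamping arithmetic, assuming a 4 KiB page size (PAGE_SIZE below is local to the sketch, not the kernel macro):

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* assumption for the sketch */

    /*
     * How many bytes can be copied out of 'pages' pinned pages when the copy
     * starts 'start_offset' bytes into the first page and at most 'len'
     * bytes are still wanted?
     */
    static size_t chunk_bytes(size_t pages, size_t start_offset, size_t len)
    {
        size_t bytes = pages * PAGE_SIZE - start_offset;

        if (bytes > len)
            bytes = len;
        return bytes;
    }

    int main(void)
    {
        /* Two pinned pages, copy starts 100 bytes into the first one. */
        printf("%zu\n", chunk_bytes(2, 100, 1 << 20));  /* 8092: limited by the pages */
        printf("%zu\n", chunk_bytes(2, 100, 512));      /* 512: limited by len        */
        return 0;
    }
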
  page_counter.c
     179  u64 bytes;  in page_counter_memparse()  local
     186  bytes = memparse(buf, &end);  in page_counter_memparse()
     190  *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);  in page_counter_memparse()
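
page_counter_memparse() converts a human-readable limit string into a page count capped at PAGE_COUNTER_MAX; the kernel's memparse() understands size suffixes such as K, M, and G. A hedged userspace approximation: the suffix handling and the PAGE_COUNTER_MAX value below are simplified stand-ins, not the kernel's.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096ULL               /* assumption for the sketch */
    #define PAGE_COUNTER_MAX (1ULL << 40)   /* illustrative clamp, not the kernel value */

    /* Simplified stand-in for the kernel's memparse(): bytes plus an optional K/M/G suffix. */
    static uint64_t parse_bytes(const char *buf)
    {
        char *end;
        uint64_t bytes = strtoull(buf, &end, 0);

        switch (*end) {
        case 'G': case 'g': bytes <<= 10;   /* fall through */
        case 'M': case 'm': bytes <<= 10;   /* fall through */
        case 'K': case 'k': bytes <<= 10; break;
        default: break;
        }
        return bytes;
    }

    static uint64_t limit_in_pages(const char *buf)
    {
        uint64_t pages = parse_bytes(buf) / PAGE_SIZE;

        return pages < PAGE_COUNTER_MAX ? pages : PAGE_COUNTER_MAX;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)limit_in_pages("512M"));   /* 131072 pages */
        return 0;
    }
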
  page_poison.c
     109  static void check_poison_mem(unsigned char *mem, size_t bytes)  in check_poison_mem()  argument
     118  start = memchr_inv(mem, PAGE_POISON, bytes);  in check_poison_mem()
     122  for (end = mem + bytes - 1; end > start; end--) {  in check_poison_mem()
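
check_poison_mem() looks for the first byte of a freed, poisoned page that no longer contains PAGE_POISON, then walks back from the end of the region to bound the corrupted span before reporting it. A userspace sketch with a local stand-in for the kernel's memchr_inv(); the 0xaa fill matches the kernel's default PAGE_POISON value but is defined locally here.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_POISON 0xaa    /* default free-page poison byte */

    /* Userspace stand-in for the kernel's memchr_inv(): first byte != c, or NULL. */
    static void *memchr_inv(const void *s, int c, size_t n)
    {
        const unsigned char *p = s;
        size_t i;

        for (i = 0; i < n; i++)
            if (p[i] != (unsigned char)c)
                return (void *)(p + i);
        return NULL;
    }

    /* Report the corrupted range inside a buffer that should be all poison. */
    static void check_poison_mem(unsigned char *mem, size_t bytes)
    {
        unsigned char *start, *end;

        start = memchr_inv(mem, PAGE_POISON, bytes);
        if (!start)
            return;             /* still fully poisoned: nothing wrote to the free page */

        for (end = mem + bytes - 1; end > start; end--)
            if (*end != PAGE_POISON)
                break;

        printf("pagealloc: corruption at offsets %td..%td\n", start - mem, end - mem);
    }

    int main(void)
    {
        unsigned char page[64];

        memset(page, PAGE_POISON, sizeof(page));
        page[10] = 0x42;        /* simulate stray writes into a freed page */
        page[20] = 0x43;
        check_poison_mem(page, sizeof(page));   /* reports offsets 10..20 */
        return 0;
    }
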
  bootmem.c
      56  unsigned long bytes = DIV_ROUND_UP(pages, 8);  in bootmap_bytes()  local
      58  return ALIGN(bytes, sizeof(long));  in bootmap_bytes()
      67  unsigned long bytes = bootmap_bytes(pages);  in bootmem_bootmap_pages()  local
      69  return PAGE_ALIGN(bytes) >> PAGE_SHIFT;  in bootmem_bootmap_pages()
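
bootmap_bytes() sizes the boot-time allocator's bitmap, one bit per page rounded up to whole longs, and bootmem_bootmap_pages() rounds that byte count up to whole pages. A self-contained sketch of the same arithmetic, assuming 4 KiB pages; DIV_ROUND_UP and ALIGN are re-created locally.

    #include <stdio.h>

    #define PAGE_SHIFT 12                       /* assumption: 4 KiB pages */
    #define PAGE_SIZE (1UL << PAGE_SHIFT)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    /* One bit per page, rounded up to a whole number of longs. */
    static unsigned long bootmap_bytes(unsigned long pages)
    {
        unsigned long bytes = DIV_ROUND_UP(pages, 8);

        return ALIGN(bytes, sizeof(long));
    }

    /* How many pages the bootmem bitmap itself occupies. */
    static unsigned long bootmem_bootmap_pages(unsigned long pages)
    {
        unsigned long bytes = bootmap_bytes(pages);

        return ALIGN(bytes, PAGE_SIZE) >> PAGE_SHIFT;
    }

    int main(void)
    {
        /* 1 GiB of 4 KiB pages = 262144 pages -> 32768-byte bitmap -> 8 pages. */
        printf("%lu\n", bootmem_bootmap_pages(262144));
        return 0;
    }
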
  filemap.c
    2711  unsigned long bytes; /* Bytes to write to page */  in generic_perform_write()  local
    2716  bytes = min_t(unsigned long, PAGE_SIZE - offset,  in generic_perform_write()
    2730  if (unlikely(iov_iter_fault_in_readable(i, bytes))) {  in generic_perform_write()
    2740  status = a_ops->write_begin(file, mapping, pos, bytes, flags,  in generic_perform_write()
    2748  copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);  in generic_perform_write()
    2751  status = a_ops->write_end(file, mapping, pos, bytes, copied,  in generic_perform_write()
    2769  bytes = min_t(unsigned long, PAGE_SIZE - offset,  in generic_perform_write()
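
generic_perform_write() never copies across a page cache page: each iteration takes the smaller of the space left in the current page (PAGE_SIZE - offset) and the data left in the iterator, copies it, and advances. The sketch below reproduces only that chunking for a given file position and length; the real loop also calls ->write_begin()/->write_end(), faults the user buffer in beforehand, and retries short copies, all of which are omitted here.

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* assumption for the sketch */

    /*
     * Split a write of 'count' bytes at file position 'pos' into page-sized
     * chunks the way generic_perform_write() iterates: every chunk stops at
     * the end of the current page cache page.
     */
    static void walk_write(unsigned long long pos, size_t count)
    {
        while (count) {
            size_t offset = pos & (PAGE_SIZE - 1);      /* offset within the page */
            size_t bytes = PAGE_SIZE - offset;          /* room left in the page  */

            if (bytes > count)
                bytes = count;

            /* Here the kernel would copy 'bytes' from the iov_iter into the page. */
            printf("page %llu: copy %zu bytes at offset %zu\n",
                   pos / PAGE_SIZE, bytes, offset);

            pos += bytes;
            count -= bytes;
        }
    }

    int main(void)
    {
        walk_write(4000, 300);  /* straddles a page boundary: 96 bytes, then 204 */
        return 0;
    }
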
  page-writeback.c
     396  unsigned long bytes = vm_dirty_bytes;  in domain_dirty_limits()  local
     416  if (bytes)  in domain_dirty_limits()
     417  ratio = min(DIV_ROUND_UP(bytes, global_avail),  in domain_dirty_limits()
     422  bytes = bg_bytes = 0;  in domain_dirty_limits()
     425  if (bytes)  in domain_dirty_limits()
     426  thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);  in domain_dirty_limits()
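
domain_dirty_limits() lets an absolute limit win over a percentage: when vm.dirty_bytes is set, the dirty threshold is that many bytes rounded up to pages; otherwise it is derived from vm.dirty_ratio and the available memory. A sketch of just that selection with illustrative parameter names; the real function also computes the background threshold and per-domain scaling, which are left out.

    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* assumption for the sketch */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Dirty threshold in pages: an absolute byte limit beats a ratio. */
    static unsigned long dirty_thresh_pages(unsigned long dirty_bytes,
                                            unsigned int dirty_ratio,
                                            unsigned long available_pages)
    {
        if (dirty_bytes)
            return DIV_ROUND_UP(dirty_bytes, PAGE_SIZE);
        return available_pages * dirty_ratio / 100;
    }

    int main(void)
    {
        /* vm.dirty_bytes = 64 MiB -> 16384 pages, the ratio is ignored. */
        printf("%lu\n", dirty_thresh_pages(64UL << 20, 20, 1UL << 20));
        /* No byte limit: 20% of 1 Mi available pages -> 209715 pages. */
        printf("%lu\n", dirty_thresh_pages(0, 20, 1UL << 20));
        return 0;
    }
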
  memory.c
    3874  int bytes, ret, offset;  in __access_remote_vm()  local
    3896  bytes = ret;  in __access_remote_vm()
    3899  bytes = len;  in __access_remote_vm()
    3901  if (bytes > PAGE_SIZE-offset)  in __access_remote_vm()
    3902  bytes = PAGE_SIZE-offset;  in __access_remote_vm()
    3907  maddr + offset, buf, bytes);  in __access_remote_vm()
    3911  buf, maddr + offset, bytes);  in __access_remote_vm()
    3916  len -= bytes;  in __access_remote_vm()
    3917  buf += bytes;  in __access_remote_vm()
    3918  addr += bytes;  in __access_remote_vm()
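
__access_remote_vm(), which backs ptrace peek/poke and /proc/<pid>/mem, copies at most the rest of the current page per iteration, then advances both the local buffer and the remote address by the bytes just moved. A userspace sketch of that chunked walk, using a tiny 64-byte "page" and a plain array standing in for the other process's memory:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE ((size_t)64)      /* tiny page so the chunking is visible */

    /*
     * Copy 'len' bytes starting at remote offset 'addr' into 'buf', one
     * page-bounded chunk at a time, the way __access_remote_vm() walks the
     * target address space. 'remote' stands in for the other process's
     * memory, which the kernel can only reach one mapped page at a time.
     */
    static void access_remote(const unsigned char *remote, size_t addr,
                              unsigned char *buf, size_t len)
    {
        while (len) {
            size_t offset = addr & (PAGE_SIZE - 1);
            size_t bytes = len;

            if (bytes > PAGE_SIZE - offset)
                bytes = PAGE_SIZE - offset;

            memcpy(buf, remote + addr, bytes);  /* the "read" direction */
            printf("copied %zu bytes from page %zu\n", bytes, addr / PAGE_SIZE);

            len -= bytes;
            buf += bytes;
            addr += bytes;
        }
    }

    int main(void)
    {
        unsigned char remote[256] = "hello from another address space";
        unsigned char out[64] = { 0 };

        access_remote(remote, 60, out, 40);     /* spans the 64-byte page boundary */
        return 0;
    }
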
  slab_common.c
     875  static inline int size_index_elem(size_t bytes)  in size_index_elem()  argument
     877  return (bytes - 1) / 8;  in size_index_elem()
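
size_index_elem() maps a small allocation size to a slot in an 8-byte-granular lookup table, so requests of 1..8 bytes share one entry, 9..16 the next, and so on; the kernel uses that table to pick a kmalloc cache for small sizes. A sketch with a few worked sizes:

    #include <stddef.h>
    #include <stdio.h>

    /* Sizes 1..8 map to slot 0, 9..16 to slot 1, ..., as in size_index_elem(). */
    static size_t size_index_elem(size_t bytes)
    {
        return (bytes - 1) / 8;
    }

    int main(void)
    {
        size_t sizes[] = { 1, 8, 9, 16, 17, 192 };
        size_t i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("size %3zu -> slot %zu\n", sizes[i], size_index_elem(sizes[i]));
        return 0;
    }
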
  slub.c
     715  u8 *start, unsigned int value, unsigned int bytes)  in check_bytes_and_report()  argument
     721  fault = memchr_inv(start, value, bytes);  in check_bytes_and_report()
     726  end = start + bytes;  in check_bytes_and_report()
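
check_bytes_and_report() verifies that a region which should hold a single fill value (a SLUB red zone or poisoned payload) still does, and if not, trims trailing intact bytes so the report covers only the first through last corrupted byte. The sketch below uses an open-coded scan instead of memchr_inv() and an illustrative 0xbb fill value:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Check that 'bytes' bytes at 'start' all equal 'value'; if not, report
     * the first and last offending offsets, the way check_bytes_and_report()
     * narrows down a corrupted region.
     */
    static int check_bytes(const unsigned char *start, unsigned char value, size_t bytes)
    {
        const unsigned char *fault = NULL, *end = start + bytes;
        size_t i;

        for (i = 0; i < bytes; i++)             /* stand-in for memchr_inv() */
            if (start[i] != value) {
                fault = start + i;
                break;
            }
        if (!fault)
            return 1;                           /* region intact */

        while (end > fault && end[-1] == value) /* trim trailing good bytes */
            end--;

        printf("corruption at offsets %td..%td\n", fault - start, end - 1 - start);
        return 0;
    }

    int main(void)
    {
        unsigned char redzone[16];

        memset(redzone, 0xbb, sizeof(redzone)); /* 0xbb is just an illustrative fill */
        redzone[3] = 0;                         /* simulate an overflow into the red zone */
        check_bytes(redzone, 0xbb, sizeof(redzone));    /* reports offsets 3..3 */
        return 0;
    }
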
  Kconfig
     224  # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.