
Searched refs:off (Results 1 – 10 of 10) sorted by relevance

/mm/
percpu.c
334  int off = chunk->map[i] & ~1; in pcpu_count_occupied_pages() local
337 if (!PAGE_ALIGNED(off) && i > 0) { in pcpu_count_occupied_pages()
340 if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE)) in pcpu_count_occupied_pages()
341 off = round_down(off, PAGE_SIZE); in pcpu_count_occupied_pages()
352 return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0); in pcpu_count_occupied_pages()
501 static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size, in pcpu_fit_in_area() argument
504 int cand_off = off; in pcpu_fit_in_area()
507 int head = ALIGN(cand_off, align) - off; in pcpu_fit_in_area()
521 page_start = PFN_DOWN(head + off); in pcpu_fit_in_area()
522 page_end = PFN_UP(head + off + size); in pcpu_fit_in_area()
[all …]
zsmalloc.c
844  unsigned long off = 0; in obj_idx_to_offset() local
847 off = page->index; in obj_idx_to_offset()
849 return off + obj_idx * class_size; in obj_idx_to_offset()
909 unsigned long off = 0; in init_zspage() local
926 page->index = off; in init_zspage()
929 link = (struct link_free *)vaddr + off / sizeof(*link); in init_zspage()
931 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
945 off %= PAGE_SIZE; in init_zspage()
1048 struct page *pages[2], int off, int size) in __zs_map_object() argument
1052 return area->vm_addr + off; in __zs_map_object()
[all …]
hugetlb_cgroup.c
257  char *buf, size_t nbytes, loff_t off) in hugetlb_cgroup_write() argument
289 char *buf, size_t nbytes, loff_t off) in hugetlb_cgroup_reset() argument
slub.c
626  unsigned int off; /* Offset of last byte */ in print_trailer() local
648 off = s->offset + sizeof(void *); in print_trailer()
650 off = s->inuse; in print_trailer()
653 off += 2 * sizeof(struct track); in print_trailer()
655 if (off != size_from_object(s)) in print_trailer()
657 print_section("Padding ", p + off, size_from_object(s) - off); in print_trailer()
770 unsigned long off = s->inuse; /* The end of info */ in check_pad_bytes() local
774 off += sizeof(void *); in check_pad_bytes()
778 off += 2 * sizeof(struct track); in check_pad_bytes()
780 if (size_from_object(s) == off) in check_pad_bytes()
[all …]
mempolicy.c
1780 struct vm_area_struct *vma, unsigned long off) in offset_il_node() argument
1789 target = (unsigned int)off % nnodes; in offset_il_node()
1803 unsigned long off; in interleave_nid() local
1813 off = vma->vm_pgoff >> (shift - PAGE_SHIFT); in interleave_nid()
1814 off += (addr - vma->vm_start) >> shift; in interleave_nid()
1815 return offset_il_node(pol, vma, off); in interleave_nid()
bootmem.c
484  unsigned long off, unsigned long align) in align_off() argument
490 return ALIGN(base + off, align) - base; in align_off()
memory.c
771  unsigned long off; in vm_normal_page() local
772 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page()
773 if (pfn == vma->vm_pgoff + off) in vm_normal_page()
2803 int off; in do_fault_around() local
2809 off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in do_fault_around()
2810 pte -= off; in do_fault_around()
2811 pgoff -= off; in do_fault_around()
vmstat.c
1225 unsigned long off = l - (unsigned long *)m->private; in vmstat_show() local
1227 seq_printf(m, "%s %lu\n", vmstat_text[off], *l); in vmstat_show()
Kconfig
389  more than it requires. To deal with this, mmap() is able to trim off
392 If trimming is enabled, the excess is trimmed off and returned to the
memcontrol.c
3954 loff_t off) in mem_cgroup_force_empty_write() argument
4187 char *buf, size_t nbytes, loff_t off) in mem_cgroup_write() argument
4264 size_t nbytes, loff_t off) in mem_cgroup_reset() argument
5001 char *buf, size_t nbytes, loff_t off) in memcg_write_event_control() argument