
Searched refs: order (Results 1 – 13 of 13), sorted by relevance

/kernel/locking/
test-ww_mutex.c
351 int *order; in get_random_order() local
354 order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); in get_random_order()
355 if (!order) in get_random_order()
356 return order; in get_random_order()
359 order[n] = n; in get_random_order()
364 tmp = order[n]; in get_random_order()
365 order[n] = order[r]; in get_random_order()
366 order[r] = tmp; in get_random_order()
370 return order; in get_random_order()
384 int *order; in stress_inorder_work() local
[all …]
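
The excerpt above is the ww_mutex stress test building a random locking order: fill an index array with 0..count-1, then shuffle it in place. A minimal self-contained sketch of that pattern, assuming get_random_u32() as the RNG (the helper name and the RNG choice here are illustrative, not the test's own):

    #include <linux/random.h>
    #include <linux/slab.h>

    /* Return a random permutation of 0..count-1, or NULL on allocation failure. */
    static int *get_shuffled_order(int count)
    {
            int *order, n, r, tmp;

            order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
            if (!order)
                    return NULL;

            for (n = 0; n < count; n++)
                    order[n] = n;

            /* Fisher-Yates shuffle; modulo bias is irrelevant for a stress test. */
            for (n = count - 1; n > 0; n--) {
                    r = get_random_u32() % (n + 1);
                    tmp = order[n];
                    order[n] = order[r];
                    order[r] = tmp;
            }
            return order;
    }
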
/kernel/dma/
pool.c
85 unsigned int order; in atomic_pool_expand() local
91 order = min(get_order(pool_size), MAX_ORDER-1); in atomic_pool_expand()
94 pool_size = 1 << (PAGE_SHIFT + order); in atomic_pool_expand()
96 page = dma_alloc_from_contiguous(NULL, 1 << order, in atomic_pool_expand()
97 order, false); in atomic_pool_expand()
99 page = alloc_pages(gfp, order); in atomic_pool_expand()
100 } while (!page && order-- > 0); in atomic_pool_expand()
120 1 << order); in atomic_pool_expand()
133 1 << order); in atomic_pool_expand()
143 __free_pages(page, order); in atomic_pool_expand()
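
atomic_pool_expand() above shows a common fallback idiom: ask for the largest block first and halve the order until an allocation succeeds. A sketch of just that loop, assuming a plain alloc_pages() path (the real code also tries dma_alloc_from_contiguous() and remaps the result); the helper name is hypothetical:

    #include <linux/gfp.h>

    /* Try 2^max_order pages first, then progressively smaller blocks. */
    static struct page *alloc_largest_available(gfp_t gfp, unsigned int max_order,
                                                unsigned int *got_order)
    {
            struct page *page;
            unsigned int order = max_order;

            do {
                    page = alloc_pages(gfp, order);
            } while (!page && order-- > 0);

            if (page)
                    *got_order = order;     /* caller needs it to map/free the block */
            return page;
    }
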
coherent.c
155 int order = get_order(size); in __dma_alloc_from_coherent() local
165 pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); in __dma_alloc_from_coherent()
220 int order, void *vaddr) in __dma_release_from_coherent() argument
228 bitmap_release_region(mem->bitmap, page, order); in __dma_release_from_coherent()
247 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) in dma_release_from_dev_coherent() argument
251 return __dma_release_from_coherent(mem, order, vaddr); in dma_release_from_dev_coherent()
254 int dma_release_from_global_coherent(int order, void *vaddr) in dma_release_from_global_coherent() argument
259 return __dma_release_from_coherent(dma_coherent_default_memory, order, in dma_release_from_global_coherent()
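
__dma_alloc_from_coherent() carves 2^order pages out of a per-device bitmap, and the release path hands them back with the same order. A sketch of that round trip, with hypothetical names and a caller-supplied bitmap standing in for the device's coherent pool:

    #include <linux/bitmap.h>
    #include <linux/mm.h>

    /* Reserve and later release a size-byte chunk from a page-granular bitmap. */
    static int bitmap_carve_example(unsigned long *bitmap, unsigned int nbits,
                                    size_t size)
    {
            int order = get_order(size);    /* pages needed, rounded up to 2^order */
            int pageno;

            pageno = bitmap_find_free_region(bitmap, nbits, order);
            if (pageno < 0)
                    return pageno;          /* no naturally aligned free run left */

            /* ... pages [pageno, pageno + (1 << order)) are now ours ... */

            bitmap_release_region(bitmap, pageno, order);
            return 0;
    }
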
swiotlb.c
290 unsigned int order; in swiotlb_late_init_with_default_size() local
301 order = get_order(io_tlb_nslabs << IO_TLB_SHIFT); in swiotlb_late_init_with_default_size()
302 io_tlb_nslabs = SLABS_PER_PAGE << order; in swiotlb_late_init_with_default_size()
305 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { in swiotlb_late_init_with_default_size()
307 order); in swiotlb_late_init_with_default_size()
310 order--; in swiotlb_late_init_with_default_size()
317 if (order != get_order(bytes)) { in swiotlb_late_init_with_default_size()
319 (PAGE_SIZE << order) >> 20); in swiotlb_late_init_with_default_size()
320 io_tlb_nslabs = SLABS_PER_PAGE << order; in swiotlb_late_init_with_default_size()
324 free_pages((unsigned long)vstart, order); in swiotlb_late_init_with_default_size()
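
A worked example of the bookkeeping above, assuming 4 KiB pages, the usual 2 KiB IO-TLB slab size, and the default 64 MiB table: 32768 slabs need 64 MiB, i.e. order 14. If the retry loop only manages an order-13 allocation, io_tlb_nslabs is recomputed as SLABS_PER_PAGE << 13 = 16384 and the warning reports the 32 MiB that was actually obtained.
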
Kconfig
179 int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
184 PAGE_SIZE order which is greater than or equal to the requested buffer
187 specify the maximum PAGE_SIZE order for contiguous buffers. Larger
188 buffers will be aligned only to this specified order. The order is
191 For example, if your system defaults to 4KiB pages, the order value
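
A quick worked example of the option above, assuming the usual PAGE_SIZE << order meaning: with 4 KiB pages, an alignment order of 8 caps buffer alignment at 4 KiB << 8 = 1 MiB, so a 16 MiB contiguous buffer is only guaranteed 1 MiB alignment rather than alignment to its own size.
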
/kernel/
kexec_core.c
299 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages() argument
305 pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order); in kimage_alloc_pages()
310 set_page_private(pages, order); in kimage_alloc_pages()
311 count = 1 << order; in kimage_alloc_pages()
328 unsigned int order, count, i; in kimage_free_pages() local
330 order = page_private(page); in kimage_free_pages()
331 count = 1 << order; in kimage_free_pages()
337 __free_pages(page, order); in kimage_free_pages()
351 unsigned int order) in kimage_alloc_normal_control_pages() argument
370 count = 1 << order; in kimage_alloc_normal_control_pages()
[all …]
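
kimage_alloc_pages()/kimage_free_pages() above use a small trick worth calling out: the allocation order is stashed in page_private(), so the free path can recover the block size from the page alone. A stripped-down sketch of just that bookkeeping (the real kexec code additionally zeroes the pages itself, counts them, and adjusts refcounts):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *alloc_tracked_pages(gfp_t gfp_mask, unsigned int order)
    {
            struct page *pages = alloc_pages(gfp_mask, order);

            if (pages)
                    set_page_private(pages, order); /* remember size for the free path */
            return pages;
    }

    static void free_tracked_pages(struct page *page)
    {
            unsigned int order = page_private(page);

            __free_pages(page, order);
    }
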
/kernel/events/
ring_buffer.c
605 static struct page *rb_alloc_aux_page(int node, int order) in rb_alloc_aux_page() argument
609 if (order > MAX_ORDER) in rb_alloc_aux_page()
610 order = MAX_ORDER; in rb_alloc_aux_page()
613 page = alloc_pages_node(node, PERF_AUX_GFP, order); in rb_alloc_aux_page()
614 } while (!page && order--); in rb_alloc_aux_page()
616 if (page && order) { in rb_alloc_aux_page()
623 split_page(page, order); in rb_alloc_aux_page()
625 set_page_private(page, order); in rb_alloc_aux_page()
708 int last, order; in rb_alloc_aux() local
710 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); in rb_alloc_aux()
[all …]
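
rb_alloc_aux_page() combines two of the patterns already seen: clamp the requested order, fall back to smaller orders on failure, then split_page() the high-order block so each constituent page can be freed (or mapped) on its own, with the original order remembered in page_private(). A sketch with hypothetical GFP flags standing in for perf's PERF_AUX_GFP:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *alloc_aux_pages(int node, int order)
    {
            struct page *page;

            if (order > MAX_ORDER)
                    order = MAX_ORDER;

            do {
                    page = alloc_pages_node(node, GFP_KERNEL | __GFP_NOWARN, order);
            } while (!page && order--);

            if (page && order) {
                    split_page(page, order);        /* 2^order independent order-0 pages */
                    set_page_private(page, order);  /* first page records the run length */
            }
            return page;
    }
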
/kernel/trace/
ftrace.c
1094 int order; member
3159 int order; in ftrace_allocate_records() local
3167 order = get_count_order(pages); in ftrace_allocate_records()
3174 order--; in ftrace_allocate_records()
3177 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); in ftrace_allocate_records()
3181 if (!order) in ftrace_allocate_records()
3183 order--; in ftrace_allocate_records()
3187 ftrace_number_of_pages += 1 << order; in ftrace_allocate_records()
3190 cnt = (PAGE_SIZE << order) / ENTRY_SIZE; in ftrace_allocate_records()
3191 pg->order = order; in ftrace_allocate_records()
[all …]
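
ftrace_allocate_records() sizes its record blocks with get_count_order(), which rounds a page count up to the next power of two (e.g. get_count_order(3) == 2, i.e. 4 pages), retries at smaller orders on failure, and finally derives how many fixed-size entries fit in whatever it obtained. A compressed sketch with hypothetical names (the real code also trims the initial estimate to avoid wasting more than half a block):

    #include <linux/bitops.h>
    #include <linux/gfp.h>

    /* pages must be >= 1; returns NULL only if even a single page is unavailable. */
    static void *alloc_record_block(unsigned int pages, size_t entry_size,
                                    unsigned int *entries)
    {
            int order = get_count_order(pages);
            unsigned long block;

            for (;;) {
                    block = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
                    if (block)
                            break;
                    if (!order)
                            return NULL;
                    order--;
            }

            *entries = (PAGE_SIZE << order) / entry_size;
            return (void *)block;
    }
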
trace.c
2260 int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN); in free_saved_cmdlines_buffer() local
2264 free_pages((unsigned long)s, order); in free_saved_cmdlines_buffer()
2272 int order; in allocate_cmdlines_buffer() local
2276 order = get_order(orig_size); in allocate_cmdlines_buffer()
2277 size = 1 << (order + PAGE_SHIFT); in allocate_cmdlines_buffer()
2278 page = alloc_pages(GFP_KERNEL, order); in allocate_cmdlines_buffer()
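
The cmdline-buffer pair above relies on get_order(size) being deterministic: whatever order the allocation used, the free path recomputes the same one from the same size. A tiny sketch of that round trip with hypothetical helpers:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void *alloc_rounded(size_t size)
    {
            struct page *page = alloc_pages(GFP_KERNEL, get_order(size));

            return page ? page_address(page) : NULL;
    }

    static void free_rounded(void *addr, size_t size)
    {
            /* must pass the same size (hence the same order) used at allocation */
            free_pages((unsigned long)addr, get_order(size));
    }
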
Kconfig
645 separated out as a stand-alone facility in order to allow it
/kernel/time/
Kconfig
140 other dependencies to provide in order to make the full
/kernel/rcu/
Kconfig
153 value to the maximum value possible in order to reduce the
/kernel/power/
Kconfig
270 In order to use APM, you will need supporting software. For location