
Searched refs:order (Results 1 – 25 of 179) sorted by relevance


/arch/arm64/kvm/hyp/nvhe/
page_alloc.c
35 u8 order) in __find_buddy_nocheck() argument
39 addr ^= (PAGE_SIZE << order); in __find_buddy_nocheck()
54 u8 order) in __find_buddy_avail() argument
56 struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order); in __find_buddy_avail()
58 if (!buddy || buddy->order != order || buddy->refcount) in __find_buddy_avail()
98 u8 order = p->order; in __hyp_attach_page() local
100 memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order); in __hyp_attach_page()
111 p->order = HYP_NO_ORDER; in __hyp_attach_page()
112 for (; (order + 1) < pool->max_order; order++) { in __hyp_attach_page()
113 buddy = __find_buddy_avail(pool, p, order); in __hyp_attach_page()
[all …]
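
A minimal sketch of the buddy-address relation used above, assuming PAGE_SHIFT is 12; it is not the hyp allocator itself, only the arithmetic behind addr ^= (PAGE_SIZE << order): order-n blocks are size-aligned, so XOR with the block size toggles between the two halves of the parent block.

#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* The buddy of a size-aligned order-n block lives at the address with
 * bit (PAGE_SHIFT + order) flipped, which is exactly what the XOR does. */
static inline uintptr_t buddy_addr(uintptr_t addr, unsigned int order)
{
	return addr ^ (PAGE_SIZE << order);
}
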
/arch/arm/lib/
lib1funcs.S
106 .macro ARM_DIV2_ORDER divisor, order argument
110 clz \order, \divisor
111 rsb \order, \order, #31
117 movhs \order, #16
118 movlo \order, #0
122 addhs \order, \order, #8
126 addhs \order, \order, #4
129 addhi \order, \order, #3
130 addls \order, \order, \divisor, lsr #1
137 .macro ARM_MOD_BODY dividend, divisor, order, spare
[all …]
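
ARM_DIV2_ORDER above turns a power-of-two divisor into its shift count: clz yields the leading-zero count and the rsb against #31 converts that into the bit index. A C sketch of the same computation, with __builtin_clz standing in for the clz instruction (nonzero, power-of-two divisor assumed):

/* order = 31 - clz(divisor): the index of the single set bit,
 * i.e. log2 of a power-of-two divisor. */
static inline unsigned int div2_order(unsigned int divisor)
{
	return 31 - __builtin_clz(divisor);
}
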
/arch/s390/mm/
page-states.c
71 static inline void set_page_unused(struct page *page, int order) in set_page_unused() argument
75 for (i = 0; i < (1 << order); i++) in set_page_unused()
82 static inline void set_page_stable_dat(struct page *page, int order) in set_page_stable_dat() argument
86 for (i = 0; i < (1 << order); i++) in set_page_stable_dat()
93 static inline void set_page_stable_nodat(struct page *page, int order) in set_page_stable_nodat() argument
97 for (i = 0; i < (1 << order); i++) in set_page_stable_nodat()
208 void arch_free_page(struct page *page, int order) in arch_free_page() argument
212 set_page_unused(page, order); in arch_free_page()
215 void arch_alloc_page(struct page *page, int order) in arch_alloc_page() argument
220 set_page_stable_dat(page, order); in arch_alloc_page()
[all …]
init.c
65 unsigned int order; in setup_zero_pages() local
70 order = 7; in setup_zero_pages()
73 while (order > 2 && (totalram_pages() >> 10) < (1UL << order)) in setup_zero_pages()
74 order--; in setup_zero_pages()
76 empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); in setup_zero_pages()
81 split_page(page, order); in setup_zero_pages()
82 for (i = 1 << order; i > 0; i--) { in setup_zero_pages()
87 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; in setup_zero_pages()
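
setup_zero_pages() above scales its block of zero pages to the amount of RAM: it starts at order 7 (128 pages) and halves while total memory, measured as totalram_pages() >> 10, is still smaller than 1 << order. A standalone sketch of that heuristic, with the page count passed in rather than read from the mm layer:

static unsigned int zero_pages_order(unsigned long totalram_pages)
{
	unsigned int order = 7;		/* start with 128 zero pages */

	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;		/* shrink on small-memory systems */
	return order;
}
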
/arch/alpha/include/asm/
agp.h
14 #define alloc_gatt_pages(order) \ argument
15 ((char *)__get_free_pages(GFP_KERNEL, (order)))
16 #define free_gatt_pages(table, order) \ argument
17 free_pages((unsigned long)(table), (order))
/arch/sparc/include/asm/
agp.h
12 #define alloc_gatt_pages(order) \ argument
13 ((char *)__get_free_pages(GFP_KERNEL, (order)))
14 #define free_gatt_pages(table, order) \ argument
15 free_pages((unsigned long)(table), (order))
/arch/ia64/include/asm/
agp.h
22 #define alloc_gatt_pages(order) \ argument
23 ((char *)__get_free_pages(GFP_KERNEL, (order)))
24 #define free_gatt_pages(table, order) \ argument
25 free_pages((unsigned long)(table), (order))
page.h
140 long order; in get_order() local
142 order = ia64_getf_exp(d); in get_order()
143 order = order - PAGE_SHIFT - 0xffff + 1; in get_order()
144 if (order < 0) in get_order()
145 order = 0; in get_order()
146 return order; in get_order()
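
The ia64 get_order() above extracts the answer from the floating-point exponent of the size; whatever the method, the contract is to return the smallest order for which PAGE_SIZE << order covers the requested size. A portable sketch of that contract (size > 0 and PAGE_SHIFT of 12 assumed):

#define PAGE_SHIFT 12

static inline int get_order_sketch(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {		/* smallest order with (PAGE_SIZE << order) >= size */
		order++;
		size >>= 1;
	}
	return order;
}
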
/arch/parisc/include/asm/
agp.h
16 #define alloc_gatt_pages(order) \ argument
17 ((char *)__get_free_pages(GFP_KERNEL, (order)))
18 #define free_gatt_pages(table, order) \ argument
19 free_pages((unsigned long)(table), (order))
/arch/powerpc/include/asm/
agp.h
13 #define alloc_gatt_pages(order) \ argument
14 ((char *)__get_free_pages(GFP_KERNEL, (order)))
15 #define free_gatt_pages(table, order) \ argument
16 free_pages((unsigned long)(table), (order))
/arch/x86/include/asm/
agp.h
27 #define alloc_gatt_pages(order) \ argument
28 ((char *)__get_free_pages(GFP_KERNEL, (order)))
29 #define free_gatt_pages(table, order) \ argument
30 free_pages((unsigned long)(table), (order))
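
All of the agp.h headers above define alloc_gatt_pages()/free_gatt_pages() as the same thin wrappers around __get_free_pages()/free_pages(), so the caller derives the order from the GATT size and must hand the same order back when freeing. A hypothetical usage sketch (gatt_table_alloc and gatt_bytes are illustrative names, not kernel code):

#include <linux/gfp.h>
#include <asm/agp.h>
#include <asm/page.h>

static char *gatt_table_alloc(size_t gatt_bytes, unsigned int *order_out)
{
	unsigned int order = get_order(gatt_bytes);
	char *table = alloc_gatt_pages(order);	/* __get_free_pages(GFP_KERNEL, order) */

	if (table)
		*order_out = order;	/* the same order is needed for free_gatt_pages() */
	return table;
}
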
/arch/powerpc/platforms/cell/
ras.c
99 int order; member
107 static int __init cbe_ptcal_enable_on_node(int nid, int order) in cbe_ptcal_enable_on_node() argument
121 area->order = order; in cbe_ptcal_enable_on_node()
124 area->order); in cbe_ptcal_enable_on_node()
155 __free_pages(area->pages, area->order); in cbe_ptcal_enable_on_node()
166 int order, found_mic = 0; in cbe_ptcal_enable() local
179 order = get_order(*size); in cbe_ptcal_enable()
184 cbe_ptcal_enable_on_node(of_node_to_nid(np), order); in cbe_ptcal_enable()
199 cbe_ptcal_enable_on_node(*nid, order); in cbe_ptcal_enable()
225 1 << (area->order + PAGE_SHIFT)); in cbe_ptcal_disable()
[all …]
/arch/s390/include/asm/
sigp.h
41 static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm, in ____pcpu_sigp() argument
52 : [addr] "d" (addr), [order] "a" (order) in ____pcpu_sigp()
58 static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm, in __pcpu_sigp() argument
64 cc = ____pcpu_sigp(addr, order, parm, &_status); in __pcpu_sigp()
/arch/x86/kernel/
aperture_64.c
155 static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order) in read_agp() argument
173 old_order = *order; in read_agp()
180 *order = 7 - nbits; in read_agp()
181 if ((int)*order < 0) /* < 32MB */ in read_agp()
182 *order = 0; in read_agp()
195 if (aper + (32ULL<<(20 + *order)) > 0x100000000ULL) { in read_agp()
197 bus, slot, func, 32 << *order, apsizereg); in read_agp()
198 *order = old_order; in read_agp()
202 bus, slot, func, aper, aper + (32ULL << (*order + 20)) - 1, in read_agp()
203 32 << *order, apsizereg); in read_agp()
[all …]
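
In read_agp() above, the order encodes the AGP aperture size: the aperture spans 32 MB << order bytes, which is why the code compares aper + (32ULL << (20 + *order)) against the 4 GB boundary and prints 32 << *order MB. A sketch of that relation:

/* AGP aperture size in bytes for a given order:
 * order 0 = 32 MB, order 1 = 64 MB, and so on. */
static inline unsigned long long aper_size_bytes(unsigned int order)
{
	return 32ULL << (20 + order);
}
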
kvmclock.c
205 unsigned int order; in kvmclock_init_mem() local
213 order = get_order(ncpus * sizeof(*hvclock_mem)); in kvmclock_init_mem()
215 p = alloc_pages(GFP_KERNEL, order); in kvmclock_init_mem()
217 pr_warn("%s: failed to alloc %d pages", __func__, (1U << order)); in kvmclock_init_mem()
229 1UL << order); in kvmclock_init_mem()
231 __free_pages(p, order); in kvmclock_init_mem()
238 memset(hvclock_mem, 0, PAGE_SIZE << order); in kvmclock_init_mem()
/arch/x86/hyperv/
hv_proc.c
30 int order; in hv_call_deposit_pages() local
59 order = 31 - __builtin_clz(num_pages); in hv_call_deposit_pages()
62 pages[i] = alloc_pages_node(node, GFP_KERNEL, order); in hv_call_deposit_pages()
65 if (!order) { in hv_call_deposit_pages()
70 --order; in hv_call_deposit_pages()
73 split_page(pages[i], order); in hv_call_deposit_pages()
74 counts[i] = 1 << order; in hv_call_deposit_pages()
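
hv_call_deposit_pages() above picks the largest order that still fits the remaining page count, drops one order when the allocation fails, and records 1 << order pages per chunk (split_page() then lets the block be handed over page by page). A sketch of that fallback pattern, where try_alloc_block() is a hypothetical stand-in for alloc_pages_node():

void *try_alloc_block(unsigned int order);	/* hypothetical allocator stand-in */

static int deposit_sketch(unsigned int num_pages)
{
	while (num_pages) {
		unsigned int order = 31 - __builtin_clz(num_pages);	/* floor(log2) */
		void *block;

		while (!(block = try_alloc_block(order))) {
			if (!order)
				return -1;	/* even a single page failed */
			--order;		/* fall back to a smaller block */
		}
		num_pages -= 1U << order;	/* counts[i] = 1 << order above */
	}
	return 0;
}
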
/arch/arm/xen/
mm.c
26 unsigned long xen_get_swiotlb_free_pages(unsigned int order) in xen_get_swiotlb_free_pages() argument
41 return __get_free_pages(flags, order); in xen_get_swiotlb_free_pages()
121 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, in xen_create_contiguous_region() argument
133 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) in xen_destroy_contiguous_region() argument
/arch/sparc/kernel/
pci_sun4v.c
185 unsigned long flags, order, first_page, npages, n; in dma_4v_alloc_coherent() local
195 order = get_order(size); in dma_4v_alloc_coherent()
196 if (unlikely(order >= MAX_ORDER)) in dma_4v_alloc_coherent()
205 page = alloc_pages_node(nid, gfp, order); in dma_4v_alloc_coherent()
210 memset((char *)first_page, 0, PAGE_SIZE << order); in dma_4v_alloc_coherent()
254 free_pages(first_page, order); in dma_4v_alloc_coherent()
329 unsigned long order, npages, entry; in dma_4v_free_coherent() local
349 order = get_order(size); in dma_4v_free_coherent()
350 if (order < 10) in dma_4v_free_coherent()
351 free_pages((unsigned long)cpu, order); in dma_4v_free_coherent()
[all …]
pci_fire.c
233 unsigned long pages, order, i; in pci_fire_msiq_alloc() local
235 order = get_order(512 * 1024); in pci_fire_msiq_alloc()
236 pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order); in pci_fire_msiq_alloc()
239 order); in pci_fire_msiq_alloc()
242 memset((char *)pages, 0, PAGE_SIZE << order); in pci_fire_msiq_alloc()
265 unsigned long pages, order; in pci_fire_msiq_free() local
267 order = get_order(512 * 1024); in pci_fire_msiq_free()
270 free_pages(pages, order); in pci_fire_msiq_free()
iommu.c
97 unsigned long i, order, sz, num_tsb_entries; in iommu_table_init() local
132 order = get_order(tsbsize); in iommu_table_init()
133 page = alloc_pages_node(numa_node, GFP_KERNEL, order); in iommu_table_init()
201 unsigned long order, first_page; in dma_4u_alloc_coherent() local
209 order = get_order(size); in dma_4u_alloc_coherent()
210 if (order >= 10) in dma_4u_alloc_coherent()
214 page = alloc_pages_node(nid, gfp, order); in dma_4u_alloc_coherent()
219 memset((char *)first_page, 0, PAGE_SIZE << order); in dma_4u_alloc_coherent()
226 free_pages(first_page, order); in dma_4u_alloc_coherent()
251 unsigned long order, npages; in dma_4u_free_coherent() local
[all …]
/arch/powerpc/sysdev/xive/
native.c
130 __be32 *qpage, u32 order, bool can_escalate) in xive_native_configure_queue() argument
138 if (order) { in xive_native_configure_queue()
146 q->msk = order ? ((1u << (order - 2)) - 1) : 0; in xive_native_configure_queue()
172 rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags); in xive_native_configure_queue()
656 u32 order; in xive_native_alloc_vp_block() local
658 order = fls(max_vcpus) - 1; in xive_native_alloc_vp_block()
659 if (max_vcpus > (1 << order)) in xive_native_alloc_vp_block()
660 order++; in xive_native_alloc_vp_block()
663 max_vcpus, order); in xive_native_alloc_vp_block()
666 rc = opal_xive_alloc_vp_block(order); in xive_native_alloc_vp_block()
[all …]
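
xive_native_alloc_vp_block() above rounds max_vcpus up to a power of two: fls(max_vcpus) - 1 is the floor of log2, and the order is bumped when max_vcpus is not already a power of two. A sketch with __builtin_clz standing in for fls() (max_vcpus > 0 assumed):

/* Ceiling of log2(max_vcpus): smallest order with (1 << order) >= max_vcpus. */
static unsigned int vp_block_order(unsigned int max_vcpus)
{
	unsigned int order = 31 - __builtin_clz(max_vcpus);	/* fls(max_vcpus) - 1 */

	if (max_vcpus > (1U << order))
		order++;
	return order;
}
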
/arch/m68k/ifpsp060/src/
README-SRC
6 assembler, however it is being included in order to comply with the
9 You don't need to actually assemble these files in order to compile a
/arch/mips/kernel/
irq.c
56 unsigned int order = get_order(IRQ_STACK_SIZE); in init_IRQ() local
67 void *s = (void *)__get_free_pages(GFP_KERNEL, order); in init_IRQ()
/arch/um/include/shared/
kern_util.h
22 extern unsigned long alloc_stack(int order, int atomic);
23 extern void free_stack(unsigned long stack, int order);
/arch/powerpc/kvm/
book3s_64_mmu_hv.c
54 u32 order; member
71 int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order) in kvmppc_allocate_hpt() argument
79 if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER)) in kvmppc_allocate_hpt()
82 page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT)); in kvmppc_allocate_hpt()
85 memset((void *)hpt, 0, (1ul << order)); in kvmppc_allocate_hpt()
91 |__GFP_NOWARN, order - PAGE_SHIFT); in kvmppc_allocate_hpt()
97 npte = 1ul << (order - 4); in kvmppc_allocate_hpt()
103 kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT)); in kvmppc_allocate_hpt()
105 free_pages(hpt, order - PAGE_SHIFT); in kvmppc_allocate_hpt()
109 info->order = order; in kvmppc_allocate_hpt()
[all …]
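
In kvmppc_allocate_hpt() above, order is the log2 of the hashed page table size in bytes: each HPT entry is 16 bytes, hence npte = 1ul << (order - 4), and the allocation paths convert the same order to a page count with order - PAGE_SHIFT. A sketch of those two conversions (16-byte HPTEs, order >= PAGE_SHIFT, and PAGE_SHIFT of 12 assumed):

#define PAGE_SHIFT 12	/* assumption for the sketch */

static unsigned long hpt_entries(unsigned int order)
{
	return 1UL << (order - 4);		/* bytes / 16-byte HPTE */
}

static unsigned long hpt_pages(unsigned int order)
{
	return 1UL << (order - PAGE_SHIFT);	/* bytes / page size */
}
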
