
Lines matching refs:size in mm/vmalloc.c

344 static struct vmap_area *alloc_vmap_area(unsigned long size,  in alloc_vmap_area()  argument
355 BUG_ON(!size); in alloc_vmap_area()
356 BUG_ON(size & ~PAGE_MASK); in alloc_vmap_area()
382 size < cached_hole_size || in alloc_vmap_area()
399 if (addr + size < addr) in alloc_vmap_area()
404 if (addr + size < addr) in alloc_vmap_area()
427 while (addr + size > first->va_start && addr + size <= vend) { in alloc_vmap_area()
431 if (addr + size < addr) in alloc_vmap_area()
442 if (addr + size > vend) in alloc_vmap_area()
446 va->va_end = addr + size; in alloc_vmap_area()
468 "use vmalloc=<size> to increase size.\n", size); in alloc_vmap_area()
905 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) in vb_alloc() argument
912 BUG_ON(size & ~PAGE_MASK); in vb_alloc()
913 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); in vb_alloc()
914 if (WARN_ON(size == 0)) { in vb_alloc()
922 order = get_order(size); in vb_alloc()
963 static void vb_free(const void *addr, unsigned long size) in vb_free() argument
970 BUG_ON(size & ~PAGE_MASK); in vb_free()
971 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); in vb_free()
973 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size); in vb_free()
975 order = get_order(size); in vb_free()
985 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); in vb_free()
1063 unsigned long size = count << PAGE_SHIFT; in vm_unmap_ram() local
1071 debug_check_no_locks_freed(mem, size); in vm_unmap_ram()
1072 vmap_debug_free_range(addr, addr+size); in vm_unmap_ram()
1075 vb_free(mem, size); in vm_unmap_ram()
1098 unsigned long size = count << PAGE_SHIFT; in vm_map_ram() local
1103 mem = vb_alloc(size, GFP_KERNEL); in vm_map_ram()
1109 va = alloc_vmap_area(size, PAGE_SIZE, in vm_map_ram()
1117 if (vmap_page_range(addr, addr + size, prot, pages) < 0) { in vm_map_ram()
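vb_alloc()/vb_free() serve small requests (at most VMAP_MAX_ALLOC pages, rounded up to a power of two by get_order()) out of per-CPU vmap blocks; anything larger falls through to alloc_vmap_area() plus vmap_page_range(), as lines 1103-1117 show. A sketch of the public pair from a caller's side, assuming the caller already holds a pages[] array:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: transiently map `count` pages, use them, then unmap.
 * In this kernel generation vm_map_ram() still takes a prot argument. */
static void *map_transiently(struct page **pages, unsigned int count)
{
	void *mem = vm_map_ram(pages, count, NUMA_NO_NODE, PAGE_KERNEL);

	if (!mem)
		return NULL;
	/* ... use the contiguous mapping at mem ... */
	return mem;
}

static void unmap_transiently(void *mem, unsigned int count)
{
	vm_unmap_ram(mem, count);	/* count must match the map call */
}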
1143 BUG_ON(tmp->addr < vm->addr + vm->size); in vm_area_add_early()
1146 BUG_ON(tmp->addr + tmp->size > vm->addr); in vm_area_add_early()
1170 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; in vm_area_register_early()
1200 va->va_end = va->va_start + tmp->size; in vmalloc_init()
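vm_area_add_early() and vm_area_register_early() let boot code reserve vmalloc space for statically allocated vm_structs before vmalloc_init() runs; vmalloc_init() then turns each one into a vmap_area (line 1200). A hedged sketch of early registration, with the flags and size chosen purely for illustration:

#include <linux/vmalloc.h>

static struct vm_struct early_map;	/* must not be dynamically allocated */

void __init reserve_early_window(void)
{
	early_map.flags = VM_IOREMAP;		/* illustrative */
	early_map.size  = 4 * PAGE_SIZE;	/* illustrative */
	vm_area_register_early(&early_map, PAGE_SIZE);
	/* early_map.addr now holds the reserved virtual range */
}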
1229 int map_kernel_range_noflush(unsigned long addr, unsigned long size, in map_kernel_range_noflush() argument
1232 return vmap_page_range_noflush(addr, addr + size, prot, pages); in map_kernel_range_noflush()
1249 void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) in unmap_kernel_range_noflush() argument
1251 vunmap_page_range(addr, addr + size); in unmap_kernel_range_noflush()
1263 void unmap_kernel_range(unsigned long addr, unsigned long size) in unmap_kernel_range() argument
1265 unsigned long end = addr + size; in unmap_kernel_range()
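map_kernel_range_noflush() and unmap_kernel_range_noflush() install and remove page-table entries without any cache or TLB maintenance, which is left to the caller; unmap_kernel_range() is the flushing variant (it computes end = addr + size and flushes the whole range itself). A sketch under the assumption that `area` came from get_vm_area() and `pages` holds size >> PAGE_SHIFT pages:

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

/* Sketch: populate a reserved range, then tear it down with flushes. */
static int map_then_unmap(struct vm_struct *area, struct page **pages,
			  unsigned long size)
{
	unsigned long addr = (unsigned long)area->addr;

	if (map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages) < 0)
		return -ENOMEM;
	flush_cache_vmap(addr, addr + size);	/* caller owns the flushing */

	/* ... use the mapping ... */

	unmap_kernel_range(addr, size);	/* flushes cache and TLB itself */
	return 0;
}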
1291 vm->size = va->va_end - va->va_start; in setup_vmalloc_vm()
1309 static struct vm_struct *__get_vm_area_node(unsigned long size, in __get_vm_area_node() argument
1318 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER); in __get_vm_area_node()
1320 size = PAGE_ALIGN(size); in __get_vm_area_node()
1321 if (unlikely(!size)) in __get_vm_area_node()
1331 size += PAGE_SIZE; in __get_vm_area_node()
1333 va = alloc_vmap_area(size, align, start, end, node, gfp_mask); in __get_vm_area_node()
1344 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, in __get_vm_area() argument
1347 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, in __get_vm_area()
1352 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, in __get_vm_area_caller() argument
1356 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, in __get_vm_area_caller()
1369 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) in get_vm_area() argument
1371 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, in get_vm_area()
1376 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, in get_vm_area_caller() argument
1379 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, in get_vm_area_caller()
1425 vm->size -= PAGE_SIZE; in remove_vm_area()
1450 debug_check_no_locks_freed(addr, area->size); in __vunmap()
1451 debug_check_no_obj_freed(addr, area->size); in __vunmap()
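The __get_vm_area_node() matches show the guard-page bookkeeping: one extra PAGE_SIZE is added when the area is created (line 1331) and subtracted again in remove_vm_area() (line 1425), so vm->size includes the guard page for as long as the area is live, which is also why the debug hooks in __vunmap() (lines 1450-1451) cover area->size. A sketch pairing the public reserve/release calls:

#include <linux/printk.h>
#include <linux/vmalloc.h>

/* Sketch: reserve a bare virtual range (no backing pages) and free it. */
static int vm_area_roundtrip(void)
{
	struct vm_struct *area = get_vm_area(16 * PAGE_SIZE, VM_IOREMAP);

	if (!area)
		return -ENOMEM;
	pr_info("reserved %p, size %lu (includes the guard page)\n",
		area->addr, area->size);
	free_vm_area(area);	/* removes the area and kfree()s it */
	return 0;
}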
1556 static void *__vmalloc_node(unsigned long size, unsigned long align,
1612 (area->nr_pages*PAGE_SIZE), area->size); in __vmalloc_area_node()
1632 void *__vmalloc_node_range(unsigned long size, unsigned long align, in __vmalloc_node_range() argument
1638 unsigned long real_size = size; in __vmalloc_node_range()
1640 size = PAGE_ALIGN(size); in __vmalloc_node_range()
1641 if (!size || (size >> PAGE_SHIFT) > totalram_pages) in __vmalloc_node_range()
1644 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED, in __vmalloc_node_range()
1689 static void *__vmalloc_node(unsigned long size, unsigned long align, in __vmalloc_node() argument
1693 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, in __vmalloc_node()
1697 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) in __vmalloc() argument
1699 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, in __vmalloc()
1704 static inline void *__vmalloc_node_flags(unsigned long size, in __vmalloc_node_flags() argument
1707 return __vmalloc_node(size, 1, flags, PAGE_KERNEL, in __vmalloc_node_flags()
1720 void *vmalloc(unsigned long size) in vmalloc() argument
1722 return __vmalloc_node_flags(size, NUMA_NO_NODE, in vmalloc()
1737 void *vzalloc(unsigned long size) in vzalloc() argument
1739 return __vmalloc_node_flags(size, NUMA_NO_NODE, in vzalloc()
1751 void *vmalloc_user(unsigned long size) in vmalloc_user() argument
1756 ret = __vmalloc_node(size, SHMLBA, in vmalloc_user()
1779 void *vmalloc_node(unsigned long size, int node) in vmalloc_node() argument
1781 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, in vmalloc_node()
1798 void *vzalloc_node(unsigned long size, int node) in vzalloc_node() argument
1800 return __vmalloc_node_flags(size, node, in vzalloc_node()
1821 void *vmalloc_exec(unsigned long size) in vmalloc_exec() argument
1823 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, in vmalloc_exec()
1842 void *vmalloc_32(unsigned long size) in vmalloc_32() argument
1844 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, in vmalloc_32()
1856 void *vmalloc_32_user(unsigned long size) in vmalloc_32_user() argument
1861 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, in vmalloc_32_user()
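Every allocator in the vmalloc() family above is a thin wrapper that funnels into __vmalloc_node_range(): size is page-aligned and zero or larger-than-RAM requests are rejected against totalram_pages (lines 1640-1641), then __get_vm_area_node() is paired with per-page allocation. Typical use of the wrappers, with vfree() as the common release path:

#include <linux/vmalloc.h>

static int vmalloc_demo(void)
{
	char *buf  = vmalloc(64 * 1024);	/* uninitialized contents */
	char *zbuf = vzalloc(64 * 1024);	/* zero-filled */

	if (!buf || !zbuf) {
		vfree(buf);	/* vfree(NULL) is a no-op */
		vfree(zbuf);
		return -ENOMEM;
	}
	/* ... */
	vfree(zbuf);
	vfree(buf);
	return 0;
}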
2126 void *kaddr, unsigned long size) in remap_vmalloc_range_partial() argument
2130 size = PAGE_ALIGN(size); in remap_vmalloc_range_partial()
2142 if (kaddr + size > area->addr + area->size) in remap_vmalloc_range_partial()
2155 size -= PAGE_SIZE; in remap_vmalloc_range_partial()
2156 } while (size > 0); in remap_vmalloc_range_partial()
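remap_vmalloc_range_partial() maps a vmalloc'd buffer into a user vma one page at a time, page-aligning size first (line 2130) and refusing ranges that run past area->addr + area->size (line 2142); the buffer must have VM_USERMAP set, which vmalloc_user() and vmalloc_32_user() arrange. A sketch of the usual caller, a driver mmap handler built on the non-partial wrapper remap_vmalloc_range():

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *drv_buf;	/* assumed: allocated elsewhere with vmalloc_user() */

/* Sketch: expose the vmalloc_user() buffer to userspace via mmap. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, drv_buf, vma->vm_pgoff);
}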
2221 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) in alloc_vm_area() argument
2225 area = get_vm_area_caller(size, VM_IOREMAP, in alloc_vm_area()
2235 size, f, ptes ? &ptes : NULL)) { in alloc_vm_area()
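alloc_vm_area() reserves a VM_IOREMAP area through get_vm_area_caller() and walks it with apply_to_page_range() (the call whose arguments show on line 2235), pre-instantiating the page tables and optionally handing the pte pointers back through `ptes`; Xen's grant-table code is the classic user. A sketch for a single page, where the one-element pte array belongs to the caller:

#include <linux/vmalloc.h>

static pte_t *demo_pte;

/* Sketch: reserve one page of kernel VA with its pte pre-allocated,
 * so it can later be retargeted without allocating page tables. */
static struct vm_struct *reserve_one_page(void)
{
	struct vm_struct *area = alloc_vm_area(PAGE_SIZE, &demo_pte);

	if (!area)
		return NULL;
	/* demo_pte now addresses the pte backing area->addr */
	return area;
}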
2609 v->addr, v->addr + v->size, v->size); in s_show()