Lines Matching refs:PAGE_SIZE

 67	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));	in __ia64_sync_icache_dcache()
 83	while (pg_addr + PAGE_SIZE <= end) {	in dma_mark_clean()
 86	pg_addr += PAGE_SIZE;	in dma_mark_clean()
122	vma->vm_end = vma->vm_start + PAGE_SIZE;	in ia64_init_addr_space()
139	vma->vm_end = PAGE_SIZE;	in ia64_init_addr_space()
165	addr += PAGE_SIZE;	in free_initmem()
212	for (; start < end; start += PAGE_SIZE) {	in free_initrd_mem()
272	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));	in setup_gate()
273	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);	in setup_gate()
280	for (addr = GATE_ADDR + PAGE_SIZE;	in setup_gate()
282	addr += PAGE_SIZE)	in setup_gate()
405	end_address += PAGE_SIZE;	in vmemmap_find_next_valid_pfn()
440	for (address = start_page; address < end_page; address += PAGE_SIZE) {	in create_mem_map_page_table()
443	pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));	in create_mem_map_page_table()
447	pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));	in create_mem_map_page_table()
451	pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));	in create_mem_map_page_table()
455	set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,	in create_mem_map_page_table()
488	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);	in virtual_memmap_init()
572	for (; start < end; start += PAGE_SIZE)	in count_reserved_pages()
622	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);	in mem_init()
623	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);	in mem_init()
624	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);	in mem_init()
641	high_memory = __va(max_low_pfn * PAGE_SIZE);	in mem_init()
643	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);	in mem_init()
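
Most of the matches above use PAGE_SIZE in one of two ways: stepping through an address range one page at a time (free_initrd_mem(), dma_mark_clean(), count_reserved_pages()), or masking with PAGE_SIZE - 1 to work relative to a page boundary (virtual_memmap_init()). The following is a minimal user-space C sketch of those two idioms, not kernel code: page_size comes from sysconf(_SC_PAGESIZE) as a stand-in for the kernel's PAGE_SIZE constant, and process_page() is a hypothetical per-page callback used only for illustration.

/* Minimal sketch of the page-stepping idioms seen in the matches above.
 * Assumptions: page_size stands in for the kernel's PAGE_SIZE, and
 * process_page() is a hypothetical per-page action (flush, free, count, ...).
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

static void process_page(uintptr_t addr, long page_size)
{
	/* Placeholder for per-page work. */
	printf("page at 0x%lx (%ld bytes)\n", (unsigned long)addr, page_size);
}

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);	/* stand-in for PAGE_SIZE */
	uintptr_t start = 0x10000123;		/* arbitrary, unaligned example range */
	uintptr_t end   = 0x10004000;

	/* Round down/up to page boundaries, in the spirit of the
	 * (PAGE_SIZE - 1) masking at line 488 above. */
	uintptr_t first = start & ~((uintptr_t)page_size - 1);
	uintptr_t last  = (end + page_size - 1) & ~((uintptr_t)page_size - 1);

	/* Walk the range one page at a time, like the loops in
	 * free_initrd_mem() and count_reserved_pages() above. */
	for (uintptr_t addr = first; addr < last; addr += page_size)
		process_page(addr, page_size);

	return 0;
}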