Searched refs:start_page (Results 1 – 8 of 8) sorted by relevance
/arch/mn10300/mm/

cache-flush-icache.c  (all matches in flush_icache_range())
    110  unsigned long start_page, end_page;   [local]
    126  start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
    127  mn10300_local_dcache_flush_range(start_page, end);
    128  mn10300_local_icache_inv_range(start_page, end);
    129  smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
    130  if (start_page == start)
    132  end = start_page;
    135  start_page = start & PAGE_MASK;
    138  if (start_page == end_page) {
    141  } else if (start_page + 1 == end_page) {

cache-inv-icache.c  (all matches in flush_icache_range())
     86  unsigned long start_page, end_page;   [local]
    102  start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
    103  mn10300_icache_inv_range(start_page, end);
    105  if (start_page == start)
    107  end = start_page;
    110  start_page = start & PAGE_MASK;
    113  if (start_page == end_page) {
    116  } else if (start_page + 1 == end_page) {

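Both mn10300 hits follow the same pattern: flush_icache_range() clamps the portion of the range at or above 0x80000000 and flushes it directly, then falls back to page-by-page handling for whatever is left below. A minimal userspace sketch of just that address arithmetic follows; the 4 KiB PAGE_SIZE, the end_page rounding, and the printouts are assumptions for illustration, and the cache operations themselves are kernel-only and not reproduced.

    /* Sketch of the range splitting seen in the hits above. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static void split_icache_range(unsigned long start, unsigned long end)
    {
            unsigned long start_page, end_page;

            if (end > 0x80000000UL) {
                    /* part of the range is in the directly flushable region */
                    start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
                    printf("flush %#lx-%#lx directly\n", start_page, end);
                    if (start_page == start)
                            return;         /* nothing left below 0x80000000 */
                    end = start_page;       /* keep only the low part */
            }

            /* remainder is handled per page */
            start_page = start & PAGE_MASK;
            end_page = (end - 1) & PAGE_MASK;  /* assumed rounding; not shown in the hits */
            if (start_page == end_page)
                    printf("one page: %#lx\n", start_page);
            else
                    printf("page range: %#lx-%#lx\n", start_page, end_page);
    }

    int main(void)
    {
            split_icache_range(0x7ffff800UL, 0x80000400UL);
            return 0;
    }
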
/arch/m68k/sun3/

config.c  (all matches in sun3_bootmem_alloc())
    112  unsigned long start_page;   [local]
    118  start_page = __pa(memory_start) >> PAGE_SHIFT;
    125  availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages);

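Here start_page is a page frame number: the first usable physical address is shifted down by PAGE_SHIFT before seeding the bootmem allocator. A sketch of that conversion alone, assuming 4 KiB pages (PAGE_SHIFT = 12) and a made-up physical address; __pa() and init_bootmem_node() are kernel-only and are not reproduced.

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long memory_start_phys = 0x00240000UL;  /* made-up value */
            unsigned long start_page = memory_start_phys >> PAGE_SHIFT;

            printf("physical %#lx -> pfn %lu\n", memory_start_phys, start_page);
            return 0;
    }
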
/arch/powerpc/mm/

init_64.c  (all matches in vmemmap_populate())
    267  int __meminit vmemmap_populate(struct page *start_page,   [argument]
    270  unsigned long start = (unsigned long)start_page;
    271  unsigned long end = (unsigned long)(start_page + nr_pages);
    278  start_page, nr_pages, node);

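In this hit start_page is a pointer into the vmemmap (an array of struct page), and the function turns it plus a page count into the byte range [start, end) it has to map. A rough sketch of that pointer arithmetic, using a dummy struct page of arbitrary size rather than the kernel's layout:

    #include <stdio.h>

    struct page { unsigned long flags; void *priv[7]; };  /* stand-in, not the kernel's */

    static void populate_range(struct page *start_page, unsigned long nr_pages)
    {
            unsigned long start = (unsigned long)start_page;
            unsigned long end = (unsigned long)(start_page + nr_pages);

            /* end - start == nr_pages * sizeof(struct page) */
            printf("map vmemmap bytes %#lx-%#lx (%lu page structs)\n",
                   start, end, nr_pages);
    }

    int main(void)
    {
            static struct page map[16];

            populate_range(map, 16);
            return 0;
    }

The x86 hit further down performs the same conversion; only the trailing sync_global_pgds() call differs.
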
/arch/powerpc/platforms/pseries/

cmm.c  (all matches in cmm_mem_going_offline())
    530  unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn);   [local]
    531  unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT);
    537  start_page, marg->nr_pages);
    544  if ((pa_curr->page[idx] < start_page) ||
    570  if (((unsigned long)pa_curr >= start_page) &&

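In the memory-offline notifier, start_page and end_page describe the virtual address window covering the pfn range that is about to go away, and loaned pages are tested against that window. A sketch of the window computation and containment test, with pfn_to_kaddr() replaced by an invented linear-map base (the kernel-only details are not reproduced):

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_SHIFT      12
    #define FAKE_KERNELBASE 0xc000000000000000UL  /* illustrative only */

    static unsigned long fake_pfn_to_kaddr(unsigned long pfn)
    {
            return FAKE_KERNELBASE + (pfn << PAGE_SHIFT);
    }

    static bool in_going_offline_range(unsigned long addr,
                                       unsigned long start_pfn,
                                       unsigned long nr_pages)
    {
            unsigned long start_page = fake_pfn_to_kaddr(start_pfn);
            unsigned long end_page = start_page + (nr_pages << PAGE_SHIFT);

            return addr >= start_page && addr < end_page;
    }

    int main(void)
    {
            unsigned long probe = fake_pfn_to_kaddr(0x1008) + 0x40;

            printf("inside: %d\n", in_going_offline_range(probe, 0x1000, 16));
            return 0;
    }
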
/arch/x86/mm/

init_64.c  (all matches in vmemmap_populate())
    911  vmemmap_populate(struct page *start_page, unsigned long size, int node)   [argument]
    913  unsigned long addr = (unsigned long)start_page;
    914  unsigned long end = (unsigned long)(start_page + size);
    977  sync_global_pgds((unsigned long)start_page, end);

/arch/ia64/mm/

init.c  (all matches in create_mem_map_page_table())
    426  unsigned long address, start_page, end_page;   [local]
    437  start_page = (unsigned long) map_start & PAGE_MASK;
    441  for (address = start_page; address < end_page; address += PAGE_SIZE) {

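The ia64 hit rounds the start of the region down to a page boundary and then walks it one page at a time. A sketch of that loop, assuming 4 KiB pages and a rounded-up end_page (the hits only show the start side):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long map_start = 0x100123UL, map_end = 0x103000UL;  /* made up */
            unsigned long start_page = map_start & PAGE_MASK;
            unsigned long end_page = (map_end + PAGE_SIZE - 1) & PAGE_MASK;  /* round up */
            unsigned long address;

            for (address = start_page; address < end_page; address += PAGE_SIZE)
                    printf("would map page at %#lx\n", address);
            return 0;
    }
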
discontig.c  (all matches in vmemmap_populate())
    822  int __meminit vmemmap_populate(struct page *start_page,   [argument]
    825  return vmemmap_populate_basepages(start_page, size, node);