/arch/arm/mach-lh7a40x/include/mach/

constants.h
     26  # define CPLD_SIZE PAGE_SIZE
     33  # define IOBARRIER_SIZE PAGE_SIZE
     44  # define CPLD00_SIZE PAGE_SIZE
     47  # define CPLD02_SIZE PAGE_SIZE
     50  # define CPLD06_SIZE PAGE_SIZE
     53  # define CPLD08_SIZE PAGE_SIZE
     56  # define CPLD0A_SIZE PAGE_SIZE
     59  # define CPLD0C_SIZE PAGE_SIZE
     62  # define CPLD0E_SIZE PAGE_SIZE
     65  # define CPLD10_SIZE PAGE_SIZE
     [all …]

/arch/sh/kernel/

vmlinux_32.lds.S
     48  RO_DATA(PAGE_SIZE)
     53  . = ALIGN(PAGE_SIZE);
     69  . = ALIGN(PAGE_SIZE);
     74  . = ALIGN(PAGE_SIZE);
     83  . = ALIGN(PAGE_SIZE);    /* Init code and data */
    107  . = ALIGN(PAGE_SIZE);
    118  PERCPU(PAGE_SIZE)
    127  . = ALIGN(PAGE_SIZE);

vmlinux_64.lds.S
     37  . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
     69  RO_DATA(PAGE_SIZE)
     81  . = ALIGN(PAGE_SIZE);
     86  . = ALIGN(PAGE_SIZE);
     95  . = ALIGN(PAGE_SIZE);    /* Init code and data */
    119  . = ALIGN(PAGE_SIZE);
    130  PERCPU(PAGE_SIZE)
    139  . = ALIGN(PAGE_SIZE);
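
Nearly all of the linker-script entries in this list are one idiom: ". = ALIGN(PAGE_SIZE);" bumps the location counter so the next output section (read-only data, init text, per-CPU data) starts on a page boundary, which is what later lets the kernel free or write-protect an entire section with page-granular operations. A minimal userspace sketch of the consumer side, assuming 4 KiB pages and made-up section bounds:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Stand-ins for the linker-provided bounds; in a real kernel these
     * symbols come out of vmlinux.lds.S, each placed right after a
     * ". = ALIGN(PAGE_SIZE);" directive, so both are page-aligned. */
    static unsigned long init_begin = 0x100000UL;
    static unsigned long init_end   = 0x104000UL;

    int main(void)
    {
            unsigned long addr, pages = 0;

            /* Page-aligned bounds mean this loop covers exactly the init
             * section, one whole page at a time, never touching bytes that
             * belong to a neighbouring section. */
            for (addr = init_begin; addr < init_end; addr += PAGE_SIZE)
                    pages++;

            printf("would free %lu pages (%lu KiB)\n",
                   pages, pages * PAGE_SIZE / 1024);
            return 0;
    }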

/arch/m68k/mm/

sun3kmap.c
     57  phys += PAGE_SIZE;  in do_pmeg_mapin()
     58  virt += PAGE_SIZE;  in do_pmeg_mapin()
     74  offset = phys & (PAGE_SIZE-1);  in sun3_ioremap()
     75  phys &= ~(PAGE_SIZE-1);  in sun3_ioremap()
     87  pages = size / PAGE_SIZE;  in sun3_ioremap()
     94  seg_pages = (SUN3_PMEG_SIZE - (virt & SUN3_PMEG_MASK)) / PAGE_SIZE;  in sun3_ioremap()
    101  phys += seg_pages * PAGE_SIZE;  in sun3_ioremap()
    102  virt += seg_pages * PAGE_SIZE;  in sun3_ioremap()

sun3mmu.c
     55  empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
     63  size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);  in paging_init()
     66  bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;  in paging_init()
     84  address += PAGE_SIZE;  in paging_init()
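
The sun3_ioremap() hits show the standard split-and-round arithmetic for mapping an arbitrary physical range: peel off the in-page offset, align the base down, then work in whole pages. A worked userspace sketch, assuming 4 KiB pages (note that the sun3mmu.c rounding on its line 63 adds a full PAGE_SIZE rather than PAGE_SIZE - 1, so it reserves one extra page whenever the size is already aligned):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            unsigned long phys = 0x12345678UL;  /* arbitrary device address */
            unsigned long size = 10000UL;       /* bytes the caller asked for */

            unsigned long offset = phys & (PAGE_SIZE - 1);  /* in-page offset */
            phys &= ~(PAGE_SIZE - 1);                       /* page-aligned base */

            /* Whole pages needed to cover offset + size bytes. */
            unsigned long pages = (offset + size + PAGE_SIZE - 1) / PAGE_SIZE;

            printf("base=%#lx offset=%#lx pages=%lu\n", phys, offset, pages);
            return 0;
    }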

/arch/mn10300/kernel/

vmlinux.lds.S
     59  . = ALIGN(PAGE_SIZE);
     62  . = ALIGN(PAGE_SIZE);
     65  . = ALIGN(PAGE_SIZE);
     82  . = ALIGN(PAGE_SIZE);
     90  . = ALIGN(PAGE_SIZE);    /* Init code and data */
    124  . = ALIGN(PAGE_SIZE);
    131  . = ALIGN(PAGE_SIZE);
    146  . = ALIGN(PAGE_SIZE);

/arch/x86/kernel/

vmlinux_32.lds.S
     41  . = ALIGN(PAGE_SIZE);    /* not really needed, already page aligned */
     65  . = ALIGN(PAGE_SIZE);
     71  . = ALIGN(PAGE_SIZE);
     75  . = ALIGN(PAGE_SIZE);
     79  . = ALIGN(PAGE_SIZE);
    103  . = ALIGN(PAGE_SIZE);
    115  . = ALIGN(PAGE_SIZE);
    118  . = ALIGN(PAGE_SIZE);    /* Init code and data */
    174  . = ALIGN(PAGE_SIZE);
    181  . = ALIGN(PAGE_SIZE);
    [all …]

vmlinux_64.lds.S
     55  . = ALIGN(PAGE_SIZE);    /* Align data segment to page size boundary */
     64  . = ALIGN(PAGE_SIZE);
    114  . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
    129  . = ALIGN(PAGE_SIZE);
    135  . = ALIGN(PAGE_SIZE);
    142  . = ALIGN(PAGE_SIZE);
    145  . = ALIGN(PAGE_SIZE);    /* Init code and data */
    205  . = ALIGN(PAGE_SIZE);
    211  PERCPU(PAGE_SIZE)
    213  . = ALIGN(PAGE_SIZE);
    [all …]

trampoline.c
     17  reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");  in reserve_trampoline_memory()
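
The trampoline.c hit reserves what is effectively the half-open physical range [PAGE_SIZE, 2 * PAGE_SIZE): exactly one page, the second page of low memory, kept out of the early allocator's hands so the SMP boot trampoline can be copied there. A trivial check of that arithmetic, assuming 4 KiB pages:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            unsigned long start = PAGE_SIZE;             /* 0x1000 */
            unsigned long end   = PAGE_SIZE + PAGE_SIZE; /* 0x2000, exclusive */

            /* One page exactly; physical page 0 stays untouched. */
            assert(end - start == PAGE_SIZE);
            printf("reserved [%#lx, %#lx): %lu page(s)\n",
                   start, end, (end - start) / PAGE_SIZE);
            return 0;
    }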

/arch/m68knommu/mm/

init.c
     94  empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
     95  empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
     96  empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
     97  memset((void *)empty_zero_page, 0, PAGE_SIZE);  in paging_init()
    164  for (; start < end; start += PAGE_SIZE) {  in free_initrd_mem()
    187  for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {  in free_initmem()
    196  (int)(addr - PAGE_SIZE));  in free_initmem()
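
The paging_init() hits here are the classic boot-time zero-page setup, repeated almost verbatim in the h8300 and frv entries below: take one page from the boot allocator, then clear it explicitly, because boot memory is not guaranteed to arrive zero-filled. A userspace sketch with aligned_alloc() standing in for alloc_bootmem_pages():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            /* aligned_alloc() is only a stand-in; the kernel uses the
             * boot allocator, which hands back page-aligned memory. */
            unsigned long empty_zero_page =
                    (unsigned long)aligned_alloc(PAGE_SIZE, PAGE_SIZE);
            if (!empty_zero_page)
                    return 1;

            /* Must be cleared by hand before it can serve as the
             * all-zero page mapped for untouched anonymous memory. */
            memset((void *)empty_zero_page, 0, PAGE_SIZE);

            printf("zero page at %#lx\n", empty_zero_page);
            free((void *)empty_zero_page);
            return 0;
    }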

/arch/h8300/mm/

init.c
     93  empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
     94  empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
     95  empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
     96  memset((void *)empty_zero_page, 0, PAGE_SIZE);  in paging_init()
    166  for (; start < end; start += PAGE_SIZE) {  in free_initrd_mem()
    189  for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {  in free_initmem()
    198  (int)(addr - PAGE_SIZE));  in free_initmem()

/arch/mn10300/mm/

init.c
     60  for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE);  in paging_init()
     61  loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE);  in paging_init()
     65  ppte += PAGE_SIZE / sizeof(pte_t);  in paging_init()
     94  high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE);  in mem_init()
     97  memset(empty_zero_page, 0, PAGE_SIZE);  in mem_init()
    132  for (addr = begin; addr < end; addr += PAGE_SIZE) {  in free_init_pages()
    135  memset((void *) addr, 0xcc, PAGE_SIZE);  in free_init_pages()
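
The free_init_pages() hits show the poison-before-free pattern: each page of the init region is filled with 0xcc before being handed back, so a stale pointer into freed init memory reads obvious junk instead of silently working. A self-contained sketch using a heap buffer as the "init region":

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL
    #define POISON    0xcc  /* recognizable junk byte */

    int main(void)
    {
            unsigned long npages = 3;
            char *region = malloc(npages * PAGE_SIZE);
            if (!region)
                    return 1;

            /* Poison page by page, mirroring the loop structure of
             * free_init_pages(), before the pages would be freed. */
            for (unsigned long addr = 0; addr < npages * PAGE_SIZE;
                 addr += PAGE_SIZE)
                    memset(region + addr, POISON, PAGE_SIZE);

            printf("poisoned %lu pages\n", npages);
            free(region);
            return 0;
    }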

/arch/s390/kernel/

vmlinux.lds.S
     51  . = ALIGN(PAGE_SIZE);
     66  . = ALIGN(PAGE_SIZE);
     71  . = ALIGN(PAGE_SIZE);
     74  . = ALIGN(PAGE_SIZE);
     96  . = ALIGN(PAGE_SIZE);    /* Init code and data */
    143  PERCPU(PAGE_SIZE)
    144  . = ALIGN(PAGE_SIZE);

/arch/frv/mm/

init.c
     76  empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
     77  empty_bad_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
     78  empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
     80  memset((void *) empty_zero_page, 0, PAGE_SIZE);  in paging_init()
     88  pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);  in paging_init()
    179  for (addr = start; addr < end; addr += PAGE_SIZE) {  in free_initmem()
    199  for (; start < end; start += PAGE_SIZE) {  in free_initrd_mem()
    206  printk("Freeing initrd memory: %dKiB freed\n", (pages * PAGE_SIZE) >> 10);  in free_initrd_mem()

/arch/sh/mm/

ioremap_64.c
    105  .start = IOBASE_VADDR + PAGE_SIZE,
    203  unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;  in shmedia_ioremap()
    209  PAGE_SIZE, NULL, NULL) != 0) {  in shmedia_ioremap()
    217  psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;  in shmedia_ioremap()
    224  for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {  in shmedia_ioremap()
    226  va += PAGE_SIZE;  in shmedia_ioremap()
    227  pa += PAGE_SIZE;  in shmedia_ioremap()
    240  BUG_ON((len & (PAGE_SIZE - 1)) != 0);  in shmedia_free_io()
    243  len -= PAGE_SIZE;  in shmedia_free_io()
    258  page = alloc_bootmem_pages(PAGE_SIZE);  in sh64_get_page()
    [all …]

pg-sh7705.c
     83  __flush_wback_region(to, PAGE_SIZE);  in clear_user_page()
     87  PAGE_SIZE);  in clear_user_page()
     89  __flush_wback_region(to, PAGE_SIZE);  in clear_user_page()
    107  __flush_wback_region(to, PAGE_SIZE);  in copy_user_page()
    111  PAGE_SIZE);  in copy_user_page()
    113  __flush_wback_region(to, PAGE_SIZE);  in copy_user_page()
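
One detail behind the shmedia_ioremap() hits: struct resource stores an inclusive end address, which is why the loop on its line 224 computes the byte count as res->end - res->start + 1. A minimal sketch of that arithmetic with a stand-in resource type:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Stand-in for the kernel's struct resource: 'end' is inclusive, so
     * a single page at 0x1000 has start = 0x1000, end = 0x1fff. */
    struct resource {
            unsigned long start, end;
    };

    int main(void)
    {
            struct resource res = { 0x1000UL, 0x2fffUL };   /* two pages */

            unsigned long bytes = res.end - res.start + 1;  /* +1: inclusive */
            unsigned long pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;

            printf("%lu bytes, %lu page(s)\n", bytes, pages);
            return 0;
    }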

/arch/m68k/kernel/

sys_m68k.c
    298  unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);  in cache_flush_040()
    304  tmp = PAGE_SIZE;  in cache_flush_040()
    316  i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;  in cache_flush_040()
    350  addr += PAGE_SIZE;  in cache_flush_040()
    351  i = PAGE_SIZE / 16;  in cache_flush_040()
    361  addr += PAGE_SIZE;  in cache_flush_040()
    371  len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);  in cache_flush_040()
    372  for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)  in cache_flush_040()
    459  unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);  in cache_flush_060()
    465  tmp = PAGE_SIZE;  in cache_flush_060()
    [all …]
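
The cache_flush_040()/cache_flush_060() hits are one pattern: walk a byte range page by page, where the first chunk runs only to the end of the page containing addr (PAGE_SIZE - (addr & ~PAGE_MASK), ~PAGE_MASK being the in-page offset bits) and every later chunk is a full page. A compact sketch, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long addr = 0x12345UL;  /* unaligned start */
            unsigned long len  = 10000UL;

            /* First chunk: from addr to the end of its page. */
            unsigned long chunk = PAGE_SIZE - (addr & ~PAGE_MASK);

            while (len > 0) {
                    if (chunk > len)
                            chunk = len;
                    printf("flush %#lx..%#lx\n", addr, addr + chunk - 1);
                    addr += chunk;
                    len  -= chunk;
                    chunk = PAGE_SIZE;  /* later chunks are whole pages */
            }
            return 0;
    }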

/arch/alpha/mm/

init.c
     79  memset((void *) EMPTY_PGT, 0, PAGE_SIZE);  in __bad_pagetable()
     86  memset((void *) EMPTY_PGE, 0, PAGE_SIZE);  in __bad_page()
    108  memset(swapper_pg_dir, 0, PAGE_SIZE);  in switch_to_system_map()
    182  kernel_end = two_pages + 2*PAGE_SIZE;  in callback_init()
    183  memset(two_pages, 0, 2*PAGE_SIZE);  in callback_init()
    188  pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));  in callback_init()
    205  memset(kernel_end, 0, PAGE_SIZE);  in callback_init()
    208  kernel_end += PAGE_SIZE;  in callback_init()
    213  vaddr += PAGE_SIZE;  in callback_init()
    252  memset((void *)ZERO_PGE, 0, PAGE_SIZE);  in paging_init()
    [all …]

/arch/s390/kvm/

gaccess.h
     27  if (guestaddr < 2 * PAGE_SIZE)  in __guestaddr_to_user()
     29  else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))  in __guestaddr_to_user()
    164  if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))  in copy_to_guest()
    170  if ((guestdest < prefix + 2 * PAGE_SIZE)  in copy_to_guest()
    171  && (guestdest + n > prefix + 2 * PAGE_SIZE))  in copy_to_guest()
    174  if (guestdest < 2 * PAGE_SIZE)  in copy_to_guest()
    176  else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))  in copy_to_guest()
    215  if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))  in copy_from_guest()
    221  if ((guestsrc < prefix + 2 * PAGE_SIZE)  in copy_from_guest()
    222  && (guestsrc + n > prefix + 2 * PAGE_SIZE))  in copy_from_guest()
    [all …]
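
These checks all deal with s390 prefixing: each CPU's prefix register swaps guest pages 0 and 1 (the first 2 * PAGE_SIZE bytes) with the two pages at the prefix address, and copies that straddle either boundary (the "+ n >" comparisons) have to be split rather than translated with a single offset. A simplified sketch of the address rewrite, assuming 4 KiB pages; apply_prefix() is an invented name:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long apply_prefix(unsigned long addr, unsigned long prefix)
    {
            if (addr < 2 * PAGE_SIZE)
                    return addr + prefix;  /* low pages -> prefix area */
            if (addr >= prefix && addr < prefix + 2 * PAGE_SIZE)
                    return addr - prefix;  /* prefix area -> low pages */
            return addr;                   /* everything else unchanged */
    }

    int main(void)
    {
            unsigned long prefix = 0x20000UL;

            printf("%#lx -> %#lx\n", 0x0UL, apply_prefix(0x0UL, prefix));
            printf("%#lx -> %#lx\n", 0x20000UL,
                   apply_prefix(0x20000UL, prefix));
            printf("%#lx -> %#lx\n", 0x50000UL,
                   apply_prefix(0x50000UL, prefix));
            return 0;
    }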

/arch/parisc/kernel/

vmlinux.lds.S
     76  . = ALIGN(PAGE_SIZE);
    122  . = ALIGN(PAGE_SIZE);
    127  . = ALIGN(PAGE_SIZE);
    136  . = ALIGN(PAGE_SIZE);
    224  . = ALIGN(PAGE_SIZE);
    232  PERCPU(PAGE_SIZE)
    233  . = ALIGN(PAGE_SIZE);

/arch/sparc/mm/

generic_64.c
     40  unsigned long curend = address + PAGE_SIZE;  in io_remap_pte_range()
     42  entry = mk_pte_io(offset, prot, space, PAGE_SIZE);  in io_remap_pte_range()
     44  if (PAGE_SIZE < (4 * 1024 * 1024) &&  in io_remap_pte_range()
     52  } else if (PAGE_SIZE < (512 * 1024) &&  in io_remap_pte_range()
     60  } else if (PAGE_SIZE < (64 * 1024) &&  in io_remap_pte_range()
     68  offset += PAGE_SIZE;  in io_remap_pte_range()
     70  offset += PAGE_SIZE;  in io_remap_pte_range()
     77  address += PAGE_SIZE;  in io_remap_pte_range()
     78  pte_val(entry) += PAGE_SIZE;  in io_remap_pte_range()
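
io_remap_pte_range() opportunistically maps I/O with larger page sizes, trying 4 MB, then 512 KB, then 64 KB before falling back to the base page, provided the base page size is smaller than the candidate and the range is suitably aligned. A hedged sketch of that selection cascade; pick_granule() is an invented helper, and the real code also checks the virtual address and remaining length:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Pick the largest granule the offset is aligned to and that still
     * fits in the remaining length; illustrative only. */
    static unsigned long pick_granule(unsigned long offset, unsigned long len)
    {
            static const unsigned long sizes[] = {
                    4UL << 20,      /* 4 MB   */
                    512UL << 10,    /* 512 KB */
                    64UL << 10,     /* 64 KB  */
            };

            for (int i = 0; i < 3; i++)
                    if ((offset & (sizes[i] - 1)) == 0 && len >= sizes[i])
                            return sizes[i];
            return PAGE_SIZE;
    }

    int main(void)
    {
            printf("%lu\n", pick_granule(0x400000UL, 8UL << 20)); /* 4194304 */
            printf("%lu\n", pick_granule(0x410000UL, 1UL << 20)); /* 65536 */
            return 0;
    }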

/arch/powerpc/kernel/

vmlinux.lds.S
     71  . = ALIGN(PAGE_SIZE);
     98  . = ALIGN(PAGE_SIZE);
    177  . = ALIGN(PAGE_SIZE);
    184  . = ALIGN(PAGE_SIZE);
    217  . = ALIGN(PAGE_SIZE);
    224  . = ALIGN(PAGE_SIZE);
    252  . = ALIGN(PAGE_SIZE);
    266  . = ALIGN(PAGE_SIZE);
    280  . = ALIGN(PAGE_SIZE);
    284  . = ALIGN(PAGE_SIZE);
    [all …]

/arch/ia64/mm/

init.c
     67  flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));  in __ia64_sync_icache_dcache()
     83  while (pg_addr + PAGE_SIZE <= end) {  in dma_mark_clean()
     86  pg_addr += PAGE_SIZE;  in dma_mark_clean()
    122  vma->vm_end = vma->vm_start + PAGE_SIZE;  in ia64_init_addr_space()
    139  vma->vm_end = PAGE_SIZE;  in ia64_init_addr_space()
    165  addr += PAGE_SIZE;  in free_initmem()
    212  for (; start < end; start += PAGE_SIZE) {  in free_initrd_mem()
    272  page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));  in setup_gate()
    273  put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);  in setup_gate()
    280  for (addr = GATE_ADDR + PAGE_SIZE;  in setup_gate()
    [all …]
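
The __ia64_sync_icache_dcache() hit sizes its flush as PAGE_SIZE << compound_order(page): an order-n compound page spans 2^n base pages, so the shift covers the whole allocation in one range. The arithmetic, worked for a few orders (4 KiB base pages assumed):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            /* order 0 -> 4096, order 1 -> 8192, ... order 4 -> 65536 */
            for (unsigned int order = 0; order <= 4; order++)
                    printf("order %u -> %lu bytes\n",
                           order, PAGE_SIZE << order);
            return 0;
    }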

/arch/s390/mm/

init.c
     42  pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
     43  char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
     89  high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);  in mem_init()
     92  memset(empty_zero_page, 0, PAGE_SIZE);  in mem_init()
    149  for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {  in free_initmem()
    152  memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);  in free_initmem()
    165  for (; start < end; start += PAGE_SIZE) {  in free_initrd_mem()
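
Unlike the boot-allocator variants earlier in the list, s390 places its zero page and top-level page table inside the kernel image and gets page alignment at link time via the aligned attribute. A standalone sketch of the same trick (GCC/Clang attribute syntax, 4 KiB pages assumed):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Statically allocated, page-aligned array: the toolchain places it
     * on a PAGE_SIZE boundary, so no runtime allocation is needed. */
    static char zero_page[PAGE_SIZE]
            __attribute__((__aligned__(PAGE_SIZE)));

    int main(void)
    {
            assert(((uintptr_t)zero_page & (PAGE_SIZE - 1)) == 0);
            printf("zero page at %p\n", (void *)zero_page);
            return 0;
    }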

/arch/alpha/kernel/

vmlinux.lds.S
     44  . = ALIGN(PAGE_SIZE);
     71  . = ALIGN(PAGE_SIZE);
     89  PERCPU(PAGE_SIZE)
     91  . = ALIGN(2 * PAGE_SIZE);
    100  . = ALIGN(PAGE_SIZE);