/arch/openrisc/kernel/vmlinux.lds.S
     42  . = ALIGN(PAGE_SIZE);
     64  RO_DATA_SECTION(PAGE_SIZE)
     70  RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE)
     78  . = ALIGN(PAGE_SIZE);
     84  INIT_TEXT_SECTION(PAGE_SIZE)
     93  . = ALIGN(PAGE_SIZE);
    100  . = ALIGN (PAGE_SIZE);

/arch/mn10300/kernel/vmlinux.lds.S
     44  RO_DATA(PAGE_SIZE)
     48  RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
     52  . = ALIGN(PAGE_SIZE);
     60  . = ALIGN(PAGE_SIZE);   /* Init code and data */
     62  INIT_TEXT_SECTION(PAGE_SIZE)
     75  . = ALIGN(PAGE_SIZE);
     79  BSS_SECTION(0, PAGE_SIZE, 4)
     84  . = ALIGN(PAGE_SIZE);

/arch/hexagon/kernel/vmlinux.lds.S
     35  #define PAGE_SIZE _PAGE_SIZE   macro
     43  INIT_TEXT_SECTION(PAGE_SIZE)
     59  INIT_DATA_SECTION(PAGE_SIZE)
     62  RW_DATA_SECTION(32,PAGE_SIZE,PAGE_SIZE)
     63  RO_DATA_SECTION(PAGE_SIZE)

/arch/metag/kernel/vmlinux.lds.S
     34  RO_DATA_SECTION(PAGE_SIZE)
     35  RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
     41  . = ALIGN(PAGE_SIZE);   /* Init code and data */
     43  INIT_TEXT_SECTION(PAGE_SIZE)
     56  BSS_SECTION(0, PAGE_SIZE, 0)
     60  . = ALIGN(PAGE_SIZE);

/arch/s390/kernel/vmlinux.lds.S
     48  RO_DATA_SECTION(PAGE_SIZE)
     54  . = ALIGN(PAGE_SIZE);
     60  RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
     65  . = ALIGN(PAGE_SIZE);   /* Init code and data */
     68  INIT_TEXT_SECTION(PAGE_SIZE)
     83  . = ALIGN(PAGE_SIZE);
     87  . = ALIGN(PAGE_SIZE);

/arch/tile/kernel/vmlinux.lds.S
     53  INIT_TEXT_SECTION(PAGE_SIZE)
     60  . = ALIGN(PAGE_SIZE);
     64  . = ALIGN(PAGE_SIZE);
     69  RO_DATA_SECTION(PAGE_SIZE)
     72  . = ALIGN(PAGE_SIZE);
     80  RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
     88  BSS_SECTION(8, PAGE_SIZE, 1)

/arch/hexagon/mm/uaccess.c
     39  while (count > PAGE_SIZE) {   in __clear_user_hexagon()
     41  PAGE_SIZE);   in __clear_user_hexagon()
     43  return count - (PAGE_SIZE - uncleared);   in __clear_user_hexagon()
     44  count -= PAGE_SIZE;   in __clear_user_hexagon()
     45  dest += PAGE_SIZE;   in __clear_user_hexagon()

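The __clear_user_hexagon() fragments above trace the common page-at-a-time clearing loop: the buffer is consumed in PAGE_SIZE chunks, and a partially failed chunk is converted back into a count of bytes left uncleared. A minimal user-space sketch of the same control flow, assuming a 4 KiB page; zero_chunk() is a hypothetical stand-in for the arch's fault-handling clear primitive:

    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* Hypothetical stand-in for the fault-handling clear: returns the
     * number of bytes it could NOT clear (0 on success). */
    static unsigned long zero_chunk(char *dest, unsigned long len)
    {
            memset(dest, 0, len);
            return 0;
    }

    /* Clear 'count' bytes at 'dest' one page at a time; the return
     * value is how many bytes remain uncleared, as in the listing. */
    static unsigned long clear_bytes(char *dest, unsigned long count)
    {
            while (count > PAGE_SIZE) {
                    unsigned long uncleared = zero_chunk(dest, PAGE_SIZE);

                    if (uncleared)
                            return count - (PAGE_SIZE - uncleared);
                    count -= PAGE_SIZE;
                    dest += PAGE_SIZE;
            }
            return zero_chunk(dest, count);
    }
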
/arch/x86/kernel/vmlinux.lds.S
    117  . = ALIGN(PAGE_SIZE);
    120  RO_DATA(PAGE_SIZE)
    136  PAGE_ALIGNED_DATA(PAGE_SIZE)
    152  . = ALIGN(PAGE_SIZE);
    170  . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
    175  . = ALIGN(PAGE_SIZE);
    189  INIT_TEXT_SECTION(PAGE_SIZE)
    272  . = ALIGN(PAGE_SIZE);
    283  . = ALIGN(PAGE_SIZE);
    287  . = ALIGN(PAGE_SIZE);
    [all …]

/arch/score/include/asm/page.h
      9  #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)   macro
     10  #define PAGE_MASK (~(PAGE_SIZE-1))
     16  #define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
     17  #define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
     33  #define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
     34  #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
     36  #define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
     38  memcpy((vto), (vfrom), PAGE_SIZE)

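PAGE_UP and PAGE_DOWN above are the standard power-of-two rounding idioms: adding PAGE_SIZE - 1 before masking rounds an address up to the next page boundary, masking alone rounds it down. A self-contained check of that arithmetic, assuming a 4 KiB page:

    #include <assert.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_UP(addr)   (((addr) + ((PAGE_SIZE) - 1)) & (~((PAGE_SIZE) - 1)))
    #define PAGE_DOWN(addr) ((addr) & (~((PAGE_SIZE) - 1)))

    int main(void)
    {
            /* 0x1234 lies inside the page that starts at 0x1000. */
            assert(PAGE_DOWN(0x1234UL) == 0x1000UL);
            assert(PAGE_UP(0x1234UL)   == 0x2000UL);
            /* Page-aligned addresses are unchanged by both. */
            assert(PAGE_UP(0x3000UL)   == 0x3000UL);
            assert(PAGE_DOWN(0x3000UL) == 0x3000UL);
            return 0;
    }
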
/arch/arc/kernel/vmlinux.lds.S
     36  . = ALIGN(PAGE_SIZE);
     62  . = ALIGN(PAGE_SIZE);
     93  . = ALIGN(PAGE_SIZE);
    109  RO_DATA_SECTION(PAGE_SIZE)
    115  RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
    122  . = ALIGN(PAGE_SIZE);
    134  . = ALIGN(PAGE_SIZE);

/arch/m68k/mm/sun3kmap.c
     57  phys += PAGE_SIZE;   in do_pmeg_mapin()
     58  virt += PAGE_SIZE;   in do_pmeg_mapin()
     74  offset = phys & (PAGE_SIZE-1);   in sun3_ioremap()
     75  phys &= ~(PAGE_SIZE-1);   in sun3_ioremap()
     87  pages = size / PAGE_SIZE;   in sun3_ioremap()
     94  seg_pages = (SUN3_PMEG_SIZE - (virt & SUN3_PMEG_MASK)) / PAGE_SIZE;   in sun3_ioremap()
    101  phys += seg_pages * PAGE_SIZE;   in sun3_ioremap()
    102  virt += seg_pages * PAGE_SIZE;   in sun3_ioremap()

/arch/m68k/mm/sun3mmu.c
     50  empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);   in paging_init()
     58  size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);   in paging_init()
     61  bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;   in paging_init()
     79  address += PAGE_SIZE;   in paging_init()

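sun3_ioremap() above has the classic shape of a software remapper: split the physical address into a page-aligned base plus an in-page offset, then map the range one page at a time. A sketch of that bookkeeping under the same assumptions; map_one_page() is a hypothetical stand-in for the hardware-specific step (the real code also walks PMEG segments, which is omitted here):

    #define PAGE_SIZE 4096UL

    /* Hypothetical: install a single page translation virt -> phys. */
    static void map_one_page(unsigned long virt, unsigned long phys)
    {
            (void)virt;
            (void)phys;
    }

    /* Map [phys, phys + size) at 'virt' and return the usable virtual
     * address, preserving the caller's offset within the first page. */
    static unsigned long remap_range(unsigned long virt, unsigned long phys,
                                     unsigned long size)
    {
            unsigned long offset = phys & (PAGE_SIZE - 1);
            unsigned long base = virt;
            unsigned long pages;

            phys &= ~(PAGE_SIZE - 1);
            size += offset;                        /* cover the partial first page */
            pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

            while (pages--) {
                    map_one_page(virt, phys);
                    phys += PAGE_SIZE;
                    virt += PAGE_SIZE;
            }
            return base + offset;
    }
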
/arch/m68k/kernel/sys_m68k.c
    106  unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);   in cache_flush_040()
    112  tmp = PAGE_SIZE;   in cache_flush_040()
    124  i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;   in cache_flush_040()
    158  addr += PAGE_SIZE;   in cache_flush_040()
    159  i = PAGE_SIZE / 16;   in cache_flush_040()
    169  addr += PAGE_SIZE;   in cache_flush_040()
    179  len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);   in cache_flush_040()
    180  for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)   in cache_flush_040()
    267  unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);   in cache_flush_060()
    273  tmp = PAGE_SIZE;   in cache_flush_060()
    [all …]

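cache_flush_040() turns an arbitrary (addr, len) byte range into a whole number of pages before the per-page flush loop at lines 179-180: widening len by the starting in-page offset plus PAGE_SIZE - 1 and shifting by PAGE_SHIFT yields the count of pages the range touches. The same arithmetic in isolation, assuming 4 KiB pages:

    #include <assert.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Number of pages touched by the byte range [addr, addr + len). */
    static unsigned long pages_touched(unsigned long addr, unsigned long len)
    {
            len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
            return len >> PAGE_SHIFT;
    }

    int main(void)
    {
            assert(pages_touched(0x1000, 1) == 1);          /* within one page  */
            assert(pages_touched(0x1fff, 2) == 2);          /* straddles a page */
            assert(pages_touched(0x1000, PAGE_SIZE) == 1);  /* exactly one page */
            return 0;
    }
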
/arch/x86/vdso/vdsox32.S
      8  .align PAGE_SIZE
     12  .align PAGE_SIZE   /* extra data here leaks to userspace. */
     21  .zero (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE * 8

/arch/x86/vdso/vdso.S
      8  .align PAGE_SIZE
     12  .align PAGE_SIZE   /* extra data here leaks to userspace. */
     21  .zero (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE * 8

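Both vDSO images are padded to a page boundary with .align PAGE_SIZE (anything past the image would leak to userspace), and the closing .zero reserves one 8-byte slot per page of the image. The (len + PAGE_SIZE - 1) / PAGE_SIZE expression is the usual round-up page count; in C, assuming a 4 KiB page:

    #include <assert.h>

    #define PAGE_SIZE 4096UL

    /* Pages needed to hold 'len' bytes, rounding up. */
    static unsigned long pages_needed(unsigned long len)
    {
            return (len + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
            assert(pages_needed(1) == 1);
            assert(pages_needed(PAGE_SIZE) == 1);
            assert(pages_needed(PAGE_SIZE + 1) == 2);
            return 0;
    }
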
/arch/sh/kernel/vmlinux.lds.S
     51  RO_DATA(PAGE_SIZE)
     52  RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
     57  . = ALIGN(PAGE_SIZE);   /* Init code and data */
     59  INIT_TEXT_SECTION(PAGE_SIZE)
     78  . = ALIGN(PAGE_SIZE);
     80  BSS_SECTION(0, PAGE_SIZE, 4)

/arch/x86/mm/kmemcheck/shadow.c
     33  return page->shadow + (address & (PAGE_SIZE - 1));   in kmemcheck_shadow_lookup()
     50  first_n = page + PAGE_SIZE - addr;   in mark_shadow()
     60  while (n >= PAGE_SIZE) {   in mark_shadow()
     63  memset(shadow, status, PAGE_SIZE);   in mark_shadow()
     65  addr += PAGE_SIZE;   in mark_shadow()
     66  n -= PAGE_SIZE;   in mark_shadow()
    107  kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);   in kmemcheck_mark_unallocated_pages()
    115  kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);   in kmemcheck_mark_uninitialized_pages()
    123  kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);   in kmemcheck_mark_initialized_pages()

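mark_shadow() above covers an arbitrary byte range by splitting it into a partial leading page, a run of whole pages, and a partial trailing page, while kmemcheck_shadow_lookup() finds the shadow byte via the in-page offset address & (PAGE_SIZE - 1). A minimal sketch of the split; set_range() is a hypothetical stand-in for the per-chunk shadow memset:

    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    /* Hypothetical: write 'status' into the shadow for [addr, addr + n). */
    static void set_range(unsigned long addr, size_t n, int status)
    {
            (void)addr;
            (void)n;
            (void)status;
    }

    static void mark_range(unsigned long addr, size_t n, int status)
    {
            unsigned long page = addr & ~(PAGE_SIZE - 1);
            size_t first_n = page + PAGE_SIZE - addr;   /* bytes to end of page */

            if (first_n > n)
                    first_n = n;
            set_range(addr, first_n, status);           /* partial leading page */
            addr += first_n;
            n -= first_n;

            while (n >= PAGE_SIZE) {                    /* whole pages */
                    set_range(addr, PAGE_SIZE, status);
                    addr += PAGE_SIZE;
                    n -= PAGE_SIZE;
            }

            if (n)                                      /* partial trailing page */
                    set_range(addr, n, status);
    }
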
/arch/powerpc/kernel/proc_powerpc.c
     43  new = PAGE_SIZE + off;   in page_map_seek()
     48  if ( new < 0 || new > PAGE_SIZE )   in page_map_seek()
     57  PDE_DATA(file_inode(file)), PAGE_SIZE);   in page_map_read()
     62  if ((vma->vm_end - vma->vm_start) > PAGE_SIZE)   in page_map_mmap()
     67  PAGE_SIZE, vma->vm_page_prot);   in page_map_mmap()
     86  proc_set_size(pde, PAGE_SIZE);   in proc_ppc64_init()

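proc_powerpc.c exports exactly one page through /proc: page_map_seek() clamps offsets to [0, PAGE_SIZE], page_map_read() caps reads at PAGE_SIZE, and page_map_mmap() rejects any vma longer than a page. The bounds logic in isolation (plain C, deliberately simplified; these are not the kernel file_operations signatures):

    #define PAGE_SIZE 4096L

    /* Reject a mapping request longer than the single backing page. */
    static int can_map(long vm_start, long vm_end)
    {
            return (vm_end - vm_start) <= PAGE_SIZE;
    }

    /* Seek over a one-page file: whence 2 (SEEK_END) offsets from
     * PAGE_SIZE; anything outside [0, PAGE_SIZE] is an error. */
    static long seek_page(long off, int whence)
    {
            long new = (whence == 2) ? PAGE_SIZE + off : off;

            if (new < 0 || new > PAGE_SIZE)
                    return -1;
            return new;
    }
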
/arch/score/kernel/vmlinux.lds.S
     56  RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
     68  . = ALIGN(PAGE_SIZE);   /* Init code and data */
     71  INIT_TEXT_SECTION(PAGE_SIZE)
     83  . = ALIGN(PAGE_SIZE);

/arch/parisc/kernel/vmlinux.lds.S
     84  . = ALIGN(PAGE_SIZE);
     98  RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
    112  . = ALIGN(PAGE_SIZE);
    142  . = ALIGN(PAGE_SIZE);
    155  . = ALIGN(PAGE_SIZE);

/arch/x86/include/asm/page_64_types.h
      5  #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
      9  #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
     12  #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
     15  #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)

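page_64_types.h sizes every x86-64 kernel stack as PAGE_SIZE shifted left by an order, so each stack is a power-of-two number of pages. A worked instance with an illustrative order (the real THREAD_SIZE_ORDER value is configuration-dependent and not shown in the listing):

    #include <stdio.h>

    #define PAGE_SIZE         4096UL
    #define THREAD_SIZE_ORDER 2     /* illustrative, not the real value */
    #define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)

    int main(void)
    {
            /* 4096 << 2 = 16384: a four-page kernel stack. */
            printf("THREAD_SIZE = %lu\n", THREAD_SIZE);
            return 0;
    }
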
/arch/alpha/mm/init.c
     79  memset((void *) EMPTY_PGT, 0, PAGE_SIZE);   in __bad_pagetable()
     86  memset((void *) EMPTY_PGE, 0, PAGE_SIZE);   in __bad_page()
    108  memset(swapper_pg_dir, 0, PAGE_SIZE);   in switch_to_system_map()
    182  kernel_end = two_pages + 2*PAGE_SIZE;   in callback_init()
    183  memset(two_pages, 0, 2*PAGE_SIZE);   in callback_init()
    188  pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));   in callback_init()
    203  vm_area_register_early(&console_remap_vm, PAGE_SIZE);   in callback_init()
    217  memset(kernel_end, 0, PAGE_SIZE);   in callback_init()
    220  kernel_end += PAGE_SIZE;   in callback_init()
    225  vaddr += PAGE_SIZE;   in callback_init()
    [all …]

/arch/arc/include/uapi/asm/page.h
     29  #define PAGE_SIZE (1 << PAGE_SHIFT)
     32  #define PAGE_SIZE (1UL << PAGE_SHIFT)   /* Default 8K */   macro
     36  #define PAGE_MASK (~(PAGE_SIZE-1))

/arch/mn10300/mm/init.c
     61  for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE);   in paging_init()
     62  loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE);   in paging_init()
     66  ppte += PAGE_SIZE / sizeof(pte_t);   in paging_init()
     88  vm_area_register_early(&user_iomap_vm, PAGE_SIZE);   in paging_init()
    111  high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE);   in mem_init()
    114  memset(empty_zero_page, 0, PAGE_SIZE);   in mem_init()

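paging_init() above indexes the vmalloc area in units of PAGE_SIZE * PTRS_PER_PTE, the span of virtual address space that one page's worth of PTEs can map, and advances ppte by PAGE_SIZE / sizeof(pte_t) entries per page-table page. A quick check of that arithmetic with illustrative constants (4 KiB pages, 4-byte PTEs):

    #include <assert.h>

    #define PAGE_SIZE    4096UL
    #define PTE_SIZE     4UL                      /* illustrative sizeof(pte_t) */
    #define PTRS_PER_PTE (PAGE_SIZE / PTE_SIZE)   /* PTEs per page-table page */

    int main(void)
    {
            /* One page of PTEs maps PTRS_PER_PTE pages: 1024 * 4 KiB = 4 MiB. */
            assert(PAGE_SIZE * PTRS_PER_PTE == 4UL * 1024 * 1024);
            return 0;
    }
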
/arch/frv/include/asm/mem-layout.h
     27  #define PAGE_SIZE (1UL << PAGE_SHIFT)   macro
     29  #define PAGE_SIZE (1 << PAGE_SHIFT)   macro
     32  #define PAGE_MASK (~(PAGE_SIZE-1))
     68  #define BRK_BASE __UL(2 * 1024 * 1024 + PAGE_SIZE)