Lines Matching +full:light +full:- +full:weight
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 1998-2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
11 #include <linux/dma-map-ops.h>
64 if (test_bit(PG_arch_1, &page->flags)) in __ia64_sync_icache_dcache()
65 return; /* i-cache is already coherent with d-cache */ in __ia64_sync_icache_dcache()
68 set_bit(PG_arch_1, &page->flags); /* mark page as clean */ in __ia64_sync_icache_dcache()
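Only the test and the set survive the matcher above; the flush between them is the point of the exercise. A minimal sketch of the whole routine, assuming the usual pte_page()/page_address()/flush_icache_range() helpers (not verbatim source):

void __ia64_sync_icache_dcache (pte_t pte)
{
	struct page *page = pte_page(pte);
	unsigned long addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;		/* i-cache is already coherent with d-cache */

	/* make the i-cache see what the d-cache holds for this page */
	flush_icache_range(addr, addr + page_size(page));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

PG_arch_1 thus acts as a per-page "i-cache is clean" flag: pages get flushed lazily, the first time they are mapped executable.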
72 * Since DMA is i-cache coherent, any (complete) pages that were written via
74 * flush them when they get mapped into an executable vm-area.
81 set_bit(PG_arch_1, &pfn_to_page(pfn)->flags); in arch_dma_mark_clean()
82 } while (++pfn <= PHYS_PFN(paddr + size - 1)); in arch_dma_mark_clean()
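Only the loop body and its terminating condition match above; a sketch of the complete helper, assuming it takes the (paddr, size) pair used here:

void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
	unsigned long pfn = PHYS_PFN(paddr);

	/* every page covered by the DMA transfer is i-cache clean */
	do {
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
	} while (++pfn <= PHYS_PFN(paddr + size - 1));
}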
88 unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16; in ia64_set_rbs_bot()
92 current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size); in ia64_set_rbs_bot()
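The arithmetic deserves a worked example: "& -16" clears the low four bits, rounding the stack limit down to a 16-byte multiple, and PAGE_ALIGN() rounds the computed backing-store bottom up to a page boundary. A self-contained user-space illustration with made-up numbers (16 KB pages assumed, the ia64 default):

#include <stdio.h>

#define PAGE_SIZE	(1UL << 14)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long stack_limit = 65543;			/* hypothetical RLIMIT_STACK maximum */
	unsigned long start_stack = 0x8000000000000000UL;	/* hypothetical stack top */

	unsigned long stack_size = stack_limit & -16;		/* -> 65536 */
	unsigned long rbs_bot = PAGE_ALIGN(start_stack - stack_size);

	printf("stack_size = %lu, rbs_bot = %#lx\n", stack_size, rbs_bot);
	return 0;
}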
96 * This performs some platform-dependent address space initialization.
97 * On IA-64, we want to setup the VM area for the register backing
113 vma = vm_area_alloc(current->mm); in ia64_init_addr_space()
116 vma->vm_start = current->thread.rbs_bot & PAGE_MASK; in ia64_init_addr_space()
117 vma->vm_end = vma->vm_start + PAGE_SIZE; in ia64_init_addr_space()
118 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; in ia64_init_addr_space()
119 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in ia64_init_addr_space()
120 mmap_write_lock(current->mm); in ia64_init_addr_space()
121 if (insert_vm_struct(current->mm, vma)) { in ia64_init_addr_space()
122 mmap_write_unlock(current->mm); in ia64_init_addr_space()
126 mmap_write_unlock(current->mm); in ia64_init_addr_space()
129 /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ in ia64_init_addr_space()
130 if (!(current->personality & MMAP_PAGE_ZERO)) { in ia64_init_addr_space()
131 vma = vm_area_alloc(current->mm); in ia64_init_addr_space()
134 vma->vm_end = PAGE_SIZE; in ia64_init_addr_space()
135 vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); in ia64_init_addr_space()
136 vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | in ia64_init_addr_space()
138 mmap_write_lock(current->mm); in ia64_init_addr_space()
139 if (insert_vm_struct(current->mm, vma)) { in ia64_init_addr_space()
140 mmap_write_unlock(current->mm); in ia64_init_addr_space()
144 mmap_write_unlock(current->mm); in ia64_init_addr_space()
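The matcher drops the statements between each failed insert_vm_struct() and the unlock that follows it; presumably both error paths free the just-allocated VMA and give up, roughly like this (a sketch, not verbatim source):

	if (insert_vm_struct(current->mm, vma)) {
		mmap_write_unlock(current->mm);
		vm_area_free(vma);		/* undo vm_area_alloc() */
		return;
	}
	mmap_write_unlock(current->mm);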
153 -1, "unused kernel"); in free_initmem()
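Only one argument of the call survives above; the function presumably amounts to a single free_reserved_area() call over the init section, something like this (a sketch, assuming the ia64_imva() helper and the __init_begin/__init_end section markers):

void __init
free_initmem (void)
{
	/* poison value -1: release the pages without scribbling over them */
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}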
166 * - align up the beginning of initrd in free_initrd_mem()
167 * - align down the end of initrd in free_initrd_mem()
194 printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10); in free_initrd_mem()
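The alignment the two bullet comments describe, plus the freeing loop that presumably follows, would look roughly like this (PAGE_ALIGN(), PAGE_MASK and free_reserved_page() assumed):

	start = PAGE_ALIGN(start);	/* round the start of the initrd up ... */
	end = end & PAGE_MASK;		/* ... and its end down, so only whole kernel pages are freed */

	for (; start < end; start += PAGE_SIZE)
		if (virt_addr_valid(start))
			free_reserved_page(virt_to_page(start));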
245 * Map the gate page twice: once read-only to export the ELF in setup_gate()
246 * headers etc. and once execute-only page to enable in setup_gate()
247 * privilege-promotion via "epc": in setup_gate()
256 /* Fill in the holes (if any) with read-only zero pages: */ in setup_gate()
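A sketch of the double mapping described above, assuming the put_kernel_page() helper defined earlier in this file and the GATE_ADDR/PERCPU_PAGE_SIZE/PAGE_GATE/PAGE_READONLY definitions from the ia64 headers:

	struct page *page = virt_to_page(ia64_imva(__start_gate_section));

	/* read-only alias: user space can read the gate DSO's ELF headers */
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
	/* execute-only alias: "epc" promotes privilege when executed from here */
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);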
318 * address space. The IA-64 architecture guarantees that at least 50 bits of in ia64_mmu_init()
325 * --davidm 00/12/06 in ia64_mmu_init()
328 # define mapped_space_bits (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT) in ia64_mmu_init()
333 * non-speculative accesses to the virtual page table, so the address range of the in ia64_mmu_init()
336 # define vmlpt_bits (impl_va_bits - PAGE_SHIFT + pte_bits) in ia64_mmu_init()
339 impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61))); in ia64_mmu_init()
342 panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1); in ia64_mmu_init()
344 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need, in ia64_mmu_init()
345 * which must fit into "vmlpt_bits - pte_bits" slots. Second half of in ia64_mmu_init()
349 if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) || in ia64_mmu_init()
350 (mapped_space_bits > impl_va_bits - 1)) in ia64_mmu_init()
351 panic("Cannot build a big enough virtual-linear page table" in ia64_mmu_init()
356 /* place the VMLPT at the end of each page-table mapped region: */ in ia64_mmu_init()
357 pta = POW2(61) - POW2(vmlpt_bits); in ia64_mmu_init()
361 * 8 selects between the short and long format, bits 2-7 the in ia64_mmu_init()
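Plugging typical numbers into the formulas above makes the layout concrete; assuming 16 KB pages (PAGE_SHIFT = 14), 8-byte PTEs (pte_bits = 3) and 51 implemented virtual-address bits, as on common Itanium parts:

	/*
	 * mapped_space_bits = 3*(14 - 3) + 14 = 47  -> 2^47 bytes of mappable space per region
	 * vmlpt_bits        = 51 - 14 + 3     = 40  -> the VMLPT itself spans 2^40 bytes
	 * pta               = 2^61 - 2^40           -> the VMLPT sits at the very top of the region
	 *
	 * Both sanity checks pass: 47 - 14 = 33 <= 40 - 3 = 37 (all needed PTEs fit),
	 * and 47 <= 51 - 1 = 50 (the mapped space stays clear of the unimplemented hole).
	 */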
382 end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i]; in vmemmap_find_next_valid_pfn()
432 end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1; in vmemmap_find_next_valid_pfn()
434 return hole_next_pfn - pgdat->node_start_pfn; in vmemmap_find_next_valid_pfn()
503 return -ENOMEM; in create_mem_map_page_table()
523 if (map_start < args->start) in virtual_memmap_init()
524 map_start = args->start; in virtual_memmap_init()
525 if (map_end > args->end) in virtual_memmap_init()
526 map_end = args->end; in virtual_memmap_init()
533 map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page); in virtual_memmap_init()
534 map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end) in virtual_memmap_init()
538 memmap_init_zone((unsigned long)(map_end - map_start), in virtual_memmap_init()
539 args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end), in virtual_memmap_init()
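The two rounding statements widen [map_start, map_end) to whole pages of the virtual memmap before memmap_init_zone() runs. A worked example, assuming 16 KB pages and a 64-byte struct page (so entries never straddle a page boundary): a map_start sitting 0x1080 bytes into a memmap page is moved back by 0x1080 / 64 = 66 entries, i.e. exactly 0x1080 bytes, onto the first entry of that memmap page; map_end is pushed forward to the end of its page in the same way.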
576 && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK)) in ia64_pfn_valid()
577 || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0)); in ia64_pfn_valid()
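Only the tail of the expression matches above; the trick is to probe the struct page entry in the virtual memmap with __get_user(), which fails with a non-zero return instead of crashing when the backing memmap page was never populated. A sketch of the whole predicate under that assumption:

int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	/* probe the first byte of the entry, and the last byte too when the
	 * entry straddles a memmap page boundary */
	return	   (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}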
589 if (*max_gap < (start - last_end)) in find_largest_hole()
590 *max_gap = start - last_end; in find_largest_hole()
609 memblock_add_node(__pa(start), end - start, nid); in register_active_ranges()
619 pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT; in find_max_min_low_pfn()
622 pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT; in find_max_min_low_pfn()
630 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
632 * down into the kernel and calling the normal (heavy-weight) syscall handler. This is
680 * For fsyscall entry points with no light-weight handler, use the ordinary in mem_init()
681 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry in mem_init()
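The fix-up the comment describes is a small loop over the syscall table in mem_init(); a sketch, assuming fsyscall_table[] and sys_call_table[] each hold one entry per system call and that nolwsys reflects the boot option:

	int i;

	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		/* no light-weight handler (or "nolwsys" given): use the heavy-weight
		 * handler and tag it by setting bit 0 */
		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}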
702 if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot)) in arch_add_memory()
703 return -EINVAL; in arch_add_memory()
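After the pgprot check, arch_add_memory() presumably just converts the range to page frame numbers and defers to the generic hotplug code; a sketch under that assumption:

	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		printk(KERN_WARNING "%s: __add_pages() failed: %d\n", __func__, ret);
	return ret;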