Lines Matching +full:hall +full:- +full:switch +full:-
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
46 return -1; in kmmio_fault()
53 * 32-bit mode:
58 * 64-bit mode:
72 switch (instr_hi) { in check_prefetch_opcode()
85 * In 64-bit mode 0x40..0x4F are valid REX prefixes in check_prefetch_opcode()
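The check_prefetch_opcode() fragments above are about deciding whether a faulting instruction was a harmless AMD 3DNow! prefetch. The standalone C sketch below only illustrates that idea; it is not the kernel's code, and the helper name scan_is_prefetch() is made up. It skips common prefix bytes (including REX 0x40..0x4F, which are prefixes only in 64-bit mode) and reports a hit on the two-byte 0x0F 0x0D prefetch opcode.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, modeled loosely on check_prefetch_opcode():
 * walk the instruction bytes, skip prefixes, and detect 0x0F 0x0D
 * (the AMD 3DNow! PREFETCH/PREFETCHW opcode). */
static bool scan_is_prefetch(const uint8_t *insn, size_t len, bool long_mode)
{
	size_t i;

	for (i = 0; i < len; i++) {
		uint8_t op = insn[i];

		/* REX prefixes are only prefixes in 64-bit mode. */
		if (long_mode && (op & 0xf0) == 0x40)
			continue;
		/* Segment override, operand/address size, LOCK/REP prefixes. */
		if (op == 0x26 || op == 0x2e || op == 0x36 || op == 0x3e ||
		    op == 0x64 || op == 0x65 || op == 0x66 || op == 0x67 ||
		    op == 0xf0 || op == 0xf2 || op == 0xf3)
			continue;
		/* Two-byte escape: 0x0F 0x0D is PREFETCH/PREFETCHW. */
		return op == 0x0f && i + 1 < len && insn[i + 1] == 0x0d;
	}
	return false;
}

int main(void)
{
	const uint8_t insn[] = { 0x48, 0x0f, 0x0d, 0x08 };	/* rex.w prefetchw (%rax) */

	printf("is prefetch: %d\n", scan_is_prefetch(insn, sizeof(insn), true));
	return 0;
}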
127 * not-present page (e.g. due to a race). No one has ever in is_prefetch()
173 * and redundant with the set_pmd() on non-PAE. As would in vmalloc_sync_one()
205 * where it synchronizes this update with the other page-tables in the
211 * which are not mapped in every page-table in the system, causing an
212 * unhandled page-fault when they are accessed.
222 return -1; in vmalloc_fault()
225 * Synchronize this task's top level page-table in vmalloc_fault()
229 * an interrupt in the middle of a task switch.. in vmalloc_fault()
234 return -1; in vmalloc_fault()
241 return -1; in vmalloc_fault()
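The vmalloc_fault() fragments describe lazily copying a missing top-level entry from the reference kernel page table (init_mm) into whatever page table was live when the fault hit. The kernel-style sketch below is a rough, hypothetical outline of that synchronization step only; the real 32-bit code works one level lower, at the pmd, via vmalloc_sync_one().

/*
 * Simplified, hypothetical sketch of the vmalloc "sync on fault" idea;
 * not the kernel's actual vmalloc_fault() implementation.
 */
static int vmalloc_fault_sketch(unsigned long address)
{
	/* Top-level entry in the page table that was active at fault time. */
	pgd_t *pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
	/* Matching entry in the reference kernel page table. */
	pgd_t *pgd_ref = init_mm.pgd + pgd_index(address);

	if (pgd_none(*pgd_ref))
		return -1;		/* not mapped anywhere: a real fault */

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);	/* copy the missing entry and retry */

	return 0;
}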
261 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; in arch_sync_kernel_mappings()
281 if (!v8086_mode(regs) || !tsk->thread.vm86) in check_v8086_mode()
284 bit = (address - 0xA0000) >> PAGE_SHIFT; in check_v8086_mode()
286 tsk->thread.vm86->screen_bitmap |= 1 << bit; in check_v8086_mode()
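The check_v8086_mode() lines compute which bit of the vm86 screen bitmap corresponds to a faulting address in the legacy VGA window (0xA0000..0xBFFFF). A tiny standalone demonstration of that bit arithmetic follows; PAGE_SHIFT is defined locally for the demo.

#include <stdio.h>

#define PAGE_SHIFT 12		/* 4 KiB pages, as on x86 */

int main(void)
{
	unsigned long address = 0xB8000;	/* classic VGA text buffer */
	unsigned int bit = (address - 0xA0000) >> PAGE_SHIFT;
	unsigned long screen_bitmap = 0;

	if (bit < 32)
		screen_bitmap |= 1UL << bit;

	printf("address 0x%lx -> bit %u, bitmap 0x%lx\n",
	       address, bit, screen_bitmap);
	return 0;
}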
321 * And let's rather not kmap-atomic the pte, just in case in dump_pagetable()
345 * No vm86 mode in 64-bit mode:
427 * Does nothing on 32-bit.
436 if (address != regs->ip) in is_errata93()
446 regs->ip = address; in is_errata93()
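is_errata93() handles an AMD K8 erratum in which the upper 32 bits of the return RIP can be lost, so the handler re-extends the address and resumes there. The standalone sketch below (assuming a 64-bit unsigned long) only illustrates the address fix-up; the range check against the real _stext/_etext symbols is replaced by hypothetical bounds.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical kernel text range, stand-ins for _stext/_etext. */
#define FAKE_STEXT 0xffffffff81000000UL
#define FAKE_ETEXT 0xffffffff82000000UL

/* Sketch of an erratum-93 style fix-up: if the faulting address equals the
 * truncated instruction pointer, restore the lost upper 32 bits and accept
 * the result when it lands back inside kernel text. */
static bool fixup_errata93(unsigned long *ip, unsigned long address)
{
	if (address != *ip)
		return false;
	if (address >> 32)		/* upper bits still present: nothing to fix */
		return false;

	address |= 0xffffffffUL << 32;
	if (address >= FAKE_STEXT && address < FAKE_ETEXT) {
		*ip = address;		/* resume at the repaired address */
		return true;
	}
	return false;
}

int main(void)
{
	unsigned long ip = 0x81234567UL;	/* truncated kernel RIP */

	printf("fixed: %d, ip now 0x%lx\n", fixup_errata93(&ip, ip), ip);
	return 0;
}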
464 if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) in is_errata100()
493 if (offset + sizeof(struct ldttss_desc) >= gdt->size) { in show_ldttss()
494 pr_alert("%s: 0x%hx -- out of bounds\n", name, index); in show_ldttss()
498 if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset), in show_ldttss()
500 pr_alert("%s: 0x%hx -- GDT entry is not readable\n", in show_ldttss()
509 pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n", in show_ldttss()
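show_ldttss() reads a descriptor out of the GDT, guarding against an out-of-bounds selector and an unreadable entry, then prints its base and limit. As a standalone illustration, the sketch below decodes base and limit from the raw 8-byte descriptor layout; struct seg_desc is a local stand-in for the demo, not the kernel's ldttss_desc.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the low 8 bytes of an x86 segment descriptor. */
struct seg_desc {
	uint16_t limit0;
	uint16_t base0;
	uint8_t  base1;
	uint8_t  type_attrs;
	uint8_t  limit1_flags;	/* limit bits 16..19 in the low nibble */
	uint8_t  base2;
};

int main(void)
{
	struct seg_desc d = {
		.limit0 = 0x2067, .base0 = 0x4000,
		.base1 = 0x00, .type_attrs = 0x89,
		.limit1_flags = 0x00, .base2 = 0xfe,
	};
	unsigned long base = d.base0 | ((unsigned long)d.base1 << 16) |
			     ((unsigned long)d.base2 << 24);
	unsigned int limit = d.limit0 | ((d.limit1_flags & 0xf) << 16);

	printf("base=0x%lx limit=0x%x\n", base, limit);
	return 0;
}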
530 pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", in show_fault_oops()
552 pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code, in show_fault_oops()
553 !(error_code & X86_PF_PROT) ? "not-present page" : in show_fault_oops()
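show_fault_oops() decodes the hardware error code pushed by the #PF exception. The standalone snippet below prints the same kind of summary from the individual bits; the PF_* values here follow the architectural error-code layout (P, W/R, U/S, RSVD, I/D, PK) and are defined locally for the demo.

#include <stdio.h>

/* Architectural #PF error-code bits. */
#define PF_PROT		0x01	/* 0: not-present page, 1: protection violation */
#define PF_WRITE	0x02	/* 0: read access, 1: write access */
#define PF_USER		0x04	/* 0: kernel-mode access, 1: user-mode access */
#define PF_RSVD		0x08	/* reserved bit set in a paging entry */
#define PF_INSTR	0x10	/* instruction fetch */
#define PF_PK		0x20	/* protection-key violation */

static void decode_pf_error(unsigned long ec)
{
	printf("#PF: error_code(0x%04lx) - %s\n", ec,
	       (ec & PF_PROT) ? "permissions violation" : "not-present page");
	printf("     %s access from %s mode%s%s\n",
	       (ec & PF_INSTR) ? "instruction fetch" :
	       (ec & PF_WRITE) ? "write" : "read",
	       (ec & PF_USER) ? "user" : "kernel",
	       (ec & PF_RSVD) ? ", reserved bit set" : "",
	       (ec & PF_PK) ? ", protection key violated" : "");
}

int main(void)
{
	decode_pf_error(0x0002);	/* kernel write to a not-present page */
	decode_pf_error(0x0015);	/* user instruction fetch, protection violation */
	return 0;
}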
574 /* Usable even on Xen PV -- it's just slow. */ in show_fault_oops()
603 tsk->comm, address); in pgtable_bad()
619 * table layout, pretend that user-mode accesses to in set_signal_archinfo()
629 tsk->thread.trap_nr = X86_TRAP_PF; in set_signal_archinfo()
630 tsk->thread.error_code = error_code | X86_PF_USER; in set_signal_archinfo()
631 tsk->thread.cr2 = address; in set_signal_archinfo()
644 * This is an implicit supervisor-mode access from user in no_context()
645 * mode. Bypass all the kernel-mode recovery code and just in no_context()
667 if (current->thread.sig_on_uaccess_err && signal) { in no_context()
683 * stack in the direct map, but that's not an overflow -- check in no_context()
687 (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) || in no_context()
688 address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) { in no_context()
689 unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *); in no_context()
693 * double-fault even before we get this far, in which case in no_context()
694 * we're fine: the double-fault handler will deal with it. in no_context()
697 * and then double-fault, though, because we're likely to in no_context()
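The no_context() fragments recognize a kernel stack overflow: the faulting address sits within one page of either end of the task's stack, and by that point the CPU may already be on the double-fault (DF) IST stack. A standalone sketch of just the proximity test follows, with hypothetical stack bounds standing in for tsk->stack and THREAD_SIZE; the unsigned wraparound in the comparison is intentional, matching the pattern above.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define THREAD_SIZE	(4 * PAGE_SIZE)	/* illustrative; the real value is per-config */

/* Is the faulting address within one page of either end of the stack?
 * That pattern suggests the guard page was hit, i.e. a stack overflow. */
static bool looks_like_stack_overflow(unsigned long stack_base,
				      unsigned long address)
{
	return (stack_base - 1 - address < PAGE_SIZE) ||
	       (address - (stack_base + THREAD_SIZE) < PAGE_SIZE);
}

int main(void)
{
	unsigned long stack_base = 0xffffc90000010000UL;	/* made-up base */

	/* Just below the stack: classic overflow into the guard page. */
	printf("%d\n", looks_like_stack_overflow(stack_base, stack_base - 8));
	/* Far away: an ordinary bad access, not an overflow. */
	printf("%d\n", looks_like_stack_overflow(stack_base, 0x1000));
	return 0;
}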
712 * 32-bit: in no_context()
718 * 64-bit: in no_context()
720 * Hall of shame of CPU/BIOS bugs. in no_context()
774 loglvl, tsk->comm, task_pid_nr(tsk), address, in show_signal_msg()
775 (void *)regs->ip, (void *)regs->sp, error_code); in show_signal_msg()
777 print_vma_addr(KERN_CONT " in ", regs->ip); in show_signal_msg()
786 * of the address space that has user-accessible permissions.
818 * layout, pretend that user-mode accesses to kernel addresses in __bad_area_nosemaphore()
856 struct mm_struct *mm = current->mm; in __bad_area()
937 /* User-space => ok to do another page fault: */ in do_sigbus()
950 tsk->comm, tsk->pid, address); in do_sigbus()
982 * oom-killed): in mm_fault_error()
1011 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
1013 * cross-processor TLB flush, even if no stale TLB entries exist
1017 * fewer permissions than the page table entry. Non-present (P = 0)
1023 * Returns non-zero if a spurious fault was handled, zero otherwise.
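These comments explain spurious kernel faults: the TLB may still hold an entry with fewer permissions than the current page-table entry after a lazy RO to RW or NX to X change, so the fault can be dismissed once the handler confirms the page table already grants the access. The standalone sketch below mirrors the idea behind that per-entry check (assuming a 64-bit build); the flag and bit values are defined locally for the demo rather than taken from kernel headers.

#include <stdbool.h>
#include <stdio.h>

/* Locally defined stand-ins for the relevant PTE flags and error-code bits. */
#define PTE_PRESENT	0x001UL
#define PTE_RW		0x002UL
#define PTE_NX		0x8000000000000000UL
#define PF_WRITE	0x02UL
#define PF_INSTR	0x10UL

/* A fault is spurious if the page-table entry already permits the access
 * that faulted; then only the stale TLB entry needs to be flushed. */
static bool fault_is_spurious(unsigned long error_code, unsigned long pte)
{
	if (!(pte & PTE_PRESENT))
		return false;			/* non-present faults are real */
	if ((error_code & PF_WRITE) && !(pte & PTE_RW))
		return false;			/* write still not allowed */
	if ((error_code & PF_INSTR) && (pte & PTE_NX))
		return false;			/* execute still not allowed */
	return true;
}

int main(void)
{
	/* Write fault, but the PTE was already upgraded to RW: spurious. */
	printf("%d\n", fault_is_spurious(PF_WRITE, PTE_PRESENT | PTE_RW));
	/* Write fault on a still read-only PTE: a genuine protection fault. */
	printf("%d\n", fault_is_spurious(PF_WRITE, PTE_PRESENT));
	return 0;
}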
1106 * a follow-up action to resolve the fault, like a COW. in access_error()
1122 if (unlikely(!(vma->vm_flags & VM_WRITE))) in access_error()
1141 * On 64-bit systems, the vsyscall page is at an address above in fault_in_kernel_space()
1169 * We can fault-in kernel-space virtual memory on-demand. The in do_kern_addr_fault()
1177 * Before doing this on-demand faulting, ensure that the in do_kern_addr_fault()
1180 * 2. A fault caused by a user-mode access. (Do not demand- in do_kern_addr_fault()
1181 * fault kernel memory due to user-mode accesses). in do_kern_addr_fault()
1182 * 3. A fault caused by a page-level protection violation. in do_kern_addr_fault()
1183 * (A demand fault would be on a non-present page which in do_kern_addr_fault()
1186 * This is only needed to close a race condition on x86-32 in in do_kern_addr_fault()
1188 * vmalloc_fault() for details. On x86-64 the race does not in do_kern_addr_fault()
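The do_kern_addr_fault() comments list the cases that must be excluded before demand-faulting kernel address space: reserved-bit faults, user-mode accesses, and protection violations, all of which imply a present page or an illegal access rather than a missing vmalloc mapping. A compact standalone version of that filter is sketched below, using the same three error-code bits (values defined locally).

#include <stdbool.h>
#include <stdio.h>

#define PF_PROT		0x01
#define PF_USER		0x04
#define PF_RSVD		0x08

/* Only a kernel-mode, non-present, non-reserved-bit fault may be handled
 * by lazily syncing the vmalloc area's page-table entries. */
static bool may_demand_fault_kernel(unsigned long error_code)
{
	return !(error_code & (PF_RSVD | PF_USER | PF_PROT));
}

int main(void)
{
	printf("%d\n", may_demand_fault_kernel(0x00));	/* kernel read, not present: yes */
	printf("%d\n", may_demand_fault_kernel(0x05));	/* user protection fault: no */
	return 0;
}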
1231 mm = tsk->mm; in do_user_addr_fault()
1253 !(regs->flags & X86_EFLAGS_AC))) in do_user_addr_fault()
1272 * User-mode registers count as a user access even for any in do_user_addr_fault()
1279 if (regs->flags & X86_EFLAGS_IF) in do_user_addr_fault()
1309 * Kernel-mode access to the user address space should only occur in do_user_addr_fault()
1310 * on well-defined single instructions listed in the exception in do_user_addr_fault()
1321 if (!user_mode(regs) && !search_exception_tables(regs->ip)) { in do_user_addr_fault()
1345 if (likely(vma->vm_start <= address)) in do_user_addr_fault()
1347 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { in do_user_addr_fault()
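The do_user_addr_fault() lines show the classic VMA lookup: if the address is below vm_start, the fault can still be legitimate when the VMA is a grow-down stack. The standalone sketch below reproduces that three-way decision with a minimal local vma structure and a stand-in VM_GROWSDOWN value; it leaves out the kernel's locking and the actual expand_stack() call.

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN	0x0100	/* stand-in flag value for the demo */

struct demo_vma {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;
};

enum fault_decision { FAULT_BAD_AREA, FAULT_GROW_STACK, FAULT_HANDLE };

/* Mirror of the lookup logic: inside the VMA -> handle it; below a
 * grow-down VMA -> try to expand the stack; otherwise -> bad area. */
static enum fault_decision classify(const struct demo_vma *vma,
				    unsigned long address)
{
	if (!vma || address >= vma->vm_end)
		return FAULT_BAD_AREA;
	if (vma->vm_start <= address)
		return FAULT_HANDLE;
	if (vma->vm_flags & VM_GROWSDOWN)
		return FAULT_GROW_STACK;
	return FAULT_BAD_AREA;
}

int main(void)
{
	struct demo_vma stack = {
		.vm_start = 0x7ffffffde000UL,
		.vm_end   = 0x7ffffffff000UL,
		.vm_flags = VM_GROWSDOWN,
	};

	printf("%d\n", classify(&stack, 0x7ffffffdd000UL));	/* just below: grow */
	printf("%d\n", classify(&stack, 0x7ffffffe0000UL));	/* inside: handle */
	return 0;
}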
1432 /* Was the fault on kernel-controlled part of the address space? */ in handle_page_fault()
1453 prefetchw(&current->mm->mmap_lock); in DEFINE_IDTENTRY_RAW_ERRORCODE()