Lines matching "no-memory-wc"
84 * memory type of pages that have a backing struct page.
86 * X86 PAT supports 4 different memory types:
87 * - _PAGE_CACHE_MODE_WB
88 * - _PAGE_CACHE_MODE_WC
89 * - _PAGE_CACHE_MODE_UC_MINUS
90 * - _PAGE_CACHE_MODE_WT
104 unsigned long pg_flags = pg->flags & _PGMT_MASK; in get_page_memtype()
140 old_flags = pg->flags; in set_page_memtype()
142 } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags); in set_page_memtype()
147 return -1; in get_page_memtype()
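The four RAM memtypes above are packed into two page-flag bits (PG_uncached and PG_arch_1 in the kernel) and updated with a cmpxchg() retry loop so that concurrent updates to unrelated flag bits are never lost. Below is a minimal userspace sketch of the same read-modify-CAS pattern, using C11 atomics in place of the kernel's cmpxchg(); the bit position and names here are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdio.h>

/* Two flag bits encode the four RAM memtypes, mirroring _PGMT_* in pat.c. */
#define PGMT_SHIFT	3	/* arbitrary bit position for the demo */
#define PGMT_MASK	(3UL << PGMT_SHIFT)

enum memtype { MT_WB = 0, MT_WC = 1, MT_UC_MINUS = 2, MT_WT = 3 };

static enum memtype get_memtype(atomic_ulong *flags)
{
	return (enum memtype)((atomic_load(flags) & PGMT_MASK) >> PGMT_SHIFT);
}

static void set_memtype(atomic_ulong *flags, enum memtype mt)
{
	unsigned long old = atomic_load(flags), new;

	/* Retry until no other writer touched the flags, like cmpxchg(). */
	do {
		new = (old & ~PGMT_MASK) | ((unsigned long)mt << PGMT_SHIFT);
	} while (!atomic_compare_exchange_weak(flags, &old, new));
}

int main(void)
{
	atomic_ulong flags = 0;

	set_memtype(&flags, MT_WC);
	printf("memtype = %d (expect %d)\n", get_memtype(&flags), MT_WC);
	return 0;
}

On CAS failure the current flag word is re-read and the memtype bits are re-applied on top of it, which is exactly the race set_page_memtype() guards against with its do/while loop.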
173 case PAT_WC: cache = CM(WC); cache_mode = "WC "; break; in pat_get_cache_mode()
177 case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break; in pat_get_cache_mode()
200 for (i = 7; i >= 0; i--) { in __init_cache_modes()
205 pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg); in __init_cache_modes()
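__init_cache_modes() walks the eight one-byte slots of the IA32_PAT MSR from the top down, decoding the low three bits of each byte into a cache mode and building the string that gets printed. A self-contained sketch of that decode, using the architectural PAT encodings and the documented power-on default MSR value from the Intel SDM:

#include <stdint.h>
#include <stdio.h>

/* IA32_PAT entry encodings: the low 3 bits of each byte. */
static const char *pat_name(unsigned int e)
{
	switch (e) {
	case 0: return "UC ";
	case 1: return "WC ";
	case 4: return "WT ";
	case 5: return "WP ";
	case 6: return "WB ";
	case 7: return "UC-";
	default: return "?? ";	/* 2 and 3 are reserved */
	}
}

int main(void)
{
	/* Power-on default: WB, WT, UC-, UC, repeated in slots 4-7. */
	uint64_t pat = 0x0007040600070406ULL;

	printf("PAT configuration [0-7]:");
	for (int i = 0; i < 8; i++)
		printf(" %s", pat_name((unsigned int)(pat >> (i * 8)) & 7));
	printf("\n");
	return 0;
}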
238 * PAT on the boot CPU. We have no way to undo PAT. in pat_ap_init()
261 * as No PAT. in init_cache_modes()
268 * No PAT. Emulate the PAT table that corresponds to the two in init_cache_modes()
279 * 10 2 UC-: _PAGE_CACHE_MODE_UC_MINUS in init_cache_modes()
282 * NOTE: When WC or WP is used, it is redirected to UC- per in init_cache_modes()
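Without PAT, the slot index is just the two legacy PTE bits, PCD*2 + PWT, which is why the emulated table above has exactly four entries. A tiny sketch of that arithmetic:

#include <stdio.h>

/* No PAT: only PCD and PWT select the memtype, slot = PCD<<1 | PWT. */
static const char *nopat_slot[4] = { "WB", "WT", "UC-", "UC" };

int main(void)
{
	for (int pcd = 0; pcd <= 1; pcd++)
		for (int pwt = 0; pwt <= 1; pwt++)
			printf("PCD=%d PWT=%d -> slot %d = %s\n",
			       pcd, pwt, (pcd << 1) | pwt,
			       nopat_slot[(pcd << 1) | pwt]);
	return 0;
}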
293 * pat_init - Initialize PAT MSR and PAT table
295 * This function initializes PAT MSR and PAT table with an OS-defined value
296 * to enable additional cache attributes, WC, WT and WP.
310 if ((c->x86_vendor == X86_VENDOR_INTEL) && in pat_init()
311 (((c->x86 == 0x6) && (c->x86_model <= 0xd)) || in pat_init()
312 ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) { in pat_init()
325 * 001 1 WC : _PAGE_CACHE_MODE_WC in pat_init()
326 * 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS in pat_init()
330 * NOTE: When WT or WP is used, it is redirected to UC- per in pat_init()
333 pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) | in pat_init()
334 PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC); in pat_init()
351 * 001 1 WC : _PAGE_CACHE_MODE_WC in pat_init()
352 * 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS in pat_init()
356 * 110 6 UC-: Reserved in pat_init()
362 pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) | in pat_init()
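Both configurations are composed with a PAT(slot, type) helper macro that shifts the 3-bit type encoding into the slot's byte of the 64-bit MSR image. A standalone sketch of the same construction; the PAT_* values are the architectural encodings, and the macro shape mirrors pat.c's PAT() macro:

#include <stdint.h>
#include <stdio.h>

enum { PAT_UC = 0, PAT_WC = 1, PAT_WT = 4, PAT_WP = 5, PAT_WB = 6,
       PAT_UC_MINUS = 7 };

/* Place memtype y into PAT slot x (one byte per slot). */
#define PAT(x, y) ((uint64_t)PAT_##y << ((x) * 8))

int main(void)
{
	uint64_t pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) |
		       PAT(3, UC) | PAT(4, WB) | PAT(5, WC) |
		       PAT(6, UC_MINUS) | PAT(7, UC);

	printf("IA32_PAT image: 0x%016llx\n", (unsigned long long)pat);
	return 0;
}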
379 * Computes the intersection of the PAT memory type and the MTRR memory type,
380 * and returns the resulting memory type as PAT understands it.
382 * The intersection is based on "Effective Memory Type" tables in IA-32
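In practice the helper behind this comment (pat_x_mtrr_type() in this file) collapses the SDM tables to a single check: only a WB request consults the MTRR lookup, and any MTRR type other than WB demotes the result to UC-. A sketch of that rule, with illustrative enum names:

#include <stdio.h>

enum cache_mode { CM_WB, CM_WC, CM_UC_MINUS, CM_UC, CM_WT, CM_WP };
enum mtrr_type  { MTRR_UC, MTRR_WC, MTRR_WT, MTRR_WP, MTRR_WB };

/*
 * A WB request defers to the MTRR hint; anything weaker than WB from
 * the MTRR demotes the result to UC-. Non-WB requests pass through.
 */
static enum cache_mode pat_x_mtrr(enum cache_mode req, enum mtrr_type mtrr)
{
	if (req == CM_WB && mtrr != MTRR_WB)
		return CM_UC_MINUS;
	return req;
}

int main(void)
{
	printf("WB req over UC MTRR -> %s\n",
	       pat_x_mtrr(CM_WB, MTRR_UC) == CM_UC_MINUS ? "UC-" : "WB");
	return 0;
}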
416 state->not_ram |= initial_pfn > state->cur_pfn; in pagerange_is_ram_callback()
417 state->ram |= total_nr_pages > 0; in pagerange_is_ram_callback()
418 state->cur_pfn = initial_pfn + total_nr_pages; in pagerange_is_ram_callback()
420 return state->ram && state->not_ram; in pagerange_is_ram_callback()
427 unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; in pat_pagerange_is_ram()
432 * region is tracked as non-RAM. This will allow users of in pat_pagerange_is_ram()
441 ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, in pat_pagerange_is_ram()
445 return (ret > 0) ? -1 : (state.ram ? 1 : 0); in pat_pagerange_is_ram()
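Taken together, the callback and the walk classify a pfn range three ways: 1 for all-RAM, 0 for no-RAM, and -1 when the range mixes both (the ret > 0 early exit above). A toy classifier showing the same three-state result; the RAM map here is invented for the demo, not real e820 data:

#include <stdbool.h>
#include <stdio.h>

static bool toy_is_ram(unsigned long pfn)
{
	return pfn >= 16 && pfn < 32;	/* one RAM block at [16, 32) */
}

/* 1 = all RAM, 0 = no RAM, -1 = mixed, like pat_pagerange_is_ram(). */
static int range_is_ram(unsigned long start_pfn, unsigned long end_pfn)
{
	bool ram = false, not_ram = false;

	for (unsigned long pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (toy_is_ram(pfn))
			ram = true;
		else
			not_ram = true;
		if (ram && not_ram)
			return -1;	/* mixed: stop early, like the walk */
	}
	return ram ? 1 : 0;
}

int main(void)
{
	printf("[16,24) -> %d, [0,8) -> %d, [8,24) -> %d\n",
	       range_is_ram(16, 24), range_is_ram(0, 8), range_is_ram(8, 24));
	return 0;
}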
450 * The page flags are limited to four types, WB (default), WC, WT and UC-.
451 * A WP request fails with -EINVAL, and a UC request gets redirected to UC-. Setting
452 * a new memory type is only allowed for a page mapped with the default WB
456 * - Find the memtype of all the pages in the range, look for any conflicts.
457 * - In case of no conflicts, set the new memtype for pages in the range (see the two-pass sketch below). in reserve_ram_pages_type()
469 return -EINVAL; in reserve_ram_pages_type()
484 pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n", in reserve_ram_pages_type()
485 start, end - 1, type, req_type); in reserve_ram_pages_type()
489 return -EBUSY; in reserve_ram_pages_type()
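A compact sketch of that two-pass structure (check everything, then commit), with an array standing in for the per-page flag bits; a conflict on any page aborts before anything is modified, which is what keeps the -EBUSY path above side-effect free:

#include <stdio.h>

enum memtype { MT_WB, MT_WC, MT_UC_MINUS, MT_WT };

#define NPAGES 8
static enum memtype page_memtype[NPAGES];	/* zero-init: all WB */

/* Two passes, like reserve_ram_pages_type(): check, then commit. */
static int reserve_pages(int start, int end, enum memtype req)
{
	for (int i = start; i < end; i++) {
		if (page_memtype[i] != MT_WB) {
			printf("conflict at page %d: tracked %d, req %d\n",
			       i, page_memtype[i], req);
			return -1;	/* -EBUSY in the kernel */
		}
	}
	for (int i = start; i < end; i++)
		page_memtype[i] = req;
	return 0;
}

int main(void)
{
	printf("first reserve: %d\n", reserve_pages(2, 6, MT_WC));
	printf("overlapping reserve: %d\n", reserve_pages(4, 8, MT_UC_MINUS));
	return 0;
}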
523 * Decoy addresses are not present for 32-bit builds, see in sanitize_phys()
533 * - _PAGE_CACHE_MODE_WB
534 * - _PAGE_CACHE_MODE_WC
535 * - _PAGE_CACHE_MODE_UC_MINUS
536 * - _PAGE_CACHE_MODE_UC
537 * - _PAGE_CACHE_MODE_WT
540 * region with req_type. If new_type is non-NULL, the function returns the
541 * available type in new_type when there is no error. In case of any error
555 WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__, in reserve_memtype()
556 start, end - 1, cattr_name(req_type)); in reserve_memtype()
557 return -EINVAL; in reserve_memtype()
567 /* Low ISA region is always mapped WB in page table. No need to track */ in reserve_memtype()
576 * optimization for /dev/mem mmap'ers into WB memory (BIOS in reserve_memtype()
577 * tools and ACPI tools). Use WB request for WB memory and use in reserve_memtype()
592 return -EINVAL; in reserve_memtype()
597 return -ENOMEM; in reserve_memtype()
599 new->start = start; in reserve_memtype()
600 new->end = end; in reserve_memtype()
601 new->type = actual_type; in reserve_memtype()
607 pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n", in reserve_memtype()
608 start, end - 1, in reserve_memtype()
609 cattr_name(new->type), cattr_name(req_type)); in reserve_memtype()
618 dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n", in reserve_memtype()
619 start, end - 1, cattr_name(new->type), cattr_name(req_type), in reserve_memtype()
620 new_type ? cattr_name(*new_type) : "-"); in reserve_memtype()
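For non-RAM ranges the reserved (start, end, type) triples go into an interval tree (rbt_memtype_check_insert() in this era of the code), and an overlap with a different tracked type is what produces the -EBUSY report above. A toy version with a flat list in place of the rbtree, keeping roughly the same conflict rule; all names here are illustrative:

#include <stdio.h>

struct memtype { unsigned long long start, end; int type; };

static struct memtype tracked[16];
static int ntracked;

/* Reject an insert that overlaps an existing range of a different type. */
static int memtype_insert(unsigned long long start, unsigned long long end,
			  int type)
{
	for (int i = 0; i < ntracked; i++) {
		if (start < tracked[i].end && end > tracked[i].start &&
		    tracked[i].type != type)
			return -1;	/* -EBUSY: conflicting overlap */
	}
	tracked[ntracked++] = (struct memtype){ start, end, type };
	return 0;
}

int main(void)
{
	printf("%d\n", memtype_insert(0x1000, 0x3000, 1));	/* ok */
	printf("%d\n", memtype_insert(0x2000, 0x4000, 2));	/* conflict */
	printf("%d\n", memtype_insert(0x2000, 0x4000, 1));	/* same type: ok */
	return 0;
}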
627 int err = -EINVAL; in free_memtype()
637 /* Low ISA region is always mapped WB. No need to track */ in free_memtype()
648 return -EINVAL; in free_memtype()
656 pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n", in free_memtype()
657 current->comm, current->pid, start, end - 1); in free_memtype()
658 return -EINVAL; in free_memtype()
663 dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1); in free_memtype()
670 * lookup_memtype - Looks up the memory type for a physical address
671 * @paddr: physical address whose memory type needs to be looked up
697 rettype = entry->type; in lookup_memtype()
706 * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
707 * of @pfn cannot be overridden by UC MTRR memory type.
711 * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC.
725 * io_reserve_memtype - Request a memory type mapping for a region of memory
732 * On failure, returns non-zero
737 resource_size_t size = end - start; in io_reserve_memtype()
759 ret = -EBUSY; in io_reserve_memtype()
765 * io_free_memtype - Release a memory type mapping for a region of memory
832 if (file->f_flags & O_DSYNC) in phys_mem_access_prot_allowed()
841 * Change the memory type for the physical address range in kernel identity
849 if (base > __pa(high_memory-1)) in kernel_map_sync_memtype()
859 id_sz = (__pa(high_memory-1) <= base + size) ? in kernel_map_sync_memtype()
860 __pa(high_memory) - base : in kernel_map_sync_memtype()
864 pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n", in kernel_map_sync_memtype()
865 current->comm, current->pid, in kernel_map_sync_memtype()
867 base, (unsigned long long)(base + size-1)); in kernel_map_sync_memtype()
868 return -EINVAL; in kernel_map_sync_memtype()
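The id_sz expression above clamps the synced region to the part of [base, base + size) that actually lies inside the kernel identity map. A small worked example of that clamp, with a made-up constant standing in for __pa(high_memory):

#include <stdio.h>

int main(void)
{
	unsigned long long high = 0x38000000ULL;	/* assumed id-map top */
	unsigned long long base = 0x37ff0000ULL, size = 0x20000ULL;

	/* Only the slice below the identity-map top needs its attribute changed. */
	unsigned long long id_sz = (high <= base + size) ? high - base : size;

	printf("sync %#llx of %#llx bytes\n", id_sz, size);
	return 0;
}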
874 * Internal interface to reserve a range of physical memory with prot.
899 pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", in reserve_pfn_range()
900 current->comm, current->pid, in reserve_pfn_range()
903 (unsigned long long)(paddr + size - 1), in reserve_pfn_range()
920 pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n", in reserve_pfn_range()
921 current->comm, current->pid, in reserve_pfn_range()
924 (unsigned long long)(paddr + size - 1), in reserve_pfn_range()
926 return -EINVAL; in reserve_pfn_range()
939 return -EINVAL; in reserve_pfn_range()
945 * Internal interface to free a range of physical memory.
968 unsigned long vma_size = vma->vm_end - vma->vm_start; in track_pfn_copy()
971 if (vma->vm_flags & VM_PAT) { in track_pfn_copy()
976 if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { in track_pfn_copy()
978 return -EINVAL; in track_pfn_copy()
989 * a linear pfn mapping for the entire range, or no vma is provided,
1000 if (!vma || (addr == vma->vm_start in track_pfn_remap()
1001 && size == (vma->vm_end - vma->vm_start))) { in track_pfn_remap()
1006 vma->vm_flags |= VM_PAT; in track_pfn_remap()
1021 size -= PAGE_SIZE; in track_pfn_remap()
1024 return -EINVAL; in track_pfn_remap()
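So track_pfn_remap() has two shapes: a remap covering the whole VMA (or with no VMA at all) reserves one memtype range and marks the VMA with VM_PAT, while a partial remap only verifies each page against the already-tracked type. A sketch of that branch; all names are illustrative and the toy lookup always reports WB:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ 4096UL

/* Stand-in for lookup_memtype(): everything tracked as WB (0) here. */
static int toy_memtype(unsigned long paddr)
{
	(void)paddr;
	return 0;
}

static int track_remap(unsigned long paddr, unsigned long size,
		       bool whole_vma, int want)
{
	if (whole_vma) {
		/* One reservation covers the mapping; remember it via VM_PAT. */
		printf("whole VMA: reserve [%#lx,%#lx), set VM_PAT\n",
		       paddr, paddr + size);
		return 0;
	}
	/* Partial mapping: check, page by page, that the type matches. */
	for (unsigned long off = 0; off < size; off += PAGE_SZ)
		if (toy_memtype(paddr + off) != want)
			return -1;	/* -EINVAL in the kernel */
	printf("partial: per-page memtype check passed\n");
	return 0;
}

int main(void)
{
	track_remap(0x10000, 4 * PAGE_SZ, true, 0);
	track_remap(0x10000, 2 * PAGE_SZ, false, 0);
	return 0;
}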
1057 if (vma && !(vma->vm_flags & VM_PAT)) in untrack_pfn()
1063 if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { in untrack_pfn()
1068 size = vma->vm_end - vma->vm_start; in untrack_pfn()
1072 vma->vm_flags &= ~VM_PAT; in untrack_pfn()
1082 vma->vm_flags &= ~VM_PAT; in untrack_pfn_moved()
1146 seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), in memtype_seq_show()
1147 print_entry->start, print_entry->end); in memtype_seq_show()