/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/fixmap.h>

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
/*
 * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte
 * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP
 * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
 * in case of 16K/64K/256K page sizes.
 */

#define PKMAP_ORDER	PTE_SHIFT
#define LAST_PKMAP	(1 << PKMAP_ORDER)

#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
								& PMD_MASK)

#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)
#define PKMAP_NR(virt)	((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
extern void __kunmap_atomic(void *kvaddr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

static inline void *__kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long) ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

#define flush_cache_kmaps()	{ flush_icache(); flush_dcache(); }

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */
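
/*
 * Usage sketch: how a caller would typically use the kmap()/kunmap()
 * helpers declared above to access a highmem page through a temporary
 * kernel virtual address. The helper name zero_highmem_page() is
 * hypothetical and is not part of this header; it is shown only to
 * illustrate the pairing of map and unmap calls.
 *
 *	static void zero_highmem_page(struct page *page)
 *	{
 *		void *vaddr;
 *
 *		vaddr = kmap(page);	// may sleep (see might_sleep() above),
 *					// so process context only
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap(page);
 *	}
 *
 * In atomic context the kmap_atomic()/kunmap_atomic() family (built on the
 * __kmap_atomic()/__kunmap_atomic() declarations here) would be used
 * instead, and the mapping must be released before sleeping.
 */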