/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * The normal show_free_areas() is too verbose on Tile, with dozens
 * of processors and often four NUMA zones each with high and lowmem.
 */
void show_mem(unsigned int filter)
{
	struct zone *zone;

	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
	       " pagecache:%lu swap:%lu\n",
	       (global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE)),
	       (global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE)),
	       global_page_state(NR_FILE_DIRTY),
	       global_page_state(NR_WRITEBACK),
	       global_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_page_state(NR_FILE_PAGES),
	       nr_swap_pages);

	for_each_zone(zone) {
		unsigned long flags, order, total = 0, largest_order = -1;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			int nr = zone->free_area[order].nr_free;
			total += nr << order;
			if (nr)
				largest_order = order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		pr_err("Node %d %7s: %lukB (largest %luKb)\n",
		       zone_to_nid(zone), zone->name,
		       K(total), largest_order ? K(1UL) << largest_order : 0);
	}
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * This appears conservative since it is only called
	 * from __set_fixmap.
	 */
	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

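/*
 * Editor's illustrative sketch (not part of the original file): how a
 * fixmap slot might be pointed at a physical page and later cleared.
 * FIX_EXAMPLE and example_use_fixmap() are hypothetical names used only
 * for illustration; real callers use the indices declared in <asm/fixmap.h>.
 * Clearing with __pgprot(0) relies on the "stored as-is, to permit clearing
 * entries" behavior noted in set_pte_pfn() above.
 */
#if 0
static void example_use_fixmap(unsigned long phys)
{
	__set_fixmap(FIX_EXAMPLE, phys & PAGE_MASK, PAGE_KERNEL);
	/* ... access the page through __fix_to_virt(FIX_EXAMPLE) ... */
	__set_fixmap(FIX_EXAMPLE, 0, __pgprot(0));	/* clear the slot */
}
#endif
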
#if defined(CONFIG_HIGHPTE)
pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
{
	pte_t *pte = kmap_atomic(pmd_page(*dir)) +
		(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
	return &pte[pte_index(address)];
}
#endif

/**
 * shatter_huge_page() - ensure a given address is mapped by a small page.
 *
 * This function converts a huge PTE mapping kernel LOWMEM into a bunch
 * of small PTEs with the same caching.  No cache flush required, but we
 * must do a global TLB flush.
 *
 * Any caller that wishes to modify a kernel mapping that might
 * have been made with a huge page should call this function,
 * since doing so properly avoids race conditions with installing the
 * newly-shattered page and then flushing all the TLB entries.
 *
 * @addr: Address at which to shatter any existing huge page.
 */
void shatter_huge_page(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long flags = 0;  /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
	struct list_head *pos;
#endif

	/* Get a pointer to the pmd entry that we need to change. */
	addr &= HPAGE_MASK;
	BUG_ON(pgd_addr_invalid(addr));
	BUG_ON(addr < PAGE_OFFSET);  /* only for kernel LOWMEM */
	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	if (!pmd_huge_page(*pmd))
		return;

	spin_lock_irqsave(&init_mm.page_table_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* Lost the race to convert the huge page. */
		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
		return;
	}

	/* Shatter the huge page into the preallocated L2 page table. */
	pmd_populate_kernel(&init_mm, pmd,
			    get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));

#ifdef __PAGETABLE_PMD_FOLDED
	/* Walk every pgd on the system and update the pmd there. */
	spin_lock(&pgd_lock);
	list_for_each(pos, &pgd_list) {
		pmd_t *copy_pmd;
		pgd = list_to_pgd(pos) + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		copy_pmd = pmd_offset(pud, addr);
		__set_pmd(copy_pmd, *pmd);
	}
	spin_unlock(&pgd_lock);
#endif

	/* Tell every cpu to notice the change. */
	flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
		     cpu_possible_mask, NULL, 0);

	/* Hold the lock until the TLB flush is finished to avoid races. */
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}

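/*
 * Editor's illustrative sketch (not part of the original file): a caller
 * that wants to rewrite one small kernel PTE first shatters any huge
 * mapping covering it, as the kernel-doc above requires, then walks to the
 * PTE and updates it.  example_wrprotect_kernel_page() is a made-up helper
 * name; the calls it makes (virt_to_pte(), set_pte(), pte_wrprotect(),
 * flush_tlb_kernel_range()) are real APIs.
 */
#if 0
static void example_wrprotect_kernel_page(unsigned long addr)
{
	pte_t *ptep;

	shatter_huge_page(addr);		/* ensure a small-page mapping */
	ptep = virt_to_pte(NULL, addr);		/* NULL mm walks swapper_pg_dir */
	BUG_ON(ptep == NULL);
	set_pte(ptep, pte_wrprotect(*ptep));
	flush_tlb_kernel_range(addr & PAGE_MASK,
			       (addr & PAGE_MASK) + PAGE_SIZE);
}
#endif
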
/*
 * A list of all pgds is kept so that entries can be invalidated in both
 * cached and uncached pgds.  This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 *
 * The lock is always taken with interrupts disabled, unlike on x86
 * and other platforms, because we need to take the lock in
 * shatter_huge_page(), which may be called from an interrupt context.
 * We are not at risk from the tlbflush IPI deadlock that was seen on
 * x86, since we use the flush_remote() API to have the hypervisor do
 * the TLB flushes regardless of irq disabling.
 */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

static inline void pgd_list_add(pgd_t *pgd)
{
	list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	list_del(pgd_to_list(pgd));
}

#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)

static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	memcpy(pgd + KERNEL_PGD_INDEX_START,
	       swapper_pg_dir + KERNEL_PGD_INDEX_START,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}


#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
	struct page *p;
#if L2_USER_PGTABLE_ORDER > 0
	int i;
#endif

#ifdef CONFIG_HIGHPTE
	flags |= __GFP_HIGHMEM;
#endif

	p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
	if (p == NULL)
		return NULL;

#if L2_USER_PGTABLE_ORDER > 0
	/*
	 * Make every page have a page_count() of one, not just the first.
	 * We don't use __GFP_COMP since it doesn't look like it works
	 * correctly with tlb_remove_page().
	 */
	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		init_page_count(p+i);
		inc_zone_page_state(p+i, NR_PAGETABLE);
	}
#endif

	pgtable_page_ctor(p);
	return p;
}

/*
 * Free page immediately (used in __pte_alloc if we raced with another
 * process).  We have to correct whatever pte_alloc_one() did before
 * returning the pages to the allocator.
 */
void pte_free(struct mm_struct *mm, struct page *p)
{
	int i;

	pgtable_page_dtor(p);
	__free_page(p);

	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		__free_page(p+i);
		dec_zone_page_state(p+i, NR_PAGETABLE);
	}
}

void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
		    unsigned long address)
{
	int i;

	pgtable_page_dtor(pte);
	tlb_remove_page(tlb, pte);

	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		tlb_remove_page(tlb, pte + i);
		dec_zone_page_state(pte + i, NR_PAGETABLE);
	}
}

#ifndef __tilegx__

/*
 * FIXME: needs to be atomic vs hypervisor writes.  For now we make the
 * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}

/*
 * This implementation is atomic vs hypervisor writes, since the hypervisor
 * always writes the low word (where "accessed" and "dirty" are) and this
 * routine only writes the high word.
 */
void ptep_set_wrprotect(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
	u32 *tmp = (u32 *)ptep;
	tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif

pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}

pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
	unsigned int width = smp_width;
	int x = cpu % width;
	int y = cpu / width;
	BUG_ON(y >= smp_height);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	BUG_ON(cpu < 0 || cpu >= NR_CPUS);
	BUG_ON(!cpu_is_valid_lotar(cpu));
	return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}

int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return x + y * smp_width;
}

/*
 * Convert a kernel VA to a PA and homing information.
 */
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
	struct page *page = virt_to_page(va);
	pte_t null_pte = { 0 };

	*cpa = __pa(va);

	/* Note that this is not writing a page table, just returning a pte. */
	*pte = pte_set_home(null_pte, page_home(page));

	return 0; /* return non-zero if not hfh? */
}
EXPORT_SYMBOL(va_to_cpa_and_pte);

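/*
 * Editor's illustrative sketch (not part of the original file): querying
 * the client physical address and homing PTE for a kernel buffer, e.g.
 * before describing the buffer to the hypervisor or a device.  The helper
 * name and the pr_info() message are made up for illustration.
 */
#if 0
static void example_describe_buffer(void *buf)
{
	unsigned long long cpa;
	pte_t pte;

	va_to_cpa_and_pte(buf, &cpa, &pte);
	pr_info("buf %p: cpa %#llx, home pte %#llx\n",
		buf, cpa, (unsigned long long)pte_val(pte));
}
#endif
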
void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
	*ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
#  error Must write the present and migrating bits last
# endif
	if (pte_present(pte)) {
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
		barrier();
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
	} else {
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
		barrier();
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	}
#endif /* __tilegx__ */
}

void set_pte(pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) &&
	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
		/* The PTE actually references physical memory. */
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			/* Update the home of the PTE from the struct page. */
			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
		} else if (hv_pte_get_mode(pte) == 0) {
			/* remap_pfn_range(), etc, must supply PTE mode. */
			panic("set_pte(): out-of-range PFN and mode 0\n");
		}
	}

	__set_pte(ptep, pte);
}

/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	return mm->context.priority_cached;
}

/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
	if (!mm_is_priority_cached(mm)) {
		mm->context.priority_cached = -1U;
		hv_set_caching(-1U);
	}
}

/*
 * Validate and return the priority_cached flag.  We know if it's zero
 * that we don't need to scan, since we immediately set it non-zero
 * when we first consider a MAP_CACHE_PRIORITY mapping.
 *
 * We only _try_ to acquire the mmap_sem semaphore; if we can't acquire it,
 * since we're in an interrupt context (servicing switch_mm) we don't
 * worry about it and don't unset the "priority_cached" field.
 * Presumably we'll come back later and have more luck and clear
 * the value then; for now we'll just keep the cache marked for priority.
 */
static unsigned int update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}

/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (!mm_is_priority_cached(next)) {
		/*
		 * If the new mm doesn't use priority caching, just see if we
		 * need the hv_set_caching(), or can assume it's already zero.
		 */
		if (mm_is_priority_cached(prev))
			hv_set_caching(0);
	} else {
		hv_set_caching(update_priority_cached(next));
	}
}

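/*
 * Editor's illustrative sketch (not part of the original file): the
 * comments above say this code runs while servicing switch_mm, so a
 * simplified caller on the context-switch path might look roughly like
 * the stub below.  example_switch_mm() is a made-up name; the real call
 * site lives in the arch's context-switch code.
 */
#if 0
static void example_switch_mm(struct mm_struct *prev, struct mm_struct *next)
{
	if (prev != next) {
		/* ... install next's page table and ASID ... */
		check_mm_caching(prev, next);
	}
}
#endif
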
#if CHIP_HAS_MMIO()

/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write, MMIO VA mapping homed at the requested shim. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);

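/*
 * Editor's illustrative sketch (not part of the original file): mapping a
 * register window with ioremap_prot() and accessing it with the usual MMIO
 * accessors.  The physical address, "home" pgprot, and register offset are
 * placeholders supplied by the caller; a real user passes a pgprot whose
 * LOTAR identifies the shim that owns the registers.
 */
#if 0
static u32 example_read_mmio_reg(resource_size_t regs_pa, pgprot_t home,
				 unsigned long reg_offset)
{
	void __iomem *regs = ioremap_prot(regs_pa, PAGE_SIZE, home);
	u32 val;

	if (regs == NULL)
		return 0;
	val = readl(regs + reg_offset);
	iounmap(regs);
	return val;
}
#endif
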
/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	panic("ioremap for PCI MMIO is not supported");
}
EXPORT_SYMBOL(ioremap);

/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
	volatile void __iomem *addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
	vunmap((void * __force)addr);
#else
	/* x86 uses this complicated flow instead of vunmap().  Is
	 * there any particular reason we should do the same? */
	struct vm_struct *p, *o;

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		pr_err("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
#endif
}
EXPORT_SYMBOL(iounmap);

#endif /* CHIP_HAS_MMIO() */