Lines matching refs: pt
273 static void psb_mmu_free_pt(struct psb_mmu_pt *pt) in psb_mmu_free_pt() argument
275 __free_page(pt->p); in psb_mmu_free_pt()
276 kfree(pt); in psb_mmu_free_pt()
282 struct psb_mmu_pt *pt; in psb_mmu_free_pagedir() local
293 pt = pd->tables[i]; in psb_mmu_free_pagedir()
294 if (pt) in psb_mmu_free_pagedir()
295 psb_mmu_free_pt(pt); in psb_mmu_free_pagedir()
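Taken together, the two fragments above show the teardown path: psb_mmu_free_pt() releases the page backing the table and then the descriptor itself, and psb_mmu_free_pagedir() walks its tables[] array and hands every non-NULL slot to it. A minimal sketch of that walk, with a helper name and entry count that are not in the listing and are assumed here:

static void psb_mmu_free_all_pts(struct psb_mmu_pd *pd, unsigned int n_entries)
{
	unsigned int i;

	for (i = 0; i < n_entries; ++i) {
		struct psb_mmu_pt *pt = pd->tables[i];

		if (pt)
			psb_mmu_free_pt(pt);	/* __free_page(pt->p); kfree(pt); */
	}
}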
308 struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL); in psb_mmu_alloc_pt() local
317 if (!pt) in psb_mmu_alloc_pt()
320 pt->p = alloc_page(GFP_DMA32); in psb_mmu_alloc_pt()
321 if (!pt->p) { in psb_mmu_alloc_pt()
322 kfree(pt); in psb_mmu_alloc_pt()
328 v = kmap_atomic(pt->p); in psb_mmu_alloc_pt()
347 pt->count = 0; in psb_mmu_alloc_pt()
348 pt->pd = pd; in psb_mmu_alloc_pt()
349 pt->index = 0; in psb_mmu_alloc_pt()
351 return pt; in psb_mmu_alloc_pt()
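psb_mmu_alloc_pt() builds one page table: the descriptor comes from kmalloc(), the table page itself from alloc_page(GFP_DMA32) so the device can reach it, and the fresh page is mapped with kmap_atomic() for initialisation before count, pd and index are reset. A hedged reconstruction of that pattern, assuming the new table is filled with the directory's invalid_pte sentinel (the fill loop is not among the matched lines, and the function name below is only for illustration):

static struct psb_mmu_pt *psb_mmu_alloc_pt_sketch(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	uint32_t *v;
	unsigned int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);		/* page the GPU MMU can address */
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	v = kmap_atomic(pt->p);
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		v[i] = pd->invalid_pte;		/* every entry starts out invalid (assumed) */
	kunmap_atomic(v);

	pt->count = 0;				/* no live PTEs yet */
	pt->pd = pd;
	pt->index = 0;				/* real slot assigned on first map */

	return pt;
}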
358 struct psb_mmu_pt *pt; in psb_mmu_pt_alloc_map_lock() local
363 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
364 while (!pt) { in psb_mmu_pt_alloc_map_lock()
366 pt = psb_mmu_alloc_pt(pd); in psb_mmu_pt_alloc_map_lock()
367 if (!pt) in psb_mmu_pt_alloc_map_lock()
373 psb_mmu_free_pt(pt); in psb_mmu_pt_alloc_map_lock()
375 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
380 pd->tables[index] = pt; in psb_mmu_pt_alloc_map_lock()
381 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; in psb_mmu_pt_alloc_map_lock()
382 pt->index = index; in psb_mmu_pt_alloc_map_lock()
390 pt->v = kmap_atomic(pt->p); in psb_mmu_pt_alloc_map_lock()
391 return pt; in psb_mmu_pt_alloc_map_lock()
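psb_mmu_pt_alloc_map_lock() allocates tables lazily. The while (!pt) loop exists because the table is allocated with the lock dropped: after re-taking the lock the slot is checked again, and if another path installed a table in the meantime the freshly allocated one is freed and the existing one is used. A new table is published by writing its page-directory entry, the table page's PFN shifted into the upper bits and OR-ed with pd->pd_mask, before the table page is mapped with kmap_atomic() and returned locked. A hedged sketch of just the publish step (the helper name and the 'v' mapping of the directory page are assumptions):

static void psb_mmu_publish_pt(struct psb_mmu_pd *pd, uint32_t *v,
			       unsigned long index, struct psb_mmu_pt *pt)
{
	pd->tables[index] = pt;
	v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;	/* PDE: page-aligned PFN plus flags */
	pt->index = index;		/* remembered so the PDE can be cleared later */
}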
398 struct psb_mmu_pt *pt; in psb_mmu_pt_map_lock() local
402 pt = pd->tables[index]; in psb_mmu_pt_map_lock()
403 if (!pt) { in psb_mmu_pt_map_lock()
407 pt->v = kmap_atomic(pt->p); in psb_mmu_pt_map_lock()
408 return pt; in psb_mmu_pt_map_lock()
411 static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt) in psb_mmu_pt_unmap_unlock() argument
413 struct psb_mmu_pd *pd = pt->pd; in psb_mmu_pt_unmap_unlock()
416 kunmap_atomic(pt->v); in psb_mmu_pt_unmap_unlock()
417 if (pt->count == 0) { in psb_mmu_pt_unmap_unlock()
419 v[pt->index] = pd->invalid_pde; in psb_mmu_pt_unmap_unlock()
420 pd->tables[pt->index] = NULL; in psb_mmu_pt_unmap_unlock()
424 (void *) &v[pt->index]); in psb_mmu_pt_unmap_unlock()
427 kunmap_atomic(pt->v); in psb_mmu_pt_unmap_unlock()
429 psb_mmu_free_pt(pt); in psb_mmu_pt_unmap_unlock()
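psb_mmu_pt_unmap_unlock() is the counterpart: it drops the kmap_atomic() mapping of the table and, if the table's reference count has fallen to zero, unlinks it: the page-directory entry is rewritten with pd->invalid_pde, the tables[] slot is cleared, the touched PDE is cache-flushed (the fragment at line 424), and the empty table is freed. A hedged sketch of that empty-table branch; the hw_context test and the exact arguments of the flush call are inferred, not shown in the matched lines:

static void psb_mmu_drop_empty_pt(struct psb_mmu_pd *pd, uint32_t *v,
				  struct psb_mmu_pt *pt)
{
	v[pt->index] = pd->invalid_pde;		/* hardware stops walking this table */
	pd->tables[pt->index] = NULL;

	if (pd->hw_context != -1)		/* flush only when a context owns the PD (assumed) */
		psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);

	psb_mmu_free_pt(pt);
}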
435 static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, in psb_mmu_set_pte() argument
438 pt->v[psb_mmu_pt_index(addr)] = pte; in psb_mmu_set_pte()
441 static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt, in psb_mmu_invalidate_pte() argument
444 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; in psb_mmu_invalidate_pte()
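The two inline helpers are the only places a PTE is actually written: psb_mmu_set_pte() stores the caller-built entry at the slot selected by psb_mmu_pt_index(addr), and psb_mmu_invalidate_pte() stores the directory's invalid_pte sentinel instead. The index helper itself is not among the matched lines; a plausible sketch, assuming 4 KiB pages and a 1024-entry table, would be:

static inline uint32_t psb_mmu_pt_index_sketch(uint32_t addr)
{
	return (addr >> PAGE_SHIFT) & 0x3ff;	/* bits 21..12 select one of 1024 PTEs (assumed layout) */
}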
553 struct psb_mmu_pt *pt; in psb_mmu_flush_ptes() local
585 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_flush_ptes()
586 if (!pt) in psb_mmu_flush_ptes()
589 psb_clflush(&pt->v[psb_mmu_pt_index(addr)]); in psb_mmu_flush_ptes()
595 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_flush_ptes()
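psb_mmu_flush_ptes() walks an address range table by table, maps each table with psb_mmu_pt_map_lock() (skipping holes where no table exists), and cache-flushes the PTE slots the range touches so the hardware sees the updated entries. A hedged sketch of the per-table walk, flushing one entry per mapped page rather than using the driver's cache-line stride bookkeeping:

static void psb_mmu_flush_pt_range(struct psb_mmu_pt *pt, unsigned long addr,
				   unsigned long next)
{
	for (; addr < next; addr += PAGE_SIZE)
		psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);	/* write back this PTE's cache line */
}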
605 struct psb_mmu_pt *pt; in psb_mmu_remove_pfn_sequence() local
618 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_remove_pfn_sequence()
619 if (!pt) in psb_mmu_remove_pfn_sequence()
622 psb_mmu_invalidate_pte(pt, addr); in psb_mmu_remove_pfn_sequence()
623 --pt->count; in psb_mmu_remove_pfn_sequence()
625 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_remove_pfn_sequence()
645 struct psb_mmu_pt *pt; in psb_mmu_remove_pages() local
674 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_remove_pages()
675 if (!pt) in psb_mmu_remove_pages()
678 psb_mmu_invalidate_pte(pt, addr); in psb_mmu_remove_pages()
679 --pt->count; in psb_mmu_remove_pages()
682 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_remove_pages()
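Both removal paths, psb_mmu_remove_pfn_sequence() and psb_mmu_remove_pages(), share the same per-page step: look up (or, in the pfn-sequence variant, allocate) the covering table, overwrite the PTE with the invalid sentinel, drop the table's live-entry count, and unmap; the unmap frees the table outright if the count reached zero. A hedged sketch of that step as it would look for psb_mmu_remove_pages(), with the wrapper name assumed:

static void psb_mmu_remove_one(struct psb_mmu_pd *pd, unsigned long addr)
{
	struct psb_mmu_pt *pt = psb_mmu_pt_map_lock(pd, addr);

	if (!pt)
		return;				/* no table here, nothing mapped */

	psb_mmu_invalidate_pte(pt, addr);	/* write back invalid_pte */
	--pt->count;				/* one fewer live entry */
	psb_mmu_pt_unmap_unlock(pt);		/* frees the table if it is now empty */
}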
701 struct psb_mmu_pt *pt; in psb_mmu_insert_pfn_sequence() local
716 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pfn_sequence()
717 if (!pt) { in psb_mmu_insert_pfn_sequence()
723 psb_mmu_set_pte(pt, addr, pte); in psb_mmu_insert_pfn_sequence()
724 pt->count++; in psb_mmu_insert_pfn_sequence()
726 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_insert_pfn_sequence()
747 struct psb_mmu_pt *pt; in psb_mmu_insert_pages() local
779 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pages()
780 if (!pt) { in psb_mmu_insert_pages()
788 psb_mmu_set_pte(pt, addr, pte); in psb_mmu_insert_pages()
789 pt->count++; in psb_mmu_insert_pages()
791 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_insert_pages()
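The two insertion paths mirror this: psb_mmu_insert_pfn_sequence() and psb_mmu_insert_pages() call psb_mmu_pt_alloc_map_lock() so that missing tables are created on demand, write the caller-built PTE, bump the table's count, and unmap. How the PTE value is composed (PFN plus type and caching bits) is not visible in the matched lines. A hedged sketch of the per-page step, with the wrapper name assumed:

static int psb_mmu_insert_one(struct psb_mmu_pd *pd, unsigned long addr,
			      uint32_t pte)
{
	struct psb_mmu_pt *pt = psb_mmu_pt_alloc_map_lock(pd, addr);

	if (!pt)
		return -ENOMEM;			/* could not allocate a backing table */

	psb_mmu_set_pte(pt, addr, pte);		/* install the translation */
	pt->count++;				/* table gained a live entry */
	psb_mmu_pt_unmap_unlock(pt);
	return 0;
}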
814 struct psb_mmu_pt *pt; in psb_mmu_virtual_to_pfn() local
819 pt = psb_mmu_pt_map_lock(pd, virtual); in psb_mmu_virtual_to_pfn()
820 if (!pt) { in psb_mmu_virtual_to_pfn()
838 tmp = pt->v[psb_mmu_pt_index(virtual)]; in psb_mmu_virtual_to_pfn()
845 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_virtual_to_pfn()
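Finally, psb_mmu_virtual_to_pfn() performs the reverse lookup: it maps the table covering the virtual address, reads the raw PTE at psb_mmu_pt_index(virtual), and derives the page frame number from it before unmapping. A hedged sketch of that read-out, assuming the PTE's low bit marks a valid entry and the PFN sits above PAGE_SHIFT (neither detail is in the matched lines):

static int psb_mmu_read_pfn(struct psb_mmu_pt *pt, uint32_t virtual,
			    unsigned long *pfn)
{
	uint32_t tmp = pt->v[psb_mmu_pt_index(virtual)];

	if (!(tmp & 1))			/* assumed valid bit */
		return -EINVAL;

	*pfn = tmp >> PAGE_SHIFT;	/* strip the flag bits to get the frame number */
	return 0;
}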