Lines matching refs: pd
138 void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) in psb_mmu_set_pd_context() argument
140 struct drm_device *dev = pd->driver->dev; in psb_mmu_set_pd_context()
145 down_write(&pd->driver->sem); in psb_mmu_set_pd_context()
146 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset); in psb_mmu_set_pd_context()
148 psb_mmu_flush_pd_locked(pd->driver, 1); in psb_mmu_set_pd_context()
149 pd->hw_context = hw_context; in psb_mmu_set_pd_context()
150 up_write(&pd->driver->sem); in psb_mmu_set_pd_context()
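
The matched lines above come from psb_mmu_set_pd_context(): under the driver's write semaphore it programs the page directory's physical base address into the hardware directory-list-base register (PSB_WSGX32, line 146), flushes the directory, and records the context in pd->hw_context; psb_mmu_free_pagedir() later clears the same register (line 257). As a minimal standalone illustration, assuming 4 KiB pages (PAGE_SHIFT == 12), the value written is just the directory page's frame number shifted into a byte address; this is a sketch, not driver code:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* The byte address programmed into the register: page_to_pfn(pd->p) << PAGE_SHIFT. */
static uint32_t pd_base_from_pfn(uint32_t pfn)
{
	return pfn << PAGE_SHIFT;
}

int main(void)
{
	printf("0x%08x\n", pd_base_from_pfn(0x12345));	/* prints 0x12345000 */
	return 0;
}
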
178 struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); in psb_mmu_alloc_pd() local
182 if (!pd) in psb_mmu_alloc_pd()
185 pd->p = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
186 if (!pd->p) in psb_mmu_alloc_pd()
188 pd->dummy_pt = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
189 if (!pd->dummy_pt) in psb_mmu_alloc_pd()
191 pd->dummy_page = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
192 if (!pd->dummy_page) in psb_mmu_alloc_pd()
196 pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), in psb_mmu_alloc_pd()
198 pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), in psb_mmu_alloc_pd()
201 pd->invalid_pde = 0; in psb_mmu_alloc_pd()
202 pd->invalid_pte = 0; in psb_mmu_alloc_pd()
205 v = kmap(pd->dummy_pt); in psb_mmu_alloc_pd()
207 v[i] = pd->invalid_pte; in psb_mmu_alloc_pd()
209 kunmap(pd->dummy_pt); in psb_mmu_alloc_pd()
211 v = kmap(pd->p); in psb_mmu_alloc_pd()
213 v[i] = pd->invalid_pde; in psb_mmu_alloc_pd()
215 kunmap(pd->p); in psb_mmu_alloc_pd()
217 clear_page(kmap(pd->dummy_page)); in psb_mmu_alloc_pd()
218 kunmap(pd->dummy_page); in psb_mmu_alloc_pd()
220 pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); in psb_mmu_alloc_pd()
221 if (!pd->tables) in psb_mmu_alloc_pd()
224 pd->hw_context = -1; in psb_mmu_alloc_pd()
225 pd->pd_mask = PSB_PTE_VALID; in psb_mmu_alloc_pd()
226 pd->driver = driver; in psb_mmu_alloc_pd()
228 return pd; in psb_mmu_alloc_pd()
231 __free_page(pd->dummy_page); in psb_mmu_alloc_pd()
233 __free_page(pd->dummy_pt); in psb_mmu_alloc_pd()
235 __free_page(pd->p); in psb_mmu_alloc_pd()
237 kfree(pd); in psb_mmu_alloc_pd()
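
psb_mmu_alloc_pd() (lines 178-237) allocates the directory page plus a dummy page table and a dummy page, derives pd->invalid_pte and pd->invalid_pde from them via psb_mmu_mask_pte() or leaves them 0 (lines 196-202), and pre-fills the dummy table and the directory with those fallback entries, so that, depending on how the driver was initialized, unmapped addresses can resolve to a harmless dummy page. A rough user-space sketch of that pre-fill follows; the PSB_PTE_VALID bit position and the pfns are assumptions for illustration, and fill_table() stands in for the kmap-and-fill loops, not the driver's psb_mmu_mask_pte():

#include <stdint.h>

#define PAGE_SHIFT	12
#define PTES_PER_TABLE	1024
#define PSB_PTE_VALID	(1u << 0)	/* assumed flag bit */

static void fill_table(uint32_t *table, uint32_t entry)
{
	for (int i = 0; i < PTES_PER_TABLE; i++)
		table[i] = entry;
}

int main(void)
{
	static uint32_t dummy_pt[PTES_PER_TABLE], pd[PTES_PER_TABLE];
	uint32_t dummy_page_pfn = 0x1000, dummy_pt_pfn = 0x1001;	/* illustrative */

	/* analogue of pd->invalid_pte: every dummy-PT slot points at the dummy page */
	fill_table(dummy_pt, (dummy_page_pfn << PAGE_SHIFT) | PSB_PTE_VALID);
	/* analogue of pd->invalid_pde: every PD slot points at the dummy page table */
	fill_table(pd, (dummy_pt_pfn << PAGE_SHIFT) | PSB_PTE_VALID);
	return 0;
}
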
247 void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) in psb_mmu_free_pagedir() argument
249 struct psb_mmu_driver *driver = pd->driver; in psb_mmu_free_pagedir()
256 if (pd->hw_context != -1) { in psb_mmu_free_pagedir()
257 PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4); in psb_mmu_free_pagedir()
265 pt = pd->tables[i]; in psb_mmu_free_pagedir()
270 vfree(pd->tables); in psb_mmu_free_pagedir()
271 __free_page(pd->dummy_page); in psb_mmu_free_pagedir()
272 __free_page(pd->dummy_pt); in psb_mmu_free_pagedir()
273 __free_page(pd->p); in psb_mmu_free_pagedir()
274 kfree(pd); in psb_mmu_free_pagedir()
278 static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) in psb_mmu_alloc_pt() argument
282 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; in psb_mmu_alloc_pt()
284 spinlock_t *lock = &pd->driver->lock; in psb_mmu_alloc_pt()
304 *ptes++ = pd->invalid_pte; in psb_mmu_alloc_pt()
307 if (pd->driver->has_clflush && pd->hw_context != -1) { in psb_mmu_alloc_pt()
320 pt->pd = pd; in psb_mmu_alloc_pt()
326 struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, in psb_mmu_pt_alloc_map_lock() argument
332 spinlock_t *lock = &pd->driver->lock; in psb_mmu_pt_alloc_map_lock()
335 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
338 pt = psb_mmu_alloc_pt(pd); in psb_mmu_pt_alloc_map_lock()
343 if (pd->tables[index]) { in psb_mmu_pt_alloc_map_lock()
347 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
351 v = kmap_atomic(pd->p); in psb_mmu_pt_alloc_map_lock()
352 pd->tables[index] = pt; in psb_mmu_pt_alloc_map_lock()
353 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; in psb_mmu_pt_alloc_map_lock()
357 if (pd->hw_context != -1) { in psb_mmu_pt_alloc_map_lock()
358 psb_mmu_clflush(pd->driver, (void *)&v[index]); in psb_mmu_pt_alloc_map_lock()
359 atomic_set(&pd->driver->needs_tlbflush, 1); in psb_mmu_pt_alloc_map_lock()
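
psb_mmu_pt_alloc_map_lock() installs a newly allocated page table by writing its PDE into the directory, (page_to_pfn(pt->p) << 12) | pd->pd_mask at line 353, and requests a clflush plus TLB flush when the directory is live in hardware. Together with the 1024-slot pd->tables array (line 220) this implies the usual 32-bit two-level split: 10-bit directory index, 10-bit table index, 12-bit page offset. The helpers below mirror psb_mmu_pt_index() (line 415) and its directory-level counterpart, but they are local reimplementations for illustration, not the driver's:

#include <stdint.h>
#include <stdio.h>

static uint32_t pd_index(uint32_t addr) { return addr >> 22; }		/* which PDE */
static uint32_t pt_index(uint32_t addr) { return (addr >> 12) & 0x3ff; }	/* which PTE */

int main(void)
{
	uint32_t addr = 0x12345678;

	printf("pd index %u, pt index %u, offset 0x%03x\n",
	       pd_index(addr), pt_index(addr), addr & 0xfffu);
	return 0;
}
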
366 static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd, in psb_mmu_pt_map_lock() argument
371 spinlock_t *lock = &pd->driver->lock; in psb_mmu_pt_map_lock()
374 pt = pd->tables[index]; in psb_mmu_pt_map_lock()
385 struct psb_mmu_pd *pd = pt->pd; in psb_mmu_pt_unmap_unlock() local
390 v = kmap_atomic(pd->p); in psb_mmu_pt_unmap_unlock()
391 v[pt->index] = pd->invalid_pde; in psb_mmu_pt_unmap_unlock()
392 pd->tables[pt->index] = NULL; in psb_mmu_pt_unmap_unlock()
394 if (pd->hw_context != -1) { in psb_mmu_pt_unmap_unlock()
395 psb_mmu_clflush(pd->driver, (void *)&v[pt->index]); in psb_mmu_pt_unmap_unlock()
396 atomic_set(&pd->driver->needs_tlbflush, 1); in psb_mmu_pt_unmap_unlock()
399 spin_unlock(&pd->driver->lock); in psb_mmu_pt_unmap_unlock()
403 spin_unlock(&pd->driver->lock); in psb_mmu_pt_unmap_unlock()
415 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; in psb_mmu_invalidate_pte()
420 struct psb_mmu_pd *pd; in psb_mmu_get_default_pd() local
423 pd = driver->default_pd; in psb_mmu_get_default_pd()
426 return pd; in psb_mmu_get_default_pd()
432 struct psb_mmu_pd *pd; in psb_get_default_pd_addr() local
434 pd = psb_mmu_get_default_pd(driver); in psb_get_default_pd_addr()
435 return page_to_pfn(pd->p) << PAGE_SHIFT; in psb_get_default_pd_addr()
509 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, in psb_mmu_flush_ptes() argument
521 unsigned long clflush_add = pd->driver->clflush_add; in psb_mmu_flush_ptes()
522 unsigned long clflush_mask = pd->driver->clflush_mask; in psb_mmu_flush_ptes()
524 if (!pd->driver->has_clflush) in psb_mmu_flush_ptes()
542 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_flush_ptes()
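
The CONFIG_X86 variant of psb_mmu_flush_ptes() (lines 509-542) walks the page tables covering a just-modified range and clflushes the touched PTEs, one cache line at a time, but only when the CPU has clflush (line 524) and the callers only invoke it for a hardware-bound directory (hw_context != -1, e.g. lines 593-594): the GPU's MMU fetches the tables from memory, so dirty CPU cache lines must be written back. driver->clflush_add and clflush_mask encode the stride; the >> PAGE_SHIFT at line 282 suggests clflush_add is expressed as the span of mapped address space one cache line of PTEs covers. Below is a loose, x86-only user-space sketch of "one clflush per covering cache line", assuming a 64-byte line (the driver determines the real size at init):

#include <emmintrin.h>	/* _mm_clflush(), x86 only */
#include <stdint.h>

#define CLFLUSH_LINE	64UL	/* assumed cache-line size */

/* Flush every cache line that covers a contiguous run of 32-bit PTEs. */
static void flush_pte_range(const uint32_t *ptes, unsigned long count)
{
	uintptr_t addr = (uintptr_t)ptes & ~(CLFLUSH_LINE - 1);
	uintptr_t end = (uintptr_t)(ptes + count);

	for (; addr < end; addr += CLFLUSH_LINE)
		_mm_clflush((const void *)addr);
}

int main(void)
{
	uint32_t ptes[1024] = { 0 };

	flush_pte_range(ptes, 1024);
	return 0;
}
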
557 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, in psb_mmu_flush_ptes() argument
565 void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, in psb_mmu_remove_pfn_sequence() argument
574 down_read(&pd->driver->sem); in psb_mmu_remove_pfn_sequence()
581 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_remove_pfn_sequence()
593 if (pd->hw_context != -1) in psb_mmu_remove_pfn_sequence()
594 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_remove_pfn_sequence()
596 up_read(&pd->driver->sem); in psb_mmu_remove_pfn_sequence()
598 if (pd->hw_context != -1) in psb_mmu_remove_pfn_sequence()
599 psb_mmu_flush(pd->driver); in psb_mmu_remove_pfn_sequence()
604 void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, in psb_mmu_remove_pages() argument
626 down_read(&pd->driver->sem); in psb_mmu_remove_pages()
637 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_remove_pages()
650 if (pd->hw_context != -1) in psb_mmu_remove_pages()
651 psb_mmu_flush_ptes(pd, f_address, num_pages, in psb_mmu_remove_pages()
654 up_read(&pd->driver->sem); in psb_mmu_remove_pages()
656 if (pd->hw_context != -1) in psb_mmu_remove_pages()
657 psb_mmu_flush(pd->driver); in psb_mmu_remove_pages()
660 int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, in psb_mmu_insert_pfn_sequence() argument
672 down_read(&pd->driver->sem); in psb_mmu_insert_pfn_sequence()
679 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pfn_sequence()
695 if (pd->hw_context != -1) in psb_mmu_insert_pfn_sequence()
696 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_insert_pfn_sequence()
698 up_read(&pd->driver->sem); in psb_mmu_insert_pfn_sequence()
700 if (pd->hw_context != -1) in psb_mmu_insert_pfn_sequence()
701 psb_mmu_flush(pd->driver); in psb_mmu_insert_pfn_sequence()
706 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, in psb_mmu_insert_pages() argument
734 down_read(&pd->driver->sem); in psb_mmu_insert_pages()
743 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pages()
761 if (pd->hw_context != -1) in psb_mmu_insert_pages()
762 psb_mmu_flush_ptes(pd, f_address, num_pages, in psb_mmu_insert_pages()
765 up_read(&pd->driver->sem); in psb_mmu_insert_pages()
767 if (pd->hw_context != -1) in psb_mmu_insert_pages()
768 psb_mmu_flush(pd->driver); in psb_mmu_insert_pages()
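
psb_mmu_insert_pages(), psb_mmu_insert_pfn_sequence(), psb_mmu_remove_pages() and psb_mmu_remove_pfn_sequence() all follow the same pattern visible in the matched lines: take pd->driver->sem for read, map one page table at a time with psb_mmu_pt_alloc_map_lock() or psb_mmu_pt_map_lock(), write or invalidate its PTEs, and, only if the directory is bound to hardware (hw_context != -1), clflush the touched entries via psb_mmu_flush_ptes() and finally flush the TLB with psb_mmu_flush(). The sketch below illustrates only the per-table batching of an address range, under the 4 KiB-page, 1024-entry-table assumptions noted earlier; it is not the driver's actual loop:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PT_SPAN		(1024u * PAGE_SIZE)	/* address range one table maps */

/* Visit the range one page-table's worth of addresses at a time. */
static void walk_range(uint32_t start, uint32_t num_pages)
{
	uint32_t addr = start;
	uint32_t end = start + num_pages * PAGE_SIZE;

	while (addr < end) {
		/* next table boundary, capped at the end of the range */
		uint32_t next = (addr + PT_SPAN) & ~(PT_SPAN - 1);

		if (next > end || next < addr)	/* cap, and guard against wrap */
			next = end;
		printf("table for [0x%08x, 0x%08x)\n", addr, next);
		addr = next;
	}
}

int main(void)
{
	walk_range(0x003ff000, 5);	/* range spanning two page tables */
	return 0;
}
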
773 int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, in psb_mmu_virtual_to_pfn() argument
779 spinlock_t *lock = &pd->driver->lock; in psb_mmu_virtual_to_pfn()
781 down_read(&pd->driver->sem); in psb_mmu_virtual_to_pfn()
782 pt = psb_mmu_pt_map_lock(pd, virtual); in psb_mmu_virtual_to_pfn()
787 v = kmap_atomic(pd->p); in psb_mmu_virtual_to_pfn()
792 if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || in psb_mmu_virtual_to_pfn()
793 !(pd->invalid_pte & PSB_PTE_VALID)) { in psb_mmu_virtual_to_pfn()
798 *pfn = pd->invalid_pte >> PAGE_SHIFT; in psb_mmu_virtual_to_pfn()
810 up_read(&pd->driver->sem); in psb_mmu_virtual_to_pfn()
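
psb_mmu_virtual_to_pfn() is the reverse path: look up the PTE for a GPU-virtual address and recover the page frame number by shifting right by PAGE_SHIFT. When no page table exists for the address, the matched lines show it reading the directory entry instead and, only if that entry equals pd->invalid_pde and the invalid PTE is itself valid, reporting the dummy page's pfn (lines 792-798); otherwise the lookup fails. A minimal sketch of the PTE-decoding step, with PSB_PTE_VALID's bit position assumed for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PSB_PTE_VALID	(1u << 0)	/* assumed bit position */

/* Decode a PTE back into a pfn: the inverse of (pfn << PAGE_SHIFT) | flags. */
static int pte_to_pfn(uint32_t pte, uint32_t *pfn)
{
	if (!(pte & PSB_PTE_VALID))
		return -1;	/* no valid mapping */
	*pfn = pte >> PAGE_SHIFT;
	return 0;
}

int main(void)
{
	uint32_t pfn;

	if (!pte_to_pfn((0x12345u << PAGE_SHIFT) | PSB_PTE_VALID, &pfn))
		printf("pfn 0x%x\n", pfn);	/* prints pfn 0x12345 */
	return 0;
}
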