Lines Matching full:pd

128 void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)  in psb_mmu_set_pd_context()  argument
130 struct drm_device *dev = pd->driver->dev; in psb_mmu_set_pd_context()
135 down_write(&pd->driver->sem); in psb_mmu_set_pd_context()
136 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset); in psb_mmu_set_pd_context()
138 psb_mmu_flush_pd_locked(pd->driver, 1); in psb_mmu_set_pd_context()
139 pd->hw_context = hw_context; in psb_mmu_set_pd_context()
140 up_write(&pd->driver->sem); in psb_mmu_set_pd_context()
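
The hits above are from the GMA500 (Poulsbo) DRM driver's MMU code. Lines 128-140 show psb_mmu_set_pd_context(): the driver's write semaphore is taken, the page directory's physical address (its page frame number shifted left by PAGE_SHIFT) is written to an SGX register with PSB_WSGX32(), the directory is flushed, and the hardware context number is recorded in pd->hw_context. Below is a minimal userspace sketch of the address and register arithmetic only; the register offset follows the PSB_CR_BIF_DIR_LIST_BASE0 + hw_context * 4 pattern visible at line 247, and both the register value and the PFN are made-up illustrative numbers, not the driver's.

/* Userspace model of the PD install in psb_mmu_set_pd_context():
 * the directory page's PFN is shifted into a physical address and
 * written to a per-context register.  The register offset below is
 * an assumption based on the BASE0 + hw_context * 4 pattern at
 * line 247; the value 0x0c84 is purely illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT                12
#define PSB_CR_BIF_DIR_LIST_BASE0 0x0c84   /* illustrative, not from the driver headers */

int main(void)
{
	uint64_t pd_pfn = 0x0003f2a1;                  /* page_to_pfn(pd->p), made up */
	int hw_context = 1;

	uint64_t pd_phys = pd_pfn << PAGE_SHIFT;       /* value PSB_WSGX32() would write */
	uint32_t reg = PSB_CR_BIF_DIR_LIST_BASE0 + hw_context * 4;

	printf("write 0x%llx to SGX register 0x%x\n",
	       (unsigned long long)pd_phys, reg);
	return 0;
}
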
168 struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); in psb_mmu_alloc_pd() local
172 if (!pd) in psb_mmu_alloc_pd()
175 pd->p = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
176 if (!pd->p) in psb_mmu_alloc_pd()
178 pd->dummy_pt = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
179 if (!pd->dummy_pt) in psb_mmu_alloc_pd()
181 pd->dummy_page = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
182 if (!pd->dummy_page) in psb_mmu_alloc_pd()
186 pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), in psb_mmu_alloc_pd()
188 pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), in psb_mmu_alloc_pd()
191 pd->invalid_pde = 0; in psb_mmu_alloc_pd()
192 pd->invalid_pte = 0; in psb_mmu_alloc_pd()
195 v = kmap(pd->dummy_pt); in psb_mmu_alloc_pd()
197 v[i] = pd->invalid_pte; in psb_mmu_alloc_pd()
199 kunmap(pd->dummy_pt); in psb_mmu_alloc_pd()
201 v = kmap(pd->p); in psb_mmu_alloc_pd()
203 v[i] = pd->invalid_pde; in psb_mmu_alloc_pd()
205 kunmap(pd->p); in psb_mmu_alloc_pd()
207 clear_page(kmap(pd->dummy_page)); in psb_mmu_alloc_pd()
208 kunmap(pd->dummy_page); in psb_mmu_alloc_pd()
210 pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); in psb_mmu_alloc_pd()
211 if (!pd->tables) in psb_mmu_alloc_pd()
214 pd->hw_context = -1; in psb_mmu_alloc_pd()
215 pd->pd_mask = PSB_PTE_VALID; in psb_mmu_alloc_pd()
216 pd->driver = driver; in psb_mmu_alloc_pd()
218 return pd; in psb_mmu_alloc_pd()
221 __free_page(pd->dummy_page); in psb_mmu_alloc_pd()
223 __free_page(pd->dummy_pt); in psb_mmu_alloc_pd()
225 __free_page(pd->p); in psb_mmu_alloc_pd()
227 kfree(pd); in psb_mmu_alloc_pd()
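
Lines 168-227 are psb_mmu_alloc_pd(): one page is allocated for the directory itself (pd->p), plus a dummy page table and a dummy data page; invalid_pde and invalid_pte are built from the dummy pages' PFNs via psb_mmu_mask_pte() (or left zero in the other branch), the dummy page table is filled with invalid_pte, the directory is filled with invalid_pde, the dummy data page is cleared, and a 1024-slot pointer array for the real page tables is allocated before pd_mask is set to PSB_PTE_VALID. The error path at lines 221-227 unwinds the allocations in reverse order. Below is a userspace model of the dummy-page scheme, assuming 1024 four-byte entries per table as implied by the * 1024 at line 210; the PFNs and the PSB_PTE_VALID value are assumptions.

/* Model of the dummy-page scheme: every directory slot points at a
 * dummy page table, and every dummy-PT slot points at a dummy data
 * page, so a stray GPU access always resolves to a harmless page. */
#include <stdint.h>
#include <stdio.h>

#define ENTRIES       1024           /* entries per table (see line 210) */
#define PSB_PTE_VALID 0x0001u        /* valid bit; value is an assumption */

int main(void)
{
	uint32_t pd[ENTRIES], dummy_pt[ENTRIES];
	uint32_t dummy_pt_pfn = 0x1001, dummy_page_pfn = 0x1002;  /* made-up PFNs */

	uint32_t invalid_pde = (dummy_pt_pfn << 12) | PSB_PTE_VALID;
	uint32_t invalid_pte = (dummy_page_pfn << 12) | PSB_PTE_VALID;

	for (int i = 0; i < ENTRIES; i++)
		dummy_pt[i] = invalid_pte;   /* mirrors v[i] = pd->invalid_pte at line 197 */
	for (int i = 0; i < ENTRIES; i++)
		pd[i] = invalid_pde;         /* mirrors v[i] = pd->invalid_pde at line 203 */

	printf("unmapped lookup -> PDE 0x%08x -> PTE 0x%08x\n",
	       pd[123], dummy_pt[456]);
	return 0;
}
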
237 void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) in psb_mmu_free_pagedir() argument
239 struct psb_mmu_driver *driver = pd->driver; in psb_mmu_free_pagedir()
246 if (pd->hw_context != -1) { in psb_mmu_free_pagedir()
247 PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4); in psb_mmu_free_pagedir()
255 pt = pd->tables[i]; in psb_mmu_free_pagedir()
260 vfree(pd->tables); in psb_mmu_free_pagedir()
261 __free_page(pd->dummy_page); in psb_mmu_free_pagedir()
262 __free_page(pd->dummy_pt); in psb_mmu_free_pagedir()
263 __free_page(pd->p); in psb_mmu_free_pagedir()
264 kfree(pd); in psb_mmu_free_pagedir()
268 static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) in psb_mmu_alloc_pt() argument
272 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; in psb_mmu_alloc_pt()
274 spinlock_t *lock = &pd->driver->lock; in psb_mmu_alloc_pt()
294 *ptes++ = pd->invalid_pte; in psb_mmu_alloc_pt()
297 if (pd->driver->has_clflush && pd->hw_context != -1) { in psb_mmu_alloc_pt()
310 pt->pd = pd; in psb_mmu_alloc_pt()
316 struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, in psb_mmu_pt_alloc_map_lock() argument
322 spinlock_t *lock = &pd->driver->lock; in psb_mmu_pt_alloc_map_lock()
325 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
328 pt = psb_mmu_alloc_pt(pd); in psb_mmu_pt_alloc_map_lock()
333 if (pd->tables[index]) { in psb_mmu_pt_alloc_map_lock()
337 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
341 v = kmap_atomic(pd->p); in psb_mmu_pt_alloc_map_lock()
342 pd->tables[index] = pt; in psb_mmu_pt_alloc_map_lock()
343 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; in psb_mmu_pt_alloc_map_lock()
347 if (pd->hw_context != -1) { in psb_mmu_pt_alloc_map_lock()
348 psb_mmu_clflush(pd->driver, (void *)&v[index]); in psb_mmu_pt_alloc_map_lock()
349 atomic_set(&pd->driver->needs_tlbflush, 1); in psb_mmu_pt_alloc_map_lock()
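
Lines 316-349 are psb_mmu_pt_alloc_map_lock(): under the driver's spinlock the directory slot for the address is checked, a fresh page table is allocated if the slot is empty (with a re-check after the unlocked allocation), and the new PDE is written as (page_to_pfn(pt->p) << 12) | pd->pd_mask; when the directory is bound to a hardware context, the touched PDE is cache-flushed and needs_tlbflush is set. Here is a small model of the index and PDE arithmetic; the >> 22 / & 0x3ff split is an assumption based on the << 12 at line 343 and the 1024-entry tables at line 210, and the PFN is made up.

/* Model of the two-level split used when installing a PDE:
 * bits 31..22 index the directory, bits 21..12 index the page table,
 * bits 11..0 are the byte offset inside the page. */
#include <stdint.h>
#include <stdio.h>

#define PSB_PTE_VALID 0x0001u        /* pd->pd_mask is PSB_PTE_VALID (line 215) */

static uint32_t pd_index(uint32_t addr) { return addr >> 22; }            /* assumption */
static uint32_t pt_index(uint32_t addr) { return (addr >> 12) & 0x3ff; }  /* assumption */

int main(void)
{
	uint32_t addr = 0x12345678;
	uint32_t pt_pfn = 0x00042;                     /* page_to_pfn(pt->p), made up */
	uint32_t pde = (pt_pfn << 12) | PSB_PTE_VALID; /* mirrors line 343 */

	printf("addr 0x%08x -> pd[%u], pt[%u], offset 0x%03x, pde 0x%08x\n",
	       addr, pd_index(addr), pt_index(addr), addr & 0xfff, pde);
	return 0;
}
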
356 static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd, in psb_mmu_pt_map_lock() argument
361 spinlock_t *lock = &pd->driver->lock; in psb_mmu_pt_map_lock()
364 pt = pd->tables[index]; in psb_mmu_pt_map_lock()
375 struct psb_mmu_pd *pd = pt->pd; in psb_mmu_pt_unmap_unlock() local
380 v = kmap_atomic(pd->p); in psb_mmu_pt_unmap_unlock()
381 v[pt->index] = pd->invalid_pde; in psb_mmu_pt_unmap_unlock()
382 pd->tables[pt->index] = NULL; in psb_mmu_pt_unmap_unlock()
384 if (pd->hw_context != -1) { in psb_mmu_pt_unmap_unlock()
385 psb_mmu_clflush(pd->driver, (void *)&v[pt->index]); in psb_mmu_pt_unmap_unlock()
386 atomic_set(&pd->driver->needs_tlbflush, 1); in psb_mmu_pt_unmap_unlock()
389 spin_unlock(&pd->driver->lock); in psb_mmu_pt_unmap_unlock()
393 spin_unlock(&pd->driver->lock); in psb_mmu_pt_unmap_unlock()
405 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; in psb_mmu_invalidate_pte()
410 struct psb_mmu_pd *pd; in psb_mmu_get_default_pd() local
413 pd = driver->default_pd; in psb_mmu_get_default_pd()
416 return pd; in psb_mmu_get_default_pd()
419 /* Returns the physical address of the PD shared by sgx/msvdx */
422 struct psb_mmu_pd *pd; in psb_get_default_pd_addr() local
424 pd = psb_mmu_get_default_pd(driver); in psb_get_default_pd_addr()
425 return page_to_pfn(pd->p) << PAGE_SHIFT; in psb_get_default_pd_addr()
499 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, in psb_mmu_flush_ptes() argument
511 unsigned long clflush_add = pd->driver->clflush_add; in psb_mmu_flush_ptes()
512 unsigned long clflush_mask = pd->driver->clflush_mask; in psb_mmu_flush_ptes()
514 if (!pd->driver->has_clflush) in psb_mmu_flush_ptes()
532 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_flush_ptes()
547 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, in psb_mmu_flush_ptes() argument
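
Lines 499-547 show psb_mmu_flush_ptes(): the real version (skipped early when pd->driver->has_clflush is false) walks the page tables covering the range and flushes the touched PTE slots from the CPU cache using the driver's clflush_add stride and clflush_mask, while line 547 appears to be a stub definition for builds without clflush support. Below is a userspace model of covering a span of PTE slots cache line by cache line; flush_line() stands in for the actual clflush instruction, and the 64-byte line size and the addresses are assumptions.

/* Model of cache-line flushing over a span of PTEs: round the start
 * down to a line boundary (clflush_mask) and step by the line size
 * (clflush_add) until the whole span has been covered. */
#include <stdint.h>
#include <stdio.h>

static void flush_line(uintptr_t p)          /* stand-in for clflush */
{
	printf("clflush line at 0x%lx\n", (unsigned long)p);
}

int main(void)
{
	unsigned long clflush_add  = 64;                  /* assumed cache-line size */
	unsigned long clflush_mask = ~(clflush_add - 1);  /* align-down mask         */

	uintptr_t first_pte = 0x1000 + 10 * 4;  /* &v[10], 4-byte entries, made up */
	uintptr_t last_pte  = 0x1000 + 40 * 4;  /* &v[40]                          */

	for (uintptr_t p = first_pte & clflush_mask; p <= last_pte; p += clflush_add)
		flush_line(p);
	return 0;
}
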
555 void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, in psb_mmu_remove_pfn_sequence() argument
564 down_read(&pd->driver->sem); in psb_mmu_remove_pfn_sequence()
571 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_remove_pfn_sequence()
583 if (pd->hw_context != -1) in psb_mmu_remove_pfn_sequence()
584 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_remove_pfn_sequence()
586 up_read(&pd->driver->sem); in psb_mmu_remove_pfn_sequence()
588 if (pd->hw_context != -1) in psb_mmu_remove_pfn_sequence()
589 psb_mmu_flush(pd->driver); in psb_mmu_remove_pfn_sequence()
594 void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, in psb_mmu_remove_pages() argument
616 down_read(&pd->driver->sem); in psb_mmu_remove_pages()
627 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_remove_pages()
640 if (pd->hw_context != -1) in psb_mmu_remove_pages()
641 psb_mmu_flush_ptes(pd, f_address, num_pages, in psb_mmu_remove_pages()
644 up_read(&pd->driver->sem); in psb_mmu_remove_pages()
646 if (pd->hw_context != -1) in psb_mmu_remove_pages()
647 psb_mmu_flush(pd->driver); in psb_mmu_remove_pages()
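
Lines 555-589 and 594-647 show the two teardown paths, psb_mmu_remove_pfn_sequence() and psb_mmu_remove_pages(). Both follow the same protocol: take the driver's read semaphore, map (or allocate) the page table covering each slice of the range, write invalid PTEs, call psb_mmu_flush_ptes() while still holding the semaphore if the directory is bound to a hardware context, then drop the semaphore and finish with psb_mmu_flush(). Below is a userspace model of the inner fill loop, mirroring the invalid-PTE store visible at line 405; the slot arithmetic and the invalid_pte value are assumptions.

/* Model of the unmap loop: write invalid_pte into every slot of the
 * affected page table that covers the range; the real code then
 * clflushes the slots and flushes the MMU when a hardware context is
 * bound. */
#include <stdint.h>
#include <stdio.h>

#define ENTRIES 1024

int main(void)
{
	static uint32_t pt[ENTRIES];                 /* one fake page table     */
	uint32_t invalid_pte = (0x1002u << 12) | 1;  /* dummy-page PTE, made up */

	uint32_t address = 0x00200000, num_pages = 3;
	for (uint32_t i = 0; i < num_pages; i++) {
		uint32_t va   = address + (i << 12);
		uint32_t slot = (va >> 12) & 0x3ff;  /* PTE index, assumption */
		pt[slot] = invalid_pte;              /* mirrors line 405 */
		printf("va 0x%08x -> pt[%u] = 0x%08x\n", va, slot, pt[slot]);
	}
	return 0;
}
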
650 int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, in psb_mmu_insert_pfn_sequence() argument
662 down_read(&pd->driver->sem); in psb_mmu_insert_pfn_sequence()
669 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pfn_sequence()
685 if (pd->hw_context != -1) in psb_mmu_insert_pfn_sequence()
686 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_insert_pfn_sequence()
688 up_read(&pd->driver->sem); in psb_mmu_insert_pfn_sequence()
690 if (pd->hw_context != -1) in psb_mmu_insert_pfn_sequence()
691 psb_mmu_flush(pd->driver); in psb_mmu_insert_pfn_sequence()
696 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, in psb_mmu_insert_pages() argument
724 down_read(&pd->driver->sem); in psb_mmu_insert_pages()
733 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pages()
751 if (pd->hw_context != -1) in psb_mmu_insert_pages()
752 psb_mmu_flush_ptes(pd, f_address, num_pages, in psb_mmu_insert_pages()
755 up_read(&pd->driver->sem); in psb_mmu_insert_pages()
757 if (pd->hw_context != -1) in psb_mmu_insert_pages()
758 psb_mmu_flush(pd->driver); in psb_mmu_insert_pages()
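
Lines 650-691 and 696-758 are the two mapping paths: psb_mmu_insert_pfn_sequence() maps a physically contiguous run of page frames, and psb_mmu_insert_pages() maps an array of struct page pointers. Both use the same semaphore/flush protocol as the removal paths and write one PTE per page. Here is a sketch of the PTE fill for the contiguous case; the (pfn << 12) | flags shape of mask_pte() is an assumption modelled on the psb_mmu_mask_pte() calls at lines 186-188, and the PFNs, address and flag value are made up.

/* Model of the fill loop in psb_mmu_insert_pfn_sequence(): a run of
 * physically contiguous page frames is mapped at consecutive virtual
 * pages, each PTE built from the PFN plus the type/valid bits. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mask_pte(uint32_t pfn, uint32_t type_bits)
{
	return (pfn << 12) | type_bits;   /* assumed shape of psb_mmu_mask_pte() */
}

int main(void)
{
	uint32_t start_pfn = 0x8000, num_pages = 4;
	uint32_t address   = 0x10000000;
	uint32_t type_bits = 0x1;         /* PSB_PTE_VALID, value assumed */

	for (uint32_t i = 0; i < num_pages; i++)
		printf("va 0x%08x -> pte 0x%08x\n",
		       address + (i << 12), mask_pte(start_pfn + i, type_bits));
	return 0;
}
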
763 int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, in psb_mmu_virtual_to_pfn() argument
769 spinlock_t *lock = &pd->driver->lock; in psb_mmu_virtual_to_pfn()
771 down_read(&pd->driver->sem); in psb_mmu_virtual_to_pfn()
772 pt = psb_mmu_pt_map_lock(pd, virtual); in psb_mmu_virtual_to_pfn()
777 v = kmap_atomic(pd->p); in psb_mmu_virtual_to_pfn()
782 if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || in psb_mmu_virtual_to_pfn()
783 !(pd->invalid_pte & PSB_PTE_VALID)) { in psb_mmu_virtual_to_pfn()
788 *pfn = pd->invalid_pte >> PAGE_SHIFT; in psb_mmu_virtual_to_pfn()
800 up_read(&pd->driver->sem); in psb_mmu_virtual_to_pfn()
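
Lines 763-800 are psb_mmu_virtual_to_pfn(): the page table for the virtual address is mapped under the read semaphore and the PTE's PFN is returned; when no table exists, the directory entry is read from pd->p and, if it still points at the dummy table, the dummy page's PFN is produced from pd->invalid_pte >> PAGE_SHIFT. Below is a simplified userspace model of the two-level walk; it uses a single fake table, omits the dummy-page fallback, and the index arithmetic and PSB_PTE_VALID value are assumptions.

/* Model of psb_mmu_virtual_to_pfn(): resolve a GPU virtual address to
 * a page frame number through the two-level directory/table layout.
 * The dummy-page fallback of the real function is omitted for brevity. */
#include <stdint.h>
#include <stdio.h>

#define ENTRIES       1024
#define PSB_PTE_VALID 0x1u     /* valid bit, value assumed */

static uint32_t pd_entries[ENTRIES];          /* fake page directory */
static uint32_t pt_entries[ENTRIES];          /* one fake page table */

static int virtual_to_pfn(uint32_t virt, uint32_t *pfn)
{
	uint32_t pde = pd_entries[virt >> 22];
	if (!(pde & PSB_PTE_VALID))
		return -1;                       /* no table mapped here */
	uint32_t pte = pt_entries[(virt >> 12) & 0x3ff];
	if (!(pte & PSB_PTE_VALID))
		return -1;
	*pfn = pte >> 12;                        /* mirrors the >> PAGE_SHIFT at line 788 */
	return 0;
}

int main(void)
{
	pd_entries[0x048] = (0x00042u << 12) | PSB_PTE_VALID; /* made-up PDE */
	pt_entries[0x345] = (0x0badcu << 12) | PSB_PTE_VALID; /* made-up PTE */

	uint32_t pfn;
	if (!virtual_to_pfn(0x12345678, &pfn))
		printf("va 0x12345678 -> pfn 0x%x\n", pfn);
	return 0;
}
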