// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 **************************************************************************/

#include <linux/highmem.h>

#include "mmu.h"
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */

/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock, and
 * no other functions that may be using the locks for other purposes may be
 * called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 */

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need to make an atomic insert_pages function that
 * may fail.
 * If it fails, the caller needs to insert the page using a workqueue function,
 * but on average it should be fast.
 */

static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}

#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}
#else

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
}

#endif

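/*
 * Invalidate the SGX page-directory cache. The caller must hold
 * driver->sem in write mode; the flush is skipped unless a TLB flush
 * is pending or @force is set.
 */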
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure data cache is turned off before enabling it */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif

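/*
 * Flush the SGX MMU: invalidate the directory cache if a TLB flush is
 * pending, otherwise just flush the data cache, and flag the MSVDX MMU
 * that an invalidation has happened.
 */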
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/* Make sure data cache is turned off and MMU is flushed before
	   restoring bank interface control register */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);
	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);
	up_write(&driver->sem);
}

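/*
 * Install @pd as the page directory for hardware context @hw_context
 * and invalidate the directory cache.
 */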
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}

static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}

static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}

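/*
 * Allocate a page directory. Unless page faults are to be trapped,
 * every directory and table entry initially points at a dummy page
 * table / dummy page, so that stray accesses hit harmless memory.
 */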
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	v = kmap(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap(pd->dummy_pt);

	v = kmap(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap(pd->p);

	clear_page(kmap(pd->dummy_page));
	kunmap(pd->dummy_page);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}

static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}

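/*
 * Free a page directory and all of its page tables. If the directory
 * is bound to a hardware context, detach it and invalidate the
 * directory cache first.
 */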
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}

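/*
 * Allocate a page table and fill it with the directory's invalid PTE.
 * If the directory is live on a hardware context, the freshly written
 * entries are clflushed so the GPU sees them.
 */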
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

#if defined(CONFIG_X86)
	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
#endif
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}

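/*
 * Return the page table covering @addr, allocating and hooking it into
 * the directory if it does not exist yet. On success the table page is
 * kmapped and the driver spinlock is held; release both with
 * psb_mmu_pt_unmap_unlock().
 */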
struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
					     unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;
		kunmap_atomic((void *) v);

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

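/*
 * Like psb_mmu_pt_alloc_map_lock(), but return NULL (with the lock
 * dropped) if no page table exists for @addr instead of allocating one.
 */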
static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

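/*
 * Unmap a page table and drop the driver spinlock. If the table no
 * longer holds any valid entries, unhook it from the directory and
 * free it.
 */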
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	down_read(&driver->sem);
	pd = driver->default_pd;
	up_read(&driver->sem);

	return pd;
}

/* Returns the physical address of the PD shared by sgx/msvdx */
uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	pd = psb_mmu_get_default_pd(driver);
	return page_to_pfn(pd->p) << PAGE_SHIFT;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}

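/*
 * Allocate and initialize the MMU driver: create the default page
 * directory, clear any pending bus-interface fault and detect clflush
 * support (and the cache line size) on x86.
 */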
struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;
	struct drm_psb_private *dev_priv = dev->dev_private;

	driver = kmalloc(sizeof(*driver), GFP_KERNEL);

	if (!driver)
		return NULL;

	driver->dev = dev;
	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);

	driver->has_clflush = 0;

#if defined(CONFIG_X86)
	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64 but not
		 * for i386. We have to do it here.
		 */

		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}
#endif

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}

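/*
 * Flush the CPU cache lines backing the PTEs of a (possibly tiled)
 * range, so that the GPU sees PTE updates made through the CPU map.
 * On non-x86 this falls back to drm_ttm_cache_flush().
 */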
#if defined(CONFIG_X86)
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush)
		return;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}
#else
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	drm_ttm_cache_flush();
}
#endif

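/*
 * Unmap @num_pages contiguous pages starting at GPU virtual address
 * @address, writing back the invalid PTE for each entry.
 */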
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return;
}

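/*
 * Unmap a possibly tiled region: @num_pages are treated as rows of
 * @desired_tile_stride pages placed @hw_tile_stride pages apart. With
 * hw_tile_stride == 0 the region is a single linear run.
 */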
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;

			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

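/*
 * Map @num_pages consecutive pfns, starting at @start_pfn, at GPU
 * virtual address @address with the caching/protection flags in @type.
 * Returns 0 on success or a negative error code.
 */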
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);
	ret = 0;

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

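/*
 * Map an array of pages at GPU virtual address @address, optionally in
 * a tiled layout (see psb_mmu_remove_pages), with the flags in @type.
 * Returns 0 on success, -EINVAL for a bad tile stride or -ENOMEM if a
 * page table could not be allocated.
 */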
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}

	ret = 0;
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

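/*
 * Translate GPU virtual address @virtual to the pfn it maps to. When
 * the address is only covered by the invalid PTE (dummy page), the
 * dummy page's pfn is returned; otherwise a missing or invalid mapping
 * yields -EINVAL.
 */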
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);

		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}