1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2020 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Christian König
24  */
25 
26 /* Pooling of allocated pages is necessary because changing the caching
27  * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
28  * invalidate for those addresses.
29  *
30  * In addition to that, allocations from the DMA coherent API are pooled as
31  * well because they are rather slow compared to alloc_pages+map.
32  */
33 
34 #include <linux/module.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/highmem.h>
37 #include <linux/sched/mm.h>
38 
39 #ifdef CONFIG_X86
40 #include <asm/set_memory.h>
41 #endif
42 
43 #include <drm/ttm/ttm_pool.h>
44 #include <drm/ttm/ttm_bo_driver.h>
45 #include <drm/ttm/ttm_tt.h>
46 
47 #include "ttm_module.h"
48 
49 /**
50  * struct ttm_pool_dma - Helper object for coherent DMA mappings
51  *
52  * @addr: original DMA address returned for the mapping
53  * @vaddr: original vaddr returned for the mapping, with the order in the lower bits
54  */
55 struct ttm_pool_dma {
56 	dma_addr_t addr;
57 	unsigned long vaddr;
58 };
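/* Note on bookkeeping: with use_dma_alloc the first page's page->private
 * points at the struct ttm_pool_dma above, and the allocation order is
 * packed into the low bits of @vaddr (the coherent mapping is at least
 * PAGE_SIZE aligned, so those bits are otherwise zero).  Without
 * use_dma_alloc, page->private stores the order directly.
 */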
59 
60 static unsigned long page_pool_size;
61 
62 MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
63 module_param(page_pool_size, ulong, 0644);
64 
65 static atomic_long_t allocated_pages;
66 
67 static struct ttm_pool_type global_write_combined[MAX_ORDER];
68 static struct ttm_pool_type global_uncached[MAX_ORDER];
69 
70 static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
71 static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
72 
73 static struct mutex shrinker_lock;
74 static struct list_head shrinker_list;
75 static struct shrinker mm_shrinker;
76 
77 /* Allocate pages of size 1 << order with the given gfp_flags */
78 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
79 					unsigned int order)
80 {
81 	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
82 	struct ttm_pool_dma *dma;
83 	struct page *p;
84 	void *vaddr;
85 
86 	/* Don't set the __GFP_COMP flag for higher order allocations.
87  * Mapping pages directly into a userspace process and calling
88 	 * put_page() on a TTM allocated page is illegal.
89 	 */
90 	if (order)
91 		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
92 			__GFP_KSWAPD_RECLAIM;
93 
94 	if (!pool->use_dma_alloc) {
95 		p = alloc_pages(gfp_flags, order);
96 		if (p)
97 			p->private = order;
98 		return p;
99 	}
100 
101 	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
102 	if (!dma)
103 		return NULL;
104 
105 	if (order)
106 		attr |= DMA_ATTR_NO_WARN;
107 
108 	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
109 				&dma->addr, gfp_flags, attr);
110 	if (!vaddr)
111 		goto error_free;
112 
113 	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
114 	 * TTM page fault handling and extend the DMA API to clean this up.
115 	 */
116 	if (is_vmalloc_addr(vaddr))
117 		p = vmalloc_to_page(vaddr);
118 	else
119 		p = virt_to_page(vaddr);
120 
121 	dma->vaddr = (unsigned long)vaddr | order;
122 	p->private = (unsigned long)dma;
123 	return p;
124 
125 error_free:
126 	kfree(dma);
127 	return NULL;
128 }
129 
130 /* Reset the caching attributes and free pages of size 1 << order */
131 static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
132 			       unsigned int order, struct page *p)
133 {
134 	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
135 	struct ttm_pool_dma *dma;
136 	void *vaddr;
137 
138 #ifdef CONFIG_X86
139 	/* We don't care that set_pages_wb is inefficient here. This is only
140 	 * used when we have to shrink and CPU overhead is irrelevant then.
141 	 */
142 	if (caching != ttm_cached && !PageHighMem(p))
143 		set_pages_wb(p, 1 << order);
144 #endif
145 
146 	if (!pool || !pool->use_dma_alloc) {
147 		__free_pages(p, order);
148 		return;
149 	}
150 
151 	if (order)
152 		attr |= DMA_ATTR_NO_WARN;
153 
154 	dma = (void *)p->private;
155 	vaddr = (void *)(dma->vaddr & PAGE_MASK);
156 	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
157 		       attr);
158 	kfree(dma);
159 }
160 
161 /* Apply a new caching to an array of pages */
162 static int ttm_pool_apply_caching(struct page **first, struct page **last,
163 				  enum ttm_caching caching)
164 {
165 #ifdef CONFIG_X86
166 	unsigned int num_pages = last - first;
167 
168 	if (!num_pages)
169 		return 0;
170 
171 	switch (caching) {
172 	case ttm_cached:
173 		break;
174 	case ttm_write_combined:
175 		return set_pages_array_wc(first, num_pages);
176 	case ttm_uncached:
177 		return set_pages_array_uc(first, num_pages);
178 	}
179 #endif
180 	return 0;
181 }
182 
183 /* Map pages of 1 << order size and fill the DMA address array  */
184 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
185 			struct page *p, dma_addr_t **dma_addr)
186 {
187 	dma_addr_t addr;
188 	unsigned int i;
189 
190 	if (pool->use_dma_alloc) {
191 		struct ttm_pool_dma *dma = (void *)p->private;
192 
193 		addr = dma->addr;
194 	} else {
195 		size_t size = (1ULL << order) * PAGE_SIZE;
196 
197 		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
198 		if (dma_mapping_error(pool->dev, addr))
199 			return -EFAULT;
200 	}
201 
202 	for (i = 1 << order; i ; --i) {
203 		*(*dma_addr)++ = addr;
204 		addr += PAGE_SIZE;
205 	}
206 
207 	return 0;
208 }
209 
210 /* Unmap pages of 1 << order size */
211 static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
212 			   unsigned int num_pages)
213 {
214 	/* Unmapped while freeing the page */
215 	if (pool->use_dma_alloc)
216 		return;
217 
218 	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
219 		       DMA_BIDIRECTIONAL);
220 }
221 
222 /* Give pages back to a specific pool_type */
223 static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
224 {
225 	unsigned int i, num_pages = 1 << pt->order;
226 
227 	for (i = 0; i < num_pages; ++i) {
228 		if (PageHighMem(p))
229 			clear_highpage(p + i);
230 		else
231 			clear_page(page_address(p + i));
232 	}
233 
234 	spin_lock(&pt->lock);
235 	list_add(&p->lru, &pt->pages);
236 	spin_unlock(&pt->lock);
237 	atomic_long_add(1 << pt->order, &allocated_pages);
238 }
239 
240 /* Take pages from a specific pool_type, return NULL when nothing available */
241 static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
242 {
243 	struct page *p;
244 
245 	spin_lock(&pt->lock);
246 	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
247 	if (p) {
248 		atomic_long_sub(1 << pt->order, &allocated_pages);
249 		list_del(&p->lru);
250 	}
251 	spin_unlock(&pt->lock);
252 
253 	return p;
254 }
255 
256 /* Initialize and add a pool type to the global shrinker list */
257 static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
258 			       enum ttm_caching caching, unsigned int order)
259 {
260 	pt->pool = pool;
261 	pt->caching = caching;
262 	pt->order = order;
263 	spin_lock_init(&pt->lock);
264 	INIT_LIST_HEAD(&pt->pages);
265 
266 	mutex_lock(&shrinker_lock);
267 	list_add_tail(&pt->shrinker_list, &shrinker_list);
268 	mutex_unlock(&shrinker_lock);
269 }
270 
271 /* Remove a pool_type from the global shrinker list and free all pages */
272 static void ttm_pool_type_fini(struct ttm_pool_type *pt)
273 {
274 	struct page *p;
275 
276 	mutex_lock(&shrinker_lock);
277 	list_del(&pt->shrinker_list);
278 	mutex_unlock(&shrinker_lock);
279 
280 	while ((p = ttm_pool_type_take(pt)))
281 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
282 }
283 
284 /* Return the pool_type to use for the given caching and order */
285 static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
286 						  enum ttm_caching caching,
287 						  unsigned int order)
288 {
289 	if (pool->use_dma_alloc)
290 		return &pool->caching[caching].orders[order];
291 
292 #ifdef CONFIG_X86
293 	switch (caching) {
294 	case ttm_write_combined:
295 		if (pool->use_dma32)
296 			return &global_dma32_write_combined[order];
297 
298 		return &global_write_combined[order];
299 	case ttm_uncached:
300 		if (pool->use_dma32)
301 			return &global_dma32_uncached[order];
302 
303 		return &global_uncached[order];
304 	default:
305 		break;
306 	}
307 #endif
308 
309 	return NULL;
310 }
311 
312 /* Free pages using the global shrinker list */
313 static unsigned int ttm_pool_shrink(void)
314 {
315 	struct ttm_pool_type *pt;
316 	unsigned int num_freed;
317 	struct page *p;
318 
319 	mutex_lock(&shrinker_lock);
320 	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
321 
322 	p = ttm_pool_type_take(pt);
323 	if (p) {
324 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
325 		num_freed = 1 << pt->order;
326 	} else {
327 		num_freed = 0;
328 	}
329 
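	/* Rotate this pool type to the tail so the next call shrinks another one */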
330 	list_move_tail(&pt->shrinker_list, &shrinker_list);
331 	mutex_unlock(&shrinker_lock);
332 
333 	return num_freed;
334 }
335 
336 /* Return the allocation order of a page */
337 static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
338 {
339 	if (pool->use_dma_alloc) {
340 		struct ttm_pool_dma *dma = (void *)p->private;
341 
342 		return dma->vaddr & ~PAGE_MASK;
343 	}
344 
345 	return p->private;
346 }
347 
348 /* Called when we got a page, either from a pool or newly allocated */
349 static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
350 				   struct page *p, dma_addr_t **dma_addr,
351 				   unsigned long *num_pages,
352 				   struct page ***pages)
353 {
354 	unsigned int i;
355 	int r;
356 
357 	if (*dma_addr) {
358 		r = ttm_pool_map(pool, order, p, dma_addr);
359 		if (r)
360 			return r;
361 	}
362 
363 	*num_pages -= 1 << order;
364 	for (i = 1 << order; i; --i, ++(*pages), ++p)
365 		**pages = p;
366 
367 	return 0;
368 }
369 
370 /**
371  * ttm_pool_free_range() - Free a range of TTM pages
372  * @pool: The pool used for allocating.
373  * @tt: The struct ttm_tt holding the page pointers.
374  * @caching: The page caching mode used by the range.
375  * @start_page: index for first page to free.
376  * @end_page: index for last page to free + 1.
377  *
378  * During allocation the ttm_tt page-vector may be populated with ranges of
379  * pages with different attributes if the allocation hits an error without being
380  * able to completely fulfill the allocation. This function can be used
381  * to free these individual ranges.
382  */
383 static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
384 				enum ttm_caching caching,
385 				pgoff_t start_page, pgoff_t end_page)
386 {
387 	struct page **pages = &tt->pages[start_page];
388 	unsigned int order;
389 	pgoff_t i, nr;
390 
391 	for (i = start_page; i < end_page; i += nr, pages += nr) {
392 		struct ttm_pool_type *pt = NULL;
393 
394 		order = ttm_pool_page_order(pool, *pages);
395 		nr = (1UL << order);
396 		if (tt->dma_address)
397 			ttm_pool_unmap(pool, tt->dma_address[i], nr);
398 
399 		pt = ttm_pool_select_type(pool, caching, order);
400 		if (pt)
401 			ttm_pool_type_give(pt, *pages);
402 		else
403 			ttm_pool_free_page(pool, caching, order, *pages);
404 	}
405 }
406 
407 /**
408  * ttm_pool_alloc - Fill a ttm_tt object
409  *
410  * @pool: ttm_pool to use
411  * @tt: ttm_tt object to fill
412  * @ctx: operation context
413  *
414  * Fill the ttm_tt object with pages and also make sure to DMA map them when
415  * necessary.
416  *
417  * Returns: 0 on success, negative error code otherwise.
418  */
419 int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
420 		   struct ttm_operation_ctx *ctx)
421 {
422 	pgoff_t num_pages = tt->num_pages;
423 	dma_addr_t *dma_addr = tt->dma_address;
424 	struct page **caching = tt->pages;
425 	struct page **pages = tt->pages;
426 	enum ttm_caching page_caching;
427 	gfp_t gfp_flags = GFP_USER;
428 	pgoff_t caching_divide;
429 	unsigned int order;
430 	struct page *p;
431 	int r;
432 
433 	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
434 	WARN_ON(dma_addr && !pool->dev);
435 
436 	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
437 		gfp_flags |= __GFP_ZERO;
438 
439 	if (ctx->gfp_retry_mayfail)
440 		gfp_flags |= __GFP_RETRY_MAYFAIL;
441 
442 	if (pool->use_dma32)
443 		gfp_flags |= GFP_DMA32;
444 	else
445 		gfp_flags |= GFP_HIGHUSER;
446 
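	/* Allocate from the largest order that still fits num_pages, dropping
	 * to smaller orders when an allocation fails.  Pages between 'caching'
	 * and 'pages' were freshly allocated write-back cached and still need
	 * tt->caching applied; batching that through ttm_pool_apply_caching()
	 * amortizes the costly attribute change (see the comment at the top of
	 * this file).  Highmem pages are left out of that range since they are
	 * not part of the linear mapping.
	 */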
447 	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
448 	     num_pages;
449 	     order = min_t(unsigned int, order, __fls(num_pages))) {
450 		struct ttm_pool_type *pt;
451 
452 		page_caching = tt->caching;
453 		pt = ttm_pool_select_type(pool, tt->caching, order);
454 		p = pt ? ttm_pool_type_take(pt) : NULL;
455 		if (p) {
456 			r = ttm_pool_apply_caching(caching, pages,
457 						   tt->caching);
458 			if (r)
459 				goto error_free_page;
460 
461 			caching = pages;
462 			do {
463 				r = ttm_pool_page_allocated(pool, order, p,
464 							    &dma_addr,
465 							    &num_pages,
466 							    &pages);
467 				if (r)
468 					goto error_free_page;
469 
470 				caching = pages;
471 				if (num_pages < (1 << order))
472 					break;
473 
474 				p = ttm_pool_type_take(pt);
475 			} while (p);
476 		}
477 
478 		page_caching = ttm_cached;
479 		while (num_pages >= (1 << order) &&
480 		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
481 
482 			if (PageHighMem(p)) {
483 				r = ttm_pool_apply_caching(caching, pages,
484 							   tt->caching);
485 				if (r)
486 					goto error_free_page;
487 				caching = pages;
488 			}
489 			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
490 						    &num_pages, &pages);
491 			if (r)
492 				goto error_free_page;
493 			if (PageHighMem(p))
494 				caching = pages;
495 		}
496 
497 		if (!p) {
498 			if (order) {
499 				--order;
500 				continue;
501 			}
502 			r = -ENOMEM;
503 			goto error_free_all;
504 		}
505 	}
506 
507 	r = ttm_pool_apply_caching(caching, pages, tt->caching);
508 	if (r)
509 		goto error_free_all;
510 
511 	return 0;
512 
513 error_free_page:
514 	ttm_pool_free_page(pool, page_caching, order, p);
515 
516 error_free_all:
517 	num_pages = tt->num_pages - num_pages;
518 	caching_divide = caching - tt->pages;
519 	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
520 	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
521 
522 	return r;
523 }
524 EXPORT_SYMBOL(ttm_pool_alloc);
525 
526 /**
527  * ttm_pool_free - Free the backing pages from a ttm_tt object
528  *
529  * @pool: Pool to give pages back to.
530  * @tt: ttm_tt object to unpopulate
531  *
532  * Give the backing pages back to a pool or free them
533  */
534 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
535 {
536 	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
537 
538 	while (atomic_long_read(&allocated_pages) > page_pool_size)
539 		ttm_pool_shrink();
540 }
541 EXPORT_SYMBOL(ttm_pool_free);
542 
543 /**
544  * ttm_pool_init - Initialize a pool
545  *
546  * @pool: the pool to initialize
547  * @dev: device for DMA allocations and mappings
548  * @use_dma_alloc: true if coherent DMA alloc should be used
549  * @use_dma32: true if GFP_DMA32 should be used
550  *
551  * Initialize the pool and its pool types.
552  */
553 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
554 		   bool use_dma_alloc, bool use_dma32)
555 {
556 	unsigned int i, j;
557 
558 	WARN_ON(!dev && use_dma_alloc);
559 
560 	pool->dev = dev;
561 	pool->use_dma_alloc = use_dma_alloc;
562 	pool->use_dma32 = use_dma32;
563 
564 	if (use_dma_alloc) {
565 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
566 			for (j = 0; j < MAX_ORDER; ++j)
567 				ttm_pool_type_init(&pool->caching[i].orders[j],
568 						   pool, i, j);
569 	}
570 }
571 
572 /**
573  * ttm_pool_fini - Cleanup a pool
574  *
575  * @pool: the pool to clean up
576  *
577  * Free all pages in the pool and unregister the types from the global
578  * shrinker.
579  */
580 void ttm_pool_fini(struct ttm_pool *pool)
581 {
582 	unsigned int i, j;
583 
584 	if (pool->use_dma_alloc) {
585 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
586 			for (j = 0; j < MAX_ORDER; ++j)
587 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
588 	}
589 }
590 
591 /* As long as pages are available make sure to release at least one */
592 static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
593 					    struct shrink_control *sc)
594 {
595 	unsigned long num_freed = 0;
596 
597 	do
598 		num_freed += ttm_pool_shrink();
599 	while (!num_freed && atomic_long_read(&allocated_pages));
600 
601 	return num_freed;
602 }
603 
604 /* Return the number of pages available or SHRINK_EMPTY if we have none */
605 static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
606 					     struct shrink_control *sc)
607 {
608 	unsigned long num_pages = atomic_long_read(&allocated_pages);
609 
610 	return num_pages ? num_pages : SHRINK_EMPTY;
611 }
612 
613 #ifdef CONFIG_DEBUG_FS
614 /* Count the number of pages available in a pool_type */
615 static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
616 {
617 	unsigned int count = 0;
618 	struct page *p;
619 
620 	spin_lock(&pt->lock);
621 	/* Only used for debugfs, the overhead doesn't matter */
622 	list_for_each_entry(p, &pt->pages, lru)
623 		++count;
624 	spin_unlock(&pt->lock);
625 
626 	return count;
627 }
628 
629 /* Print a nice header for the order */
630 static void ttm_pool_debugfs_header(struct seq_file *m)
631 {
632 	unsigned int i;
633 
634 	seq_puts(m, "\t ");
635 	for (i = 0; i < MAX_ORDER; ++i)
636 		seq_printf(m, " ---%2u---", i);
637 	seq_puts(m, "\n");
638 }
639 
640 /* Dump information about the different pool types */
641 static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
642 				    struct seq_file *m)
643 {
644 	unsigned int i;
645 
646 	for (i = 0; i < MAX_ORDER; ++i)
647 		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
648 	seq_puts(m, "\n");
649 }
650 
651 /* Dump the total amount of allocated pages */
652 static void ttm_pool_debugfs_footer(struct seq_file *m)
653 {
654 	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
655 		   atomic_long_read(&allocated_pages), page_pool_size);
656 }
657 
658 /* Dump the information for the global pools */
659 static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
660 {
661 	ttm_pool_debugfs_header(m);
662 
663 	mutex_lock(&shrinker_lock);
664 	seq_puts(m, "wc\t:");
665 	ttm_pool_debugfs_orders(global_write_combined, m);
666 	seq_puts(m, "uc\t:");
667 	ttm_pool_debugfs_orders(global_uncached, m);
668 	seq_puts(m, "wc 32\t:");
669 	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
670 	seq_puts(m, "uc 32\t:");
671 	ttm_pool_debugfs_orders(global_dma32_uncached, m);
672 	mutex_unlock(&shrinker_lock);
673 
674 	ttm_pool_debugfs_footer(m);
675 
676 	return 0;
677 }
678 DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);
679 
680 /**
681  * ttm_pool_debugfs - Debugfs dump function for a pool
682  *
683  * @pool: the pool to dump the information for
684  * @m: seq_file to dump to
685  *
686  * Make a debugfs dump with the per pool and global information.
687  */
688 int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
689 {
690 	unsigned int i;
691 
692 	if (!pool->use_dma_alloc) {
693 		seq_puts(m, "unused\n");
694 		return 0;
695 	}
696 
697 	ttm_pool_debugfs_header(m);
698 
699 	mutex_lock(&shrinker_lock);
700 	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
701 		seq_puts(m, "DMA ");
702 		switch (i) {
703 		case ttm_cached:
704 			seq_puts(m, "\t:");
705 			break;
706 		case ttm_write_combined:
707 			seq_puts(m, "wc\t:");
708 			break;
709 		case ttm_uncached:
710 			seq_puts(m, "uc\t:");
711 			break;
712 		}
713 		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
714 	}
715 	mutex_unlock(&shrinker_lock);
716 
717 	ttm_pool_debugfs_footer(m);
718 	return 0;
719 }
720 EXPORT_SYMBOL(ttm_pool_debugfs);
721 
722 /* Test the shrinker functions and dump the result */
723 static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
724 {
725 	struct shrink_control sc = { .gfp_mask = GFP_NOFS };
726 
727 	fs_reclaim_acquire(GFP_KERNEL);
728 	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
729 		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
730 	fs_reclaim_release(GFP_KERNEL);
731 
732 	return 0;
733 }
734 DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
735 
736 #endif
737 
738 /**
739  * ttm_pool_mgr_init - Initialize globals
740  *
741  * @num_pages: default number of pages
742  *
743  * Initialize the global locks and lists for the MM shrinker.
744  */
745 int ttm_pool_mgr_init(unsigned long num_pages)
746 {
747 	unsigned int i;
748 
749 	if (!page_pool_size)
750 		page_pool_size = num_pages;
751 
752 	mutex_init(&shrinker_lock);
753 	INIT_LIST_HEAD(&shrinker_list);
754 
755 	for (i = 0; i < MAX_ORDER; ++i) {
756 		ttm_pool_type_init(&global_write_combined[i], NULL,
757 				   ttm_write_combined, i);
758 		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
759 
760 		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
761 				   ttm_write_combined, i);
762 		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
763 				   ttm_uncached, i);
764 	}
765 
766 #ifdef CONFIG_DEBUG_FS
767 	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
768 			    &ttm_pool_debugfs_globals_fops);
769 	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
770 			    &ttm_pool_debugfs_shrink_fops);
771 #endif
772 
773 	mm_shrinker.count_objects = ttm_pool_shrinker_count;
774 	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
775 	mm_shrinker.seeks = 1;
776 	return register_shrinker(&mm_shrinker);
777 }
778 
779 /**
780  * ttm_pool_mgr_fini - Finalize globals
781  *
782  * Cleanup the global pools and unregister the MM shrinker.
783  */
784 void ttm_pool_mgr_fini(void)
785 {
786 	unsigned int i;
787 
788 	for (i = 0; i < MAX_ORDER; ++i) {
789 		ttm_pool_type_fini(&global_write_combined[i]);
790 		ttm_pool_type_fini(&global_uncached[i]);
791 
792 		ttm_pool_type_fini(&global_dma32_write_combined[i]);
793 		ttm_pool_type_fini(&global_dma32_uncached[i]);
794 	}
795 
796 	unregister_shrinker(&mm_shrinker);
797 	WARN_ON(!list_empty(&shrinker_list));
798 }
799