/*
 * Copyright 2011 (c) Oracle Corp.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in use pages.
 * - Tracks whether the page is UC, WB or cached (and reverts to WB
 *   when freed).
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		4
#define FREE_ALL_PAGES			(~0U)
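/*
 * Worked example (illustrative, assuming a typical 4 KiB PAGE_SIZE and
 * 8-byte pointers): NUM_PAGES_TO_ALLOC = 4096 / 8 = 512, so each batch
 * of allocations or frees handles at most 512 pages and the scratch
 * array of page pointers used below fits in exactly one page.
 * FREE_ALL_PAGES is a sentinel for 'nr_free' meaning "drain the pool".
 */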
#define IS_UNDEFINED			(0)
#define IS_WC				(1<<1)
#define IS_UC				(1<<2)
#define IS_CACHED			(1<<3)
#define IS_DMA32			(1<<4)

enum pool_type {
	POOL_IS_UNDEFINED,
	POOL_IS_WC = IS_WC,
	POOL_IS_UC = IS_UC,
	POOL_IS_CACHED = IS_CACHED,
	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
/*
 * The pool structure. There are usually six pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' pool is for pages that are actively
 * used. The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool.
 * @lock: Protects the inuse_list and free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @inuse_list: Pool of pages that are in use. The order is very important and
 *   it is in the order that the TTM pages that are put back are in.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head inuse_list;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting page keeping track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	void *vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have an immediate effect
 * anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'.
 * @dev: The 'struct device' associated with the 'pool'.
 * @pool: The 'struct dma_pool' associated with the 'dev'.
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools.
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total number of pools in existence.
 * @mm_shrink: The structure used by [un]register_shrinker.
 * @kobj: Kobject whose release callback frees the manager.
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
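
/*
 * Worked example for the conversion in ttm_pool_store() (assuming a
 * 4 KiB PAGE_SIZE, so PAGE_SIZE >> 10 == 4): writing "1024" to the
 * pool_max_size attribute means 1024 KiB and is stored as
 * 1024 / 4 = 256 pages; ttm_pool_show() below applies the inverse
 * (256 * 4 = 1024) when reading the attribute back.
 */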

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* for !CONFIG_X86 */
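
/*
 * On x86 the set_pages_array_{wb,wc,uc}() helpers are provided by the
 * architecture's set_memory/PAT code, which is why this file only
 * supplies the AGP-based fallbacks above for !CONFIG_X86 builds.
 */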

static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;
	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);

	kfree(d_page);
	d_page = NULL;
}

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
					   &d_page->dma,
					   pool->gfp_flags);
	if (d_page->vaddr)
		d_page->p = virt_to_page(d_page->vaddr);
	else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}
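
/*
 * Example of the mapping above: a TTM with TTM_PAGE_FLAG_DMA32 set and
 * caching_state == tt_uncached maps to IS_UC | IS_DMA32, i.e.
 * POOL_IS_UC_DMA32, while a cached TTM without the flag maps to
 * POOL_IS_CACHED.
 */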

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}

/* set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, 1);

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: the pool to free pages from.
 * @nr_free: the number of pages to free, or FREE_ALL_PAGES to free every
 * page in the pool.
 **/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);

	if (!pages_to_free) {
		pr_err("%s: Failed to allocate memory for pool free operation\n",
		       pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
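
/*
 * Behavior sketch for ttm_dma_page_pool_free() above: at most
 * NUM_PAGES_TO_ALLOC pages are released per lock hold, the pool lock
 * is dropped around the costly caching transition, and the return
 * value is how many of the requested pages are still left to free.
 * ttm_dma_page_pool_free(pool, FREE_ALL_PAGES) therefore keeps looping
 * via the 'restart' label until the free_list is empty.
 */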

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * When the 'struct device' is freed this destructor runs, though the
 * pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < 5; i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_* calls because by the time
	 * dma_pool_destroy is called, the kobj->name has already been
	 * deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}
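
/*
 * Name-building example for the loop in ttm_dma_pool_init() above:
 * with type == POOL_IS_CACHED_DMA32 the matching table entries are
 * "cached" and " dma32" (note the leading space in n[3]), yielding
 * "cached dma32" - 12 characters plus the NUL terminator, which is
 * exactly why 'struct dma_pool' declares name[13].
 */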

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)
		return found;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphics driver loading - in drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphics drivers end up quiescing the TTM
	 * (put_pages) and call the dev_res destructors: ttm_dma_pool_release.
	 * The nice thing is that at that point in time there are no pages
	 * associated with the driver, so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		found = pool;
		break;
	}
	return found;
}

/*
 * Free the pages that failed to change their caching state. Pages that
 * have already changed their caching state are put back on the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}

/*
 * Allocate 'count' pages, setting the requested caching state on them
 * and putting them all on the 'd_pages' list.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("%s: Unable to allocate table for new pages\n",
		       pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1) {
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_err("%s: Unable to get page %u\n",
			       pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
						 cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return the number of pages available in the pool's free list,
 * after refilling it if it was running low.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages to the end. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * @return zero on success, or -ENOMEM if no page could be obtained.
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
				  unsigned index)
{
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		r = 0;
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return r;
}

/*
 * On success, ttm_dma->pages_list holds ttm->num_pages correctly
 * cached pages; on failure a negative value (-ENOMEM, etc.) is returned.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;
	gfp_t gfp_flags;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool)) {
			return -ENOMEM;
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
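
/*
 * Usage sketch (illustrative only - the driver names and helpers here
 * are hypothetical, not part of this file): a driver's ttm_tt populate
 * backend would typically route into the function exported above:
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *dma_tt =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *		struct device *dev = mydrv_dma_device(ttm->bdev);
 *
 *		return ttm_dma_populate(dma_tt, dev);
 *	}
 *
 * with the matching unpopulate callback handing the same (ttm, dev)
 * pair to ttm_dma_unpopulate(), defined below.
 */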

/* Get a good estimate of how many pages are free in the pools */
static int ttm_dma_pool_get_num_unused_pages(void)
{
	struct device_pools *p;
	unsigned total = 0;

	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools)
		total += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return total;
}

/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure the pages array matches the list and count the pages */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;
		count++;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		npages = count;
		if (pool->npages_free > _manager->options.max_size) {
			npages = pool->npages_free - _manager->options.max_size;
			/* free at least NUM_PAGES_TO_ALLOC pages
			 * to reduce calls to set_memory_wb */
			if (npages < NUM_PAGES_TO_ALLOC)
				npages = NUM_PAGES_TO_ALLOC;
		}
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
 * Callback for the mm subsystem to request that the pools reduce the
 * number of pages held.
 */
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned idx = 0;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;

	if (list_empty(&_manager->pools))
		return 0;

	mutex_lock(&_manager->lock);
	pool_offset = pool_offset % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
	mutex_unlock(&_manager->lock);
	/* return estimated number of unused pages in the pools */
	return ttm_dma_pool_get_num_unused_pages();
}

static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret = -ENOMEM;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		goto err_manager;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		goto err;
	}
	ttm_dma_pool_mm_shrink_init(_manager);
	return 0;
err_manager:
	kfree(_manager);
	_manager = NULL;
err:
	return ret;
}
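
/*
 * Lifecycle note (hedged - the caller lives outside this file):
 * ttm_dma_page_alloc_init() is expected to run once, typically from
 * TTM's global memory accounting setup, with 'max_pages' derived from
 * the size of the kernel memory zone; ttm_dma_page_alloc_fini() below
 * undoes it on teardown.
 */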

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
			ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name", "virt", "busaddr"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
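
/*
 * Hookup sketch (illustrative - the actual debugfs wiring lives in the
 * drivers, not here): since ttm_dma_page_alloc_debugfs() has the
 * standard seq_file show signature, a DRM driver could expose it with
 * an entry such as:
 *
 *	static struct drm_info_list mydrv_ttm_debugfs_list[] = {
 *		{ "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
 *	};
 */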