/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Uses page->lru to keep a free list
 * - Doesn't track pages that are currently in use
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000

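/*
 * Worked example (illustration only, not driver logic): with 4 KiB pages
 * and 8-byte pointers, NUM_PAGES_TO_ALLOC = 4096 / 8 = 512, i.e. pages
 * move in and out of a pool in batches of up to 512, exactly one page's
 * worth of struct page pointers.
 */
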
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass to alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialized access to them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there
 * are some pages to free.
 * @small_allocation: Limit in number of pages below which an allocation is
 * considered small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		};
	};
};

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

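/*
 * Conversion illustration (assuming 4 KiB pages): PAGE_SIZE >> 10 == 4,
 * so writing "8192" (kB) to pool_max_size above stores 8192 / 4 = 2048
 * pages, and ttm_pool_show() multiplies back by 4 for display. The
 * NUM_PAGES_TO_ALLOC * (PAGE_SIZE >> 7) in the error message is simply
 * eight times the allocation batch size expressed in kB.
 */
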
static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
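
/*
 * The pool_index computed above encodes the pool in two bits, matching
 * the order of the pools[] union in struct ttm_pool_manager:
 *
 *	bit 0: caching	(0 = write-combined, 1 = uncached)
 *	bit 1: DMA32	(0 = normal,         1 = GFP_DMA32)
 *
 * so 0 -> wc_pool, 1 -> uc_pool, 2 -> wc_pool_dma32, 3 -> uc_pool_dma32.
 * Cached (tt_cached) requests never touch a pool: the caller gets NULL
 * and falls back to plain alloc_page()/__free_page().
 */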

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: Pool to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, frees all pages in the pool
 * @use_static: Safe to use the static buffer
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
			      bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
					GFP_KERNEL);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
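
/*
 * Note on the return value: ttm_page_pool_free() hands back the number
 * of requested pages it did NOT free, so a caller such as
 * ttm_pool_shrink_scan() below can compute the pages actually freed as
 * (nr_free - return value).
 */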

/**
 * Callback for mm to request the pool to reduce the number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	static unsigned start_pool;
	unsigned i;
	unsigned pool_offset;
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;

	if (!mutex_trylock(&lock))
		return SHRINK_STOP;
	pool_offset = ++start_pool % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
		freed += nr_free - shrink_pages;
	}
	mutex_unlock(&lock);
	return freed;
}
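
/*
 * Round-robin illustration: start_pool is static and pre-incremented,
 * so successive shrinker calls start at pools 1, 2, 3, 0, 1, ... and no
 * single pool absorbs all of the shrink pressure. Pages a pool could
 * not give up remain in shrink_pages and are requested from the next
 * pool in the cycle.
 */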

static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		count += _manager->pools[i].npages;

	return count;
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. If there are
 * any pages that have already changed their caching state, put them back
 * in the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of a highmem page should never be dma32, so we
		 * should be fine in that case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}
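
/*
 * Reading aid for ttm_alloc_new_pages(): freshly allocated pages are
 * staged in caching_array and converted to uc/wc in batches of
 * max_cpages, so the expensive set_pages_array_*() calls (which may
 * rewrite kernel mappings and flush TLBs on x86) run once per batch
 * rather than once per page. Highmem pages skip the conversion since
 * they have no permanent kernel mapping to re-flag.
 */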

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new pages
	 * are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If the allocation request is small and there are not enough
	 * pages in the pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct list_head *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk from whichever end of the list is closer, halving the worst
	 * case search. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
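
/*
 * Worked example (illustrative): with pool->npages == 100 and
 * count == 90, 90 > 100/2, so the cut point is found by walking about
 * ten entries backwards from the tail (i counts down from 101 to 90)
 * instead of ninety forwards from the head; list_cut_position() then
 * moves the first 90 pages onto the caller's list in constant time.
 */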

/* Put all pages in the pages list into the correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages, false);
}
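
/*
 * Trimming hysteresis (illustrative numbers): if max_size is 2048 pages
 * and a put leaves the pool at 2060, the excess is only 12 pages, but
 * the batch is rounded up to NUM_PAGES_TO_ALLOC (512 with 4 KiB pages)
 * so the costly write-back conversion in ttm_pages_put() is amortized
 * over a large batch instead of running on every put.
 */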

/*
 * On success the pages array will hold npages correctly cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}

			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			if (PageHighMem(p))
				clear_highpage(p);
			else
				clear_page(page_address(p));
		}
	}

	/* If the pool didn't have enough pages allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list put them back to
			 * the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}
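
/*
 * Allocation path summary (a reading aid, not additional logic):
 *   1. cached request -> no pool; plain alloc_page() per page.
 *   2. uc/wc request  -> take as much as possible from the pool,
 *      refilling it first for small requests; pooled pages are recycled,
 *      so they are zeroed here when TTM_PAGE_FLAG_ZERO_ALLOC is set.
 *   3. any shortfall  -> ttm_alloc_new_pages() outside the pool lock,
 *      with __GFP_ZERO already in gfp_flags for the new pages.
 */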

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	/* kzalloc can fail; bail out early instead of oopsing below */
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}
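
/*
 * Usage sketch (the caller-side expression is an assumption for
 * illustration): the TTM memory-global setup code is expected to call
 * something like
 *
 *	ttm_page_alloc_init(glob, kernel_zone_pages / 2);
 *
 * sizing the pool limit from its kernel memory zone, and to pair it
 * with ttm_page_alloc_fini() at teardown.
 */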

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	/* OK to use static buffer since global mutex is no longer used. */
	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
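
/*
 * ttm_pool_populate()/ttm_pool_unpopulate() are exported so drivers can
 * use them as their ttm_tt populate/unpopulate backend callbacks.
 * Populate also charges every page to the global memory accounting via
 * ttm_mem_global_alloc_page() and swaps content back in for a ttm that
 * was previously swapped out; unpopulate undoes the accounting before
 * returning pages to their pool.
 */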

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);