/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_global ttm_mem_glob;
EXPORT_SYMBOL(ttm_mem_glob);

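/*
 * struct ttm_mem_zone - accounting state for one memory zone
 * (field summary inferred from the code below):
 *
 * @kobj:       sysfs anchor; each zone gets its own directory.
 * @glob:       backpointer to the owning ttm_mem_global.
 * @name:       zone name ("kernel", "highmem" or "dma32").
 * @zone_mem:   total memory spanned by the zone, in bytes.
 * @emer_mem:   hard limit; only CAP_SYS_ADMIN may allocate past
 *              @max_mem up to this value.
 * @max_mem:    soft limit for ordinary allocations.
 * @swap_limit: usage above this level triggers the swap-out worker.
 * @used_mem:   bytes currently accounted to the zone; protected by
 *              glob->lock, like the other limits.
 */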
struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

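/*
 * Per-zone sysfs attributes. The read-only ones report accounting state;
 * the root-writable ones (emergency_memory, available_memory, swap_limit)
 * let the administrator retune the zone limits at runtime.
 */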
static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}
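
/*
 * Illustrative usage from the shell (the leading part of the path depends
 * on where ttm_get_kobj() anchors the TTM kobject, so it is elided here):
 *
 *   # cat /sys/.../memory_accounting/kernel/used_memory
 *   # echo 262144 > /sys/.../memory_accounting/kernel/swap_limit
 *
 * Values are exchanged in KiB; the store path shifts by 10 to convert
 * to bytes and clamps the result at zone_mem.
 */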

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static struct attribute ttm_mem_global_lower_mem_limit = {
	.name = "lower_mem_limit",
	.mode = S_IRUGO | S_IWUSR
};

static ssize_t ttm_mem_global_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);
	uint64_t val = 0;

	spin_lock(&glob->lock);
	val = glob->lower_mem_limit;
	spin_unlock(&glob->lock);
	/* convert from number of pages to KB */
	val <<= (PAGE_SHIFT - 10);
	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val);
}

static ssize_t ttm_mem_global_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	int chars;
	uint64_t val64;
	unsigned long val;
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	/* convert from KB to number of pages */
	val64 >>= (PAGE_SHIFT - 10);

	spin_lock(&glob->lock);
	glob->lower_mem_limit = val64;
	spin_unlock(&glob->lock);

	return size;
}

static struct attribute *ttm_mem_global_attrs[] = {
	&ttm_mem_global_lower_mem_limit,
	NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
	.show = &ttm_mem_global_show,
	.store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
	.sysfs_ops = &ttm_mem_global_ops,
	.default_attrs = ttm_mem_global_attrs,
};

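/*
 * Return true while at least one zone is above the usage target that
 * applies to the caller: the swap limit when invoked from the swap
 * workqueue, the emergency limit for CAP_SYS_ADMIN callers, and the
 * soft maximum otherwise. An @extra (pending allocation) that exceeds
 * the target drops the target to zero, forcing further swapping.
 * Called with glob->lock held.
 */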
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/*
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
			uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	spin_lock(&glob->lock);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		spin_unlock(&glob->lock);
		ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			break;
	}

	spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
	    container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

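	/*
	 * Default thresholds (noted here for clarity): the soft limit is
	 * half the zone, the emergency limit three quarters, and the swap
	 * limit three eighths (max_mem minus another eighth of the zone).
	 */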
	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
	int ret;

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/**
	 * No special dma32 zone needed.
	 */

	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */

	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

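/*
 * Set up the global accounting state. The "kernel" zone always exists;
 * CONFIG_HIGHMEM kernels additionally get a "highmem" zone, while other
 * configurations get a "dma32" zone when more than 4 GiB of RAM is
 * present. A dedicated single-threaded workqueue runs the swap-out work.
 */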
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	si_meminfo(&si);

	/* set it as 0 by default to keep original behavior of OOM */
	glob->lower_mem_limit = 0;

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	struct ttm_mem_zone *zone;
	unsigned int i;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
	memset(glob, 0, sizeof(*glob));
}

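/*
 * Queue the swap-out worker if any zone has gone above its swap limit.
 * The zone scan runs under glob->lock; the work is queued after the
 * lock is dropped, so a spurious queueing is possible but harmless.
 */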
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}

	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

/*
 * Check whether the combined free memory (free swap space plus available
 * system memory) would drop below the configured lower_mem_limit if an
 * allocation of @num_pages went ahead. Returns true when the allocation
 * should be refused so that reserve is preserved;
 * TTM_OPT_FLAG_FORCE_ALLOC bypasses the check.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
			uint64_t num_pages,
			struct ttm_operation_ctx *ctx)
{
	int64_t available;

	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
		return false;

	available = get_nr_swap_pages() + si_mem_available();
	available -= num_pages;
	if (available < glob->lower_mem_limit)
		return true;

	return false;
}

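/*
 * Check-and-reserve under a single hold of glob->lock: fail with
 * -ENOMEM if any relevant zone is already over its applicable limit
 * (the emergency limit for CAP_SYS_ADMIN, the soft maximum otherwise);
 * when @reserve is true and all zones pass, charge @amount to each
 * relevant zone atomically with the check.
 */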
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

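/*
 * Reserve @memory bytes in @single_zone (or in all zones when NULL),
 * retrying up to TTM_MEMORY_ALLOC_RETRIES times. After each failed
 * attempt, swap out buffers targeting roughly 125% of the request plus
 * a small slack of 16 bytes, then try again; fail immediately when the
 * context forbids waiting on the GPU.
 */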
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/**
	 * Normal allocations of kernel memory are registered in
	 * the kernel zone.
	 */

	return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone only:
	 * the highmem zone for highmem pages, or the kernel zone for
	 * pages above the DMA32 boundary, which need no dma32 accounting.
	 */

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}

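/*
 * Round a size for accounting purposes: power-of-two sizes are returned
 * unchanged, sizes above PAGE_SIZE are page-aligned, and everything else
 * is rounded up to the next power of two, with a floor of 4 bytes.
 * For example, on a kernel with 4 KiB pages: ttm_round_pot(100) == 128,
 * ttm_round_pot(4096) == 4096 and ttm_round_pot(5000) == 8192.
 */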
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
}
EXPORT_SYMBOL(ttm_round_pot);