/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/cc_platform.h>
#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

#include <linux/android_kabi.h>
ANDROID_KABI_DECLONLY(dma_buf);
ANDROID_KABI_DECLONLY(dma_buf_attachment);

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
	/*
	 * When using dma_alloc_coherent with memory encryption, the
	 * mapped TT pages need to be decrypted; otherwise the drivers
	 * will end up sending encrypted memory to the GPU.
	 */
	if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		page_flags |= TTM_TT_FLAG_DECRYPTED;
		drm_info_once(ddev, "TT memory decryption enabled.");
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}

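/*
 * Allocates storage for page pointers and DMA addresses in a single
 * allocation; the DMA address array is placed directly behind the
 * page array.
 */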
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

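/*
 * Allocates storage for DMA addresses only; an external ttm gets its
 * pages through ttm->sg instead of a page array.
 */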
static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}

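/**
 * ttm_tt_destroy - destroy a struct ttm_tt
 * @bdev: the device the ttm_tt came from
 * @ttm: the struct ttm_tt to destroy
 *
 * Destruction is delegated to the driver's ttm_tt_destroy callback.
 */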
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);

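/*
 * Initializes the common ttm_tt fields from the embedding BO; the page
 * and DMA address arrays are allocated separately by the callers.
 */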
static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

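/**
 * ttm_tt_init - initialize a struct ttm_tt
 * @ttm: the struct ttm_tt to initialize
 * @bo: the buffer object the ttm_tt will back
 * @page_flags: TTM_TT_FLAG_XX flags
 * @caching: the desired caching state of the pages
 * @extra_pages: extra pages to allocate on top of the BO size
 *
 * Sets up the ttm_tt fields and allocates the page directory. No
 * backing pages are allocated yet.
 *
 * Return: 0 on success, -ENOMEM if the page directory allocation fails.
 */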
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

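/**
 * ttm_tt_fini - tear down a struct ttm_tt
 * @ttm: the struct ttm_tt to finalize
 *
 * Drops the reference to any remaining swap storage and frees the page
 * and DMA address arrays. The ttm_tt must already be unpopulated.
 */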
void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

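/**
 * ttm_sg_tt_init - initialize a struct ttm_tt for DMA or sg use
 * @ttm: the struct ttm_tt to initialize
 * @bo: the buffer object the ttm_tt will back
 * @page_flags: TTM_TT_FLAG_XX flags
 * @caching: the desired caching state of the pages
 *
 * Like ttm_tt_init(), but also allocates room for DMA addresses. For
 * TTM_TT_FLAG_EXTERNAL ttms only the DMA address array is allocated.
 *
 * Return: 0 on success, -ENOMEM if the page directory allocation fails.
 */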
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

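/**
 * ttm_tt_swapin - swap in the pages of a tt object
 * @ttm: the struct ttm_tt, with backing pages already allocated
 *
 * Copies the contents back from the shmem object set up by
 * ttm_tt_swapout() and drops the reference to it.
 *
 * Return: 0 on success, negative error code otherwise.
 */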
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to a shmem_file, return number of pages swapped out or
 * negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);

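/**
 * ttm_tt_populate - allocate backing pages for a ttm_tt object
 * @bdev: the TTM device the ttm_tt belongs to
 * @ttm: the struct ttm_tt to fill with pages
 * @ctx: operation context for the allocation
 *
 * Accounts the pages against the global limits, swapping out other
 * BOs while a limit is exceeded, then allocates the pages through the
 * driver callback or the default pool. Previously swapped out content
 * is swapped back in.
 *
 * Return: 0 on success, negative error code otherwise.
 */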
int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);

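/**
 * ttm_tt_unpopulate - free the backing pages of a ttm_tt object
 * @bdev: the TTM device the ttm_tt belongs to
 * @ttm: the struct ttm_tt to free the pages of
 *
 * Counterpart to ttm_tt_populate(); returns the pages to the driver
 * or the default pool and updates the global accounting.
 */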
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif

/*
 * ttm_tt_mgr_init - initialize the global page limits
 *
 * Set up the debugfs shrink test file and, unless already set through
 * the module parameters, the default limits used when deciding to
 * swap out BOs.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

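/* Map a single tt page into kernel space, honoring the tt's caching. */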
static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);

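/**
 * ttm_tt_pages_limit - report the global limit for allocated pages
 *
 * Return: the current pages_limit, either the module parameter or the
 * default set up by ttm_tt_mgr_init().
 */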
unsigned long ttm_tt_pages_limit(void)
{
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);