// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_kunit_helpers.h"

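/* A single parameter set for the parameterized ttm_device_init_pools() test. */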
struct ttm_device_test_case {
	const char *description;
	bool use_dma_alloc;
	bool use_dma32;
	bool pools_init_expected;
};

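/*
 * Initialize one device and check that everything a driver relies on is
 * in place: the driver function table, the workqueue, the system domain
 * manager (TT-backed and enabled) and the address space shared with the
 * DRM device's anonymous inode.
 */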
static void ttm_device_init_basic(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_device *ttm_dev;
	struct ttm_resource_manager *ttm_sys_man;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);

	KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);

	ttm_sys_man = &ttm_dev->sysman;
	KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man);
	KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt);
	KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type);
	KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func);

	KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping,
			    priv->drm->anon_inode->i_mapping);

	ttm_device_fini(ttm_dev);
}

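/*
 * Initialize several devices against the same DRM device: each one must
 * be fully set up on its own, and all of them must show up on the common
 * device list.
 */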
static void ttm_device_init_multiple(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_device *ttm_devs;
	unsigned int i, num_dev = 3;
	int err;

	ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_devs);

	for (i = 0; i < num_dev; i++) {
		err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false);
		KUNIT_ASSERT_EQ(test, err, 0);

		KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
				    priv->drm->anon_inode->i_mapping);
		KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq);
		KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs);
		KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]);
	}

	KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev);

	for (i = 0; i < num_dev; i++)
		ttm_device_fini(&ttm_devs[i]);
}

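/*
 * ttm_device_fini() must disable the system domain manager, leave its
 * LRU list empty and clear the manager pointer in the device.
 */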
static void ttm_device_fini_basic(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_device *ttm_dev;
	struct ttm_resource_manager *man;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);

	man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
	KUNIT_ASSERT_NOT_NULL(test, man);

	ttm_device_fini(ttm_dev);

	KUNIT_ASSERT_FALSE(test, man->use_type);
	KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0]));
	KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
}

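/*
 * Device init depends on the DRM VMA offset manager; with it missing,
 * initialization must fail cleanly with -EINVAL.
 */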
static void ttm_device_init_no_vma_man(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct drm_device *drm = priv->drm;
	struct ttm_device *ttm_dev;
	struct drm_vma_offset_manager *vma_man;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	/* Let's pretend there's no VMA manager allocated */
	vma_man = drm->vma_offset_manager;
	drm->vma_offset_manager = NULL;

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_EXPECT_EQ(test, err, -EINVAL);

	/* Bring the manager back for a graceful cleanup */
	drm->vma_offset_manager = vma_man;
}

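/*
 * All four combinations of the two init flags. Only use_dma_alloc
 * decides whether the page pools get set up; use_dma32 alone does not.
 */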
static const struct ttm_device_test_case ttm_device_cases[] = {
	{
		.description = "No DMA allocations, no DMA32 required",
		.use_dma_alloc = false,
		.use_dma32 = false,
		.pools_init_expected = false,
	},
	{
		.description = "DMA allocations, DMA32 required",
		.use_dma_alloc = true,
		.use_dma32 = true,
		.pools_init_expected = true,
	},
	{
		.description = "No DMA allocations, DMA32 required",
		.use_dma_alloc = false,
		.use_dma32 = true,
		.pools_init_expected = false,
	},
	{
		.description = "DMA allocations, no DMA32 required",
		.use_dma_alloc = true,
		.use_dma32 = false,
		.pools_init_expected = true,
	},
};

static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc);

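/*
 * For each flag combination above, the device's embedded pool must
 * inherit the device pointer and both flags; when pool init is
 * expected, every per-caching, per-order pool type must point back at
 * the pool with matching caching and order values.
 */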
static void ttm_device_init_pools(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	const struct ttm_device_test_case *params = test->param_value;
	struct ttm_device *ttm_dev;
	struct ttm_pool *pool;
	struct ttm_pool_type pt;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev,
				    params->use_dma_alloc,
				    params->use_dma32);
	KUNIT_ASSERT_EQ(test, err, 0);

	pool = &ttm_dev->pool;
	KUNIT_ASSERT_NOT_NULL(test, pool);
	KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
	KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
	KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);

	if (params->pools_init_expected) {
		for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
			for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
				pt = pool->caching[i].orders[j];
				KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
				KUNIT_EXPECT_EQ(test, pt.caching, i);
				KUNIT_EXPECT_EQ(test, pt.order, j);

				if (params->use_dma_alloc)
					KUNIT_ASSERT_FALSE(test,
							   list_empty(&pt.pages));
			}
		}
	}

	ttm_device_fini(ttm_dev);
}

static struct kunit_case ttm_device_test_cases[] = {
	KUNIT_CASE(ttm_device_init_basic),
	KUNIT_CASE(ttm_device_init_multiple),
	KUNIT_CASE(ttm_device_fini_basic),
	KUNIT_CASE(ttm_device_init_no_vma_man),
	KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params),
	{}
};

static struct kunit_suite ttm_device_test_suite = {
	.name = "ttm_device",
	.init = ttm_test_devices_init,
	.exit = ttm_test_devices_fini,
	.test_cases = ttm_device_test_cases,
};

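/*
 * Run sketch, assuming the in-tree kunit tooling and a kunitconfig
 * shipped next to these tests (adjust paths to your tree):
 *
 *   ./tools/testing/kunit/kunit.py run \
 *           --kunitconfig=drivers/gpu/drm/ttm/tests ttm_device
 */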
kunit_test_suites(&ttm_device_test_suite);

MODULE_LICENSE("GPL");