1 /*
2 * Copyright 2020 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Christian König
23 */
24
25 #ifndef _TTM_DEVICE_H_
26 #define _TTM_DEVICE_H_
27
28 #include <linux/types.h>
29 #include <linux/workqueue.h>
30 #include <drm/ttm/ttm_resource.h>
31 #include <drm/ttm/ttm_pool.h>
32
33 #define TTM_NUM_MEM_TYPES 8
34
35 struct ttm_device;
36 struct ttm_placement;
37 struct ttm_buffer_object;
38 struct ttm_operation_ctx;
39
40 /**
41 * struct ttm_global - Buffer object driver global data.
42 *
43 * @dummy_read_page: Pointer to a dummy page used for mapping requests
44 * of unpopulated pages.
45 * @shrink: A shrink callback object used for buffer object swap.
46 * @device_list_mutex: Mutex protecting the device list.
47 * This mutex is held while traversing the device list for pm options.
48 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
49 * @device_list: List of buffer object devices.
50 * @swap_lru: Lru list of buffer objects used for swapping.
51 */
52 extern struct ttm_global {
53
54 /**
55 * Constant after init.
56 */
57
58 struct page *dummy_read_page;
59
60 /**
61 * Protected by ttm_global_mutex.
62 */
63 struct list_head device_list;
64
65 /**
66 * Internal protection.
67 */
68 atomic_t bo_count;
69 } ttm_glob;
70
/**
 * struct ttm_device_funcs - Driver callbacks for a TTM device.
 *
 * Filled in by the driver and passed to ttm_device_init(). All hooks
 * except the mandatory ones below may be NULL if the driver does not
 * need the corresponding notification.
 */
struct ttm_device_funcs {
	/**
	 * ttm_tt_create
	 *
	 * @bo: The buffer object to create the ttm for.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
					uint32_t page_flags);

	/**
	 * ttm_tt_populate
	 *
	 * @bdev: Pointer to a ttm device.
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 * @ctx: Operation context controlling the allocation behavior.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @bdev: Pointer to a ttm device.
	 * @ttm: The struct ttm_tt containing the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
				  struct ttm_tt *ttm);

	/**
	 * ttm_tt_destroy
	 *
	 * @bdev: Pointer to a ttm device.
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy(),
	 * so don't call ttm_tt_destroy() from this callback or you get an
	 * infinite loop.
	 */
	void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 * @placement: filled in by the driver with the desired placement
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags.
	 * This should not cause multihop evictions, and the core will warn
	 * if one is proposed.
	 */

	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @ctx: context for this move with parameters
	 * @new_mem: the new memory region receiving the buffer
	 * @hop: placement for driver directed intermediate hop
	 *
	 * Move a buffer between two memory regions.
	 * Returns errno -EMULTIHOP if driver requests a hop.
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop);

	/**
	 * Hook to notify driver about a resource delete.
	 */
	void (*delete_mem_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to swap out this bo.
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is not used anymore. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_device *bdev,
			      struct ttm_resource *mem);
	void (*io_mem_free)(struct ttm_device *bdev,
			    struct ttm_resource *mem);

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);

	/**
	 * Read/write memory buffers for ptrace access
	 *
	 * @bo: the BO to access
	 * @offset: the offset from the start of the BO
	 * @buf: pointer to source/destination buffer
	 * @len: number of bytes to copy
	 * @write: whether to read (0) from or write (non-0) to BO
	 *
	 * If successful, this function should return the number of
	 * bytes copied, -EIO otherwise. If the number of bytes
	 * returned is < len, the function may be called again with
	 * the remainder of the buffer to copy.
	 */
	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
			     void *buf, int len, int write);

	/**
	 * struct ttm_bo_driver member del_from_lru_notify
	 *
	 * @bo: the buffer object deleted from lru
	 *
	 * Notify driver that a BO was deleted from LRU.
	 */
	void (*del_from_lru_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to release a BO
	 *
	 * @bo: BO that is about to be released
	 *
	 * Gives the driver a chance to do any cleanup, including
	 * adding fences that may force a delayed delete.
	 */
	void (*release_notify)(struct ttm_buffer_object *bo);
};
230
231 /**
232 * struct ttm_device - Buffer object driver device-specific data.
233 *
234 * @device_list: Our entry in the global device list.
235 * @funcs: Function table for the device.
236 * @sysman: Resource manager for the system domain.
237 * @man_drv: An array of resource_managers.
238 * @vma_manager: Address space manager.
239 * @pool: page pool for the device.
240 * @dev_mapping: A pointer to the struct address_space representing the
241 * device address space.
242 * @wq: Work queue structure for the delayed delete workqueue.
243 */
244 struct ttm_device {
245 /*
246 * Constant after bo device init
247 */
248 struct list_head device_list;
249 struct ttm_device_funcs *funcs;
250
251 /*
252 * Access via ttm_manager_type.
253 */
254 struct ttm_resource_manager sysman;
255 struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
256
257 /*
258 * Protected by internal locks.
259 */
260 struct drm_vma_offset_manager *vma_manager;
261 struct ttm_pool pool;
262
263 /*
264 * Protection for the per manager LRU and ddestroy lists.
265 */
266 spinlock_t lru_lock;
267 struct list_head ddestroy;
268
269 /*
270 * Protected by load / firstopen / lastclose /unload sync.
271 */
272 struct address_space *dev_mapping;
273
274 /*
275 * Internal protection.
276 */
277 struct delayed_work wq;
278 };
279
/*
 * Swap-out entry points; implementations live in ttm_device.c.
 * NOTE(review): based on the names, ttm_global_swapout() walks all devices
 * while ttm_device_swapout() acts on a single @bdev — confirm against the
 * .c file.
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags);
283
284 static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device * bdev,int mem_type)285 ttm_manager_type(struct ttm_device *bdev, int mem_type)
286 {
287 return bdev->man_drv[mem_type];
288 }
289
ttm_set_driver_manager(struct ttm_device * bdev,int type,struct ttm_resource_manager * manager)290 static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
291 struct ttm_resource_manager *manager)
292 {
293 bdev->man_drv[type] = manager;
294 }
295
/*
 * Device lifetime; implementations live in ttm_device.c.
 * ttm_device_init() initializes @bdev with the driver's @funcs table and
 * returns 0 on success or a negative errno; ttm_device_fini() tears it
 * down again. NOTE(review): exact init side effects (pool setup, global
 * registration) are not visible in this header — see the .c file.
 */
int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32);
void ttm_device_fini(struct ttm_device *bdev);
301
302 #endif
303