/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}

unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
				    enum i915_cache_level level);
bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
				     enum i915_cache_level lvl);
void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
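
/*
 * Sketch of a typical handle lookup from an ioctl-style caller (the
 * args->handle field here is hypothetical):
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... use obj ...
 *
 *	i915_gem_object_put(obj);	// drop the reference taken by lookup
 */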

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker, assert held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}
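
/*
 * Typical usage of the ww variants above is a backoff-and-retry loop: on
 * -EDEADLK the context records the contended object and the caller is
 * expected to drop its locks and retry. A minimal sketch, assuming the
 * for_i915_gem_ww() helper from i915_gem_ww.h handles the -EDEADLK
 * backoff and a hypothetical do_work() callback:
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (err)
 *			continue;	// -EDEADLK backs off and retries
 *
 *		err = do_work(obj);	// hypothetical; runs with obj locked
 *	}
 *	// all ww locks are dropped when the loop exits
 */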

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	/* TODO: make DPT shrinkable when it has no bound vmas */
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
	       !obj->is_dpt;
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
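
/*
 * Worked example (stride value assumed for illustration): a Y-tiled object
 * is 32 rows tall per tile and an X-tiled object 8 rows, so with a 512-byte
 * stride a Y-tiled row of tiles spans 512 * 32 = 16384 bytes, while the
 * same stride X-tiled spans 512 * 8 = 4096 bytes.
 */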

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

/**
 * __i915_gem_object_page_iter_get_sg - helper to find the target scatterlist
 * pointer and the target page position using the pgoff_t @n input argument
 * and an i915_gem_object_page_iter
 * @obj: i915 GEM buffer object
 * @iter: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: output argument; set to the offset of page @n within the
 * returned scatterlist entry
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 * Takes and releases the RCU lock to search the radix_tree of
 * i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_page_iter_get_sg()
 */
struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset);

/**
 * i915_gem_object_page_iter_get_sg - wrapper macro for
 * __i915_gem_object_page_iter_get_sg()
 * @obj: i915 GEM buffer object
 * @it: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: output argument; set to the offset of page @n within the
 * returned scatterlist entry
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 * Takes and releases the RCU lock to search the radix_tree of
 * i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset @n's type before calling __i915_gem_object_page_iter_get_sg().
 */
#define i915_gem_object_page_iter_get_sg(obj, it, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_page_iter_get_sg(obj, it, n, offset); \
})
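
/*
 * The static_assert(castable_to_type(...)) guard in the wrapper macros
 * below rejects, at compile time, index arguments whose type could
 * silently truncate when converted to pgoff_t. A sketch of the intent
 * (the variables here are hypothetical):
 *
 *	pgoff_t n = 42;
 *	u16 small = 42;
 *
 *	sg = i915_gem_object_page_iter_get_sg(obj, &it, n, &off);	// OK
 *	sg = i915_gem_object_page_iter_get_sg(obj, &it, small, &off);	// OK, widens
 *	// an argument whose type cannot be losslessly cast to pgoff_t
 *	// fails the static_assert instead of truncating at runtime
 */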

/**
 * __i915_gem_object_get_sg - helper to find the target scatterlist
 * pointer and the target page position using the pgoff_t @n input argument
 * and a drm_i915_gem_object. It uses the object's internal shmem scatterlist
 * lookup function.
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: output argument; set to the offset of page @n within the
 * returned scatterlist entry
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * the i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_sg()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
			 unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
}

/**
 * i915_gem_object_get_sg - wrapper macro for __i915_gem_object_get_sg()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: output argument; set to the offset of page @n within the
 * returned scatterlist entry
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset @n's type before calling __i915_gem_object_get_sg().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg(obj, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_sg(obj, n, offset); \
})

/**
 * __i915_gem_object_get_sg_dma - helper to find the target scatterlist
 * pointer and the target page position using the pgoff_t @n input argument
 * and a drm_i915_gem_object. It uses the object's internal DMA mapped
 * scatterlist lookup function.
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: output argument; set to the offset of page @n within the
 * returned scatterlist entry
 *
 * It uses drm_i915_gem_object's internal DMA mapped scatterlist lookup function
 * as the i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_sg_dma()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
			     unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}

/**
 * i915_gem_object_get_sg_dma - wrapper macro for __i915_gem_object_get_sg_dma()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: output argument; set to the offset of page @n within the
 * returned scatterlist entry
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset @n's type before calling __i915_gem_object_get_sg_dma().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg_dma(obj, n, offset) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_sg_dma(obj, n, offset); \
})

/**
 * __i915_gem_object_get_page - helper to find the target page with a page offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * the i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg()
 * internally.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_page()
 * See also __i915_gem_object_page_iter_get_sg()
 */
struct page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_page - wrapper macro for __i915_gem_object_get_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset @n's type before calling __i915_gem_object_get_page().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_page(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_page(obj, n); \
})

/**
 * __i915_gem_object_get_dirty_page - helper to find the target page with a page
 * offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It works like i915_gem_object_get_page(), but it marks the returned page dirty.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dirty_page()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
struct page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dirty_page - wrapper macro for __i915_gem_object_get_dirty_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset @n's type before calling __i915_gem_object_get_dirty_page().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
#define i915_gem_object_get_dirty_page(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dirty_page(obj, n); \
})

/**
 * __i915_gem_object_get_dma_address_len - helper to get the bus address of a
 * targeted DMA mapped scatterlist entry from an i915 GEM buffer object, and
 * its length
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: output argument; set to the length of the DMA mapped range starting
 * at the returned bus address
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist entry.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dma_address_len()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
				      unsigned int *len);

/**
 * i915_gem_object_get_dma_address_len - wrapper macro for
 * __i915_gem_object_get_dma_address_len
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: output argument; set to the length of the DMA mapped range starting
 * at the returned bus address
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist entry.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset @n's type before calling __i915_gem_object_get_dma_address_len().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address_len()
 */
#define i915_gem_object_get_dma_address_len(obj, n, len) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dma_address_len(obj, n, len); \
})

/**
 * __i915_gem_object_get_dma_address - helper to get the bus address of a
 * targeted DMA mapped scatterlist entry from an i915 GEM buffer object
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist entry.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dma_address()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dma_address - wrapper macro for
 * __i915_gem_object_get_dma_address
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist entry.
 *
 * In order to avoid truncation of the input parameter, it checks the page
 * offset @n's type before calling __i915_gem_object_get_dma_address().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address()
 */
#define i915_gem_object_get_dma_address(obj, n) ({ \
	static_assert(castable_to_type(n, pgoff_t)); \
	__i915_gem_object_get_dma_address(obj, n); \
})
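
/*
 * Illustrative walk over the DMA addresses of an object's pages (the
 * object's pages must already be pinned; n_pages is hypothetical):
 *
 *	pgoff_t i;
 *
 *	for (i = 0; i < n_pages; i++) {
 *		dma_addr_t addr = i915_gem_object_get_dma_address(obj, i);
 *		// program addr into hardware, build a table, etc.
 *	}
 */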

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}
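
/*
 * i915_gem_object_pin_pages() must be called with the object lock held. A
 * minimal sketch of the expected pattern (error handling abbreviated;
 * use_pages() is hypothetical):
 *
 *	i915_gem_object_lock(obj, NULL);
 *	ret = i915_gem_object_pin_pages(obj);
 *	if (!ret) {
 *		use_pages(obj);		// hypothetical work on obj->mm.pages
 *		i915_gem_object_unpin_pages(obj);
 *	}
 *	i915_gem_object_unlock(obj);
 *
 * i915_gem_object_pin_pages_unlocked() below wraps the same operation in
 * its own lock/unlock for callers that do not already hold the lock.
 */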

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
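
/*
 * A minimal sketch of the pin_map/flush/unpin cycle for CPU access,
 * assuming a write through a writeback mapping (src and size are
 * hypothetical):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, src, size);		// CPU writes via the mapping
 *	i915_gem_object_flush_map(obj);		// flush CPU caches for the GPU
 *	i915_gem_object_unpin_map(obj);		// drop the pin on the mapping
 */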

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
				   unsigned int pat_index);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_gtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
void __shmem_writeback(size_t size, struct address_space *mapping);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif
/**
 * i915_gem_object_get_frontbuffer - Get the object's frontbuffer
 * @obj: The object whose frontbuffer to get.
 *
 * Get a pointer to the object's frontbuffer if such exists. Please note that
 * the RCU mechanism is used to handle e.g. ongoing removal of the frontbuffer
 * pointer.
 *
 * Return: pointer to the object's frontbuffer if such exists or NULL
 */
static inline struct intel_frontbuffer *
i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
{
	struct intel_frontbuffer *front;

	if (likely(!rcu_access_pointer(obj->frontbuffer)))
		return NULL;

	rcu_read_lock();
	do {
		front = rcu_dereference(obj->frontbuffer);
		if (!front)
			break;

		if (unlikely(!kref_get_unless_zero(&front->ref)))
			continue;

		if (likely(front == rcu_access_pointer(obj->frontbuffer)))
			break;

		intel_frontbuffer_put(front);
	} while (1);
	rcu_read_unlock();

	return front;
}

/**
 * i915_gem_object_set_frontbuffer - Set the object's frontbuffer
 * @obj: The object whose frontbuffer to set.
 * @front: The frontbuffer to set
 *
 * Set the object's frontbuffer pointer. If a frontbuffer is already set for
 * the object, keep it and return its pointer to the caller. Please note that
 * the RCU mechanism is used to handle e.g. ongoing removal of the frontbuffer
 * pointer. This function is protected by i915->display.fb_tracking.lock.
 *
 * Return: pointer to the frontbuffer which was set.
 */
static inline struct intel_frontbuffer *
i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
				struct intel_frontbuffer *front)
{
	struct intel_frontbuffer *cur = front;

	if (!front) {
		RCU_INIT_POINTER(obj->frontbuffer, NULL);
	} else if (rcu_access_pointer(obj->frontbuffer)) {
		cur = rcu_dereference_protected(obj->frontbuffer, true);
		kref_get(&cur->ref);
	} else {
		drm_gem_object_get(intel_bo_to_drm_bo(obj));
		rcu_assign_pointer(obj->frontbuffer, front);
	}

	return cur;
}

#endif