/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. The object is only valid while the RCU read lock is held; note
 * carefully that it may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}
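
/*
 * Example (a minimal sketch, not from this file): the usual lookup/put
 * bracket in an ioctl handler; @args->handle is a hypothetical
 * userspace-supplied handle.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... obj holds a full reference here ...
 *
 *	i915_gem_object_put(obj);
 */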

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww)
		list_add_tail(&obj->obj_link, &ww->obj_list);
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK)
		ww->contended = obj;

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	dma_resv_unlock(obj->base.resv);
}
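
/*
 * Example (a minimal sketch, not from this file): the ww transaction loop
 * around i915_gem_object_lock(). Assumes the i915_gem_ww_ctx_init(),
 * i915_gem_ww_ctx_backoff() and i915_gem_ww_ctx_fini() helpers that
 * accompany struct i915_gem_ww_ctx; on -EDEADLK, __i915_gem_object_lock()
 * records the contended object in ww.contended for the backoff to handle.
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... access obj; the lock is dropped by i915_gem_ww_ctx_fini() ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */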

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
				  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
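
/*
 * Worked example (illustrative only): with a 4096 byte stride, a Y-tiled
 * object has 32-row tiles, so one tile row spans 4096 * 32 = 131072 bytes
 * (128 KiB); X-tiled, the 8-row tiles make that 4096 * 8 = 32 KiB.
 */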

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	/*
	 * Only used by struct_mutex, when called "recursively" from
	 * direct-reclaim-esque paths. Safe because there is only ever one
	 * struct_mutex in the entire system.
	 */
	I915_MM_SHRINKER = 1,
	/*
	 * Used for obj->mm.lock when allocating pages. Safe because the object
	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
	 * it. As soon as the object has pages, obj->mm.lock nests within
	 * fs_reclaim.
	 */
	I915_MM_GET_PAGES = 1,
};

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}
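
/*
 * Example (a minimal sketch, not from this file): keep the backing pages
 * resident across an access, then drop the pin so the shrinker may reap
 * them again.
 *
 *	int err;
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... the pages stay resident while pinned ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */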

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
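
/*
 * Example (a minimal sketch, not from this file): CPU writes through a
 * pinned kernel mapping, flushed before the GPU reads them. @data and
 * @size are hypothetical.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */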

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
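
/*
 * Example (a minimal sketch, not from this file): a CPU write path using
 * the prepare/finish helpers; @user_data and @len are hypothetical, and
 * the helpers' own locking requirements are elided. The returned flags
 * say whether cachelines must be flushed around the access.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	... copy @len bytes from @user_data, flushing before/after the
 *	access when CLFLUSH_BEFORE / CLFLUSH_AFTER are set ...
 *
 *	i915_gem_object_finish_access(obj);
 */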

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_rcu(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif