/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <linux/mmu_notifier.h>

#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;
struct intel_memory_region;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_IS_SHRINKABLE			BIT(1)
/* Skip the shrinker management in set_pages/unset_pages */
#define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST	BIT(2)
#define I915_GEM_OBJECT_IS_PROXY			BIT(3)
#define I915_GEM_OBJECT_NO_MMAP				BIT(4)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * an associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	int (*truncate)(struct drm_i915_gem_object *obj);
	/**
	 * shrink - Perform further backend specific actions to facilitate
	 * shrinking.
	 * @obj: The gem object
	 * @flags: Extra flags to control shrinking behaviour in the backend
	 *
	 * Possible values for @flags:
	 *
	 * I915_GEM_OBJECT_SHRINK_WRITEBACK - Try to perform writeback of the
	 * backing pages, if supported.
	 *
	 * I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT - Don't wait for the object to
	 * idle. Active objects can be considered later. The TTM backend, for
	 * example, might have async migrations going on, which don't use any
	 * i915_vma to track the active GTT binding, and hence having an unbound
	 * object might not be enough.
	 */
#define I915_GEM_OBJECT_SHRINK_WRITEBACK	BIT(0)
#define I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT	BIT(1)
	int (*shrink)(struct drm_i915_gem_object *obj, unsigned int flags);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);
	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
	void (*unmap_virtual)(struct drm_i915_gem_object *obj);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);

	/**
	 * adjust_lru - notify that the madvise value was updated
	 * @obj: The gem object
	 *
	 * The madvise value may have been updated, or the object was recently
	 * referenced, so act accordingly (perhaps changing an LRU list, etc.).
	 */
	void (*adjust_lru)(struct drm_i915_gem_object *obj);

	/**
	 * delayed_free - Override the default delayed free implementation
	 */
	void (*delayed_free)(struct drm_i915_gem_object *obj);

	/**
	 * migrate - Migrate object to a different region either for
	 * pinning or for as long as the object lock is held.
	 */
	int (*migrate)(struct drm_i915_gem_object *obj,
		       struct intel_memory_region *mr,
		       unsigned int flags);

	void (*release)(struct drm_i915_gem_object *obj);

	const struct vm_operations_struct *mmap_ops;
	const char *name; /* friendly name for debug, e.g. lockdep classes */
};
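
/*
 * Illustrative sketch only: a hypothetical backend would typically wire its
 * callbacks into a static ops table along the lines of the following. The
 * foo_* names are made up for this example; the flags and callback fields are
 * the ones defined in struct drm_i915_gem_object_ops above.
 *
 *	static const struct drm_i915_gem_object_ops i915_gem_foo_ops = {
 *		.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.name = "i915_gem_object_foo",
 *		.get_pages = foo_get_pages,
 *		.put_pages = foo_put_pages,
 *	};
 */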

/**
 * enum i915_cache_level - The supported GTT caching values for system memory
 * pages.
 *
 * These translate to some special GTT PTE bits when binding pages into some
 * address space. It also determines whether an object, or rather its pages,
 * are coherent with the GPU when also reading or writing through the CPU
 * cache with those pages.
 *
 * Userspace can also control this through struct drm_i915_gem_caching.
 */
enum i915_cache_level {
	/**
	 * @I915_CACHE_NONE:
	 *
	 * GPU access is not coherent with the CPU cache. If the cache is dirty
	 * and we need the underlying pages to be coherent with some later GPU
	 * access then we need to manually flush the pages.
	 *
	 * On shared LLC platforms reads and writes through the CPU cache are
	 * still coherent even with this setting. See also
	 * &drm_i915_gem_object.cache_coherent for more details. Due to this we
	 * should only ever use uncached for scanout surfaces, otherwise we end
	 * up over-flushing in some places.
	 *
	 * This is the default on non-LLC platforms.
	 */
	I915_CACHE_NONE = 0,
	/**
	 * @I915_CACHE_LLC:
	 *
	 * GPU access is coherent with the CPU cache. If the cache is dirty,
	 * then the GPU will ensure that access remains coherent, when both
	 * reading and writing through the CPU cache. GPU writes can dirty the
	 * CPU cache.
	 *
	 * Not used for scanout surfaces.
	 *
	 * Applies to both platforms with shared LLC (HAS_LLC), and snooping
	 * based platforms (HAS_SNOOP).
	 *
	 * This is the default on shared LLC platforms. The only exception is
	 * scanout objects, where the display engine is not coherent with the
	 * CPU cache. For such objects I915_CACHE_NONE or I915_CACHE_WT is
	 * automatically applied by the kernel in pin_for_display, if userspace
	 * has not done so already.
	 */
	I915_CACHE_LLC,
	/**
	 * @I915_CACHE_L3_LLC:
	 *
	 * Explicitly enable the Gfx L3 cache, with coherent LLC.
	 *
	 * The Gfx L3 sits between the domain specific caches, e.g.
	 * sampler/render caches, and the larger LLC. LLC is coherent with the
	 * GPU, but L3 is only visible to the GPU, so likely needs to be flushed
	 * when the workload completes.
	 *
	 * Not used for scanout surfaces.
	 *
	 * Only exposed on some gen7 + GGTT. More recent hardware has dropped
	 * this explicit setting, where it should now be enabled by default.
	 */
	I915_CACHE_L3_LLC,
	/**
	 * @I915_CACHE_WT:
	 *
	 * Write-through. Used for scanout surfaces.
	 *
	 * The GPU can utilise the caches, while still having the display engine
	 * be coherent with GPU writes, as a result we don't need to flush the
	 * CPU caches when moving out of the render domain. This is the default
	 * setting chosen by the kernel, if supported by the HW, otherwise we
	 * fallback to I915_CACHE_NONE. On the CPU side writes through the CPU
	 * cache still need to be flushed, to remain coherent with the display
	 * engine.
	 */
	I915_CACHE_WT,
};
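
/*
 * Userspace selects between these levels indirectly through the set-caching
 * ioctl mentioned above, where I915_CACHING_CACHED roughly corresponds to
 * I915_CACHE_LLC and I915_CACHING_NONE to I915_CACHE_NONE. A minimal,
 * illustrative userspace sketch (assuming a valid DRM fd and GEM handle;
 * not part of this header):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */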

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
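
/*
 * These map types are consumed by helpers such as i915_gem_object_pin_map().
 * A hedged, illustrative sketch of typical kernel-side usage (locking and
 * error handling simplified; not part of this header):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */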

enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_WC,
	I915_MMAP_TYPE_WB,
	I915_MMAP_TYPE_UC,
	I915_MMAP_TYPE_FIXED,
};

struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
};

struct i915_gem_object_page_iter {
	struct scatterlist *sg_pos;
	unsigned int sg_idx; /* in pages, but 32bit eek! */

	struct radix_tree_root radix;
	struct mutex lock; /* protects this cache */
};

struct drm_i915_gem_object {
	/*
	 * We might have reason to revisit the below since it wastes
	 * a lot of space for non-ttm gem objects.
	 * In any case, always use the accessors for the ttm_buffer_object
	 * when accessing it.
	 */
	union {
		struct drm_gem_object base;
		struct ttm_buffer_object __do_not_access;
	};

	const struct drm_i915_gem_object_ops *ops;

	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type: all GGTT VMAs are
		 * placed at the head and all ppGTT VMAs are placed at the tail.
		 * The different types of GGTT VMA are unordered between
		 * themselves, use the @vma.tree (which has a defined order
		 * between all VMAs) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMAs created for this object are placed in the @vma.tree
		 * for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list for
		 * easy iteration.
		 */
		struct rb_root tree;
	} vma;
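
	/*
	 * Illustrative sketch: callers normally look up (or lazily create) a
	 * VMA through i915_vma_instance() rather than walking @vma.list or
	 * @vma.tree directly, e.g. (error handling trimmed; not part of this
	 * header):
	 *
	 *	struct i915_vma *vma;
	 *
	 *	vma = i915_vma_instance(obj, vm, NULL);
	 *	if (IS_ERR(vma))
	 *		return PTR_ERR(vma);
	 */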

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMAs from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
	spinlock_t lut_lock; /* guards lut_list */

	/**
	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
	 *
	 * When we lock this object through i915_gem_object_lock() with a
	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
	 */
	struct list_head obj_link;
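
	/*
	 * A hedged sketch of the ww locking pattern referred to above, based
	 * on the i915_gem_ww_ctx helpers (interruptibility and error handling
	 * simplified; not part of this header):
	 *
	 *	struct i915_gem_ww_ctx ww;
	 *	int err;
	 *
	 *	i915_gem_ww_ctx_init(&ww, true);
	 * retry:
	 *	err = i915_gem_object_lock(obj, &ww);
	 *	if (!err) {
	 *		... use the object ...
	 *	}
	 *	if (err == -EDEADLK) {
	 *		err = i915_gem_ww_ctx_backoff(&ww);
	 *		if (!err)
	 *			goto retry;
	 *	}
	 *	i915_gem_ww_ctx_fini(&ww);
	 */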
	/**
	 * @shares_resv_from: The object shares the resv from this vm.
	 */
	struct i915_address_space *shares_resv_from;

	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT or any other supported
	 * fake offset mmap backed by lmem.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS  BIT(0)
#define I915_BO_ALLOC_VOLATILE    BIT(1)
#define I915_BO_ALLOC_CPU_CLEAR   BIT(2)
#define I915_BO_ALLOC_USER        BIT(3)
/* Object is allowed to lose its contents on suspend / resume, even if pinned */
#define I915_BO_ALLOC_PM_VOLATILE BIT(4)
/* Object needs to be restored early using memcpy during resume */
#define I915_BO_ALLOC_PM_EARLY    BIT(5)
/*
 * Object is likely never accessed by the CPU. This will prioritise the BO to be
 * allocated in the non-mappable portion of lmem. This is merely a hint, and if
 * dealing with userspace objects the CPU fault handler is free to ignore this.
 */
#define I915_BO_ALLOC_GPU_ONLY    BIT(6)
#define I915_BO_ALLOC_CCS_AUX     BIT(7)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
			     I915_BO_ALLOC_VOLATILE | \
			     I915_BO_ALLOC_CPU_CLEAR | \
			     I915_BO_ALLOC_USER | \
			     I915_BO_ALLOC_PM_VOLATILE | \
			     I915_BO_ALLOC_PM_EARLY | \
			     I915_BO_ALLOC_GPU_ONLY | \
			     I915_BO_ALLOC_CCS_AUX)
#define I915_BO_READONLY          BIT(8)
#define I915_TILING_QUIRK_BIT     9 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED         BIT(10)
	/**
	 * @mem_flags - Mutable placement-related flags
	 *
	 * These are flags that indicate specifics of the memory region
	 * the object is currently in. As such they are only stable
	 * either under the object lock or if the object is pinned.
	 */
	unsigned int mem_flags;
#define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
#define I915_BO_FLAG_IOMEM       BIT(1) /* Object backed by IO memory */
	/**
	 * @cache_level: The desired GTT caching level.
	 *
	 * See enum i915_cache_level for possible values, along with what
	 * each does.
	 */
	unsigned int cache_level:3;
	/**
	 * @cache_coherent:
	 *
	 * Track whether the pages are coherent with the GPU if reading or
	 * writing through the CPU caches. This largely depends on the
	 * @cache_level setting.
	 *
	 * On platforms which don't have the shared LLC (HAS_SNOOP), like on
	 * Atom platforms, coherency must be explicitly requested with some
	 * special GTT caching bits (see enum i915_cache_level). Enabling
	 * coherency does come at a performance and power cost on such
	 * platforms. On the flip side the kernel does not need to manually
	 * flush any buffers which need to be coherent with the GPU, if the
	 * object is not coherent, i.e. @cache_coherent is zero.
	 *
	 * On platforms that share the LLC with the CPU (HAS_LLC), all GT
	 * memory access will automatically snoop the CPU caches (even with
	 * CACHE_NONE). The one exception is when dealing with the display
	 * engine, like with scanout surfaces. To handle this the kernel will
	 * always flush the surface out of the CPU caches when preparing it for
	 * scanout. Also note that since scanout surfaces are only ever read by
	 * the display engine we only need to care about flushing any writes
	 * through the CPU cache, reads on the other hand will always be
	 * coherent.
	 *
	 * Something strange here is why @cache_coherent is not a simple
	 * boolean, i.e. coherent vs non-coherent. The reasoning for this is
	 * back to the display engine not being fully coherent. As a result
	 * scanout surfaces will either be marked as I915_CACHE_NONE or
	 * I915_CACHE_WT. In the case of seeing I915_CACHE_NONE the kernel
	 * makes the assumption that this is likely a scanout surface, and will
	 * set @cache_coherent as only I915_BO_CACHE_COHERENT_FOR_READ, on
	 * platforms with the shared LLC. The kernel uses this to always flush
	 * writes through the CPU cache as early as possible, where it can, in
	 * effect keeping @cache_dirty clean, so we can potentially avoid
	 * stalling when flushing the surface just before doing the scanout.
	 * This does mean we might unnecessarily flush non-scanout objects in
	 * some places, but the default assumption is that all normal objects
	 * should be using I915_CACHE_LLC, at least on platforms with the
	 * shared LLC.
	 *
	 * Supported values:
	 *
	 * I915_BO_CACHE_COHERENT_FOR_READ:
	 *
	 * On shared LLC platforms, we use this for special scanout surfaces,
	 * where the display engine is not coherent with the CPU cache. As such
	 * we need to ensure we flush any writes before doing the scanout. As an
	 * optimisation we try to flush any writes as early as possible to avoid
	 * stalling later.
	 *
	 * Thus for scanout surfaces using I915_CACHE_NONE, on shared LLC
	 * platforms, we use:
	 *
	 *	cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ
	 *
	 * While for normal objects that are fully coherent, including special
	 * scanout surfaces marked as I915_CACHE_WT, we use:
	 *
	 *	cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ |
	 *			 I915_BO_CACHE_COHERENT_FOR_WRITE
	 *
	 * And then for objects that are not coherent at all we use:
	 *
	 *	cache_coherent = 0
	 *
	 * I915_BO_CACHE_COHERENT_FOR_WRITE:
	 *
	 * When writing through the CPU cache, the GPU is still coherent. Note
	 * that this also implies I915_BO_CACHE_COHERENT_FOR_READ.
	 */
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_coherent:2;

	/**
	 * @cache_dirty:
	 *
	 * Track if we are dirty with writes through the CPU cache for this
	 * object. As a result reading directly from main memory might yield
	 * stale data.
	 *
	 * This also ties into whether the kernel is tracking the object as
	 * coherent with the GPU, as per @cache_coherent, as it determines if
	 * flushing might be needed at various points.
	 *
	 * Another part of @cache_dirty is managing flushing when first
	 * acquiring the pages for system memory, at this point the pages are
	 * considered foreign, so the default assumption is that the cache is
	 * dirty, for example the page zeroing done by the kernel might leave
	 * writes through the CPU cache, or swapping-in, while the actual data
	 * in main memory is potentially stale. Note that this is a potential
	 * security issue when dealing with userspace objects and zeroing. Now,
	 * whether we actually need to apply the big sledgehammer of flushing
	 * all the pages on acquire depends on if @cache_coherent is marked as
	 * I915_BO_CACHE_COHERENT_FOR_WRITE, i.e. that the GPU will be coherent
	 * for both reads and writes through the CPU cache.
	 *
	 * Note that on shared LLC platforms we still apply the heavy flush for
	 * I915_CACHE_NONE objects, under the assumption that this is going to
	 * be used for scanout.
	 *
	 * Update: On some hardware there is now also the 'Bypass LLC' MOCS
	 * entry, which defeats our @cache_coherent tracking, since userspace
	 * can freely bypass the CPU cache when touching the pages with the GPU,
	 * where the kernel is completely unaware. On such platforms we need to
	 * apply the sledgehammer-on-acquire regardless of the @cache_coherent.
	 *
	 * Special care is taken on non-LLC platforms, to prevent potential
	 * information leak. The driver currently ensures:
	 *
	 *   1. All userspace objects, by default, have @cache_level set as
	 *   I915_CACHE_NONE. The only exception is userptr objects, where we
	 *   instead force I915_CACHE_LLC, but we also don't allow userspace to
	 *   ever change the @cache_level for such objects. Another special case
	 *   is dma-buf, which doesn't rely on @cache_dirty, but there we
	 *   always do a forced flush when acquiring the pages, if there is a
	 *   chance that the pages can be read directly from main memory with
	 *   the GPU.
	 *
	 *   2. All I915_CACHE_NONE objects have @cache_dirty initially true.
	 *
	 *   3. All swapped-out objects (i.e. shmem) have @cache_dirty set to
	 *   true.
	 *
	 *   4. The @cache_dirty is never freely reset before the initial
	 *   flush, even if userspace adjusts the @cache_level through the
	 *   i915_gem_set_caching_ioctl.
	 *
	 *   5. All @cache_dirty objects (including swapped-in) are initially
	 *   flushed with a synchronous call to drm_clflush_sg in
	 *   __i915_gem_object_set_pages. The @cache_dirty can be freely reset
	 *   at this point. All further asynchronous clflushes are never
	 *   security critical, i.e. userspace is free to race against itself.
	 */
	unsigned int cache_dirty:1;

	/* @is_dpt: Object houses a display page table (DPT) */
	unsigned int is_dpt:1;

	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer __rcu *frontbuffer;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

	struct {
		/*
		 * Protects the pages and their use. Do not use directly, but
		 * instead go through the pin/unpin interfaces.
		 */
		atomic_t pages_pin_count;

		/**
		 * @shrink_pin: Prevents the pages from being made visible to
		 * the shrinker, while the shrink_pin is non-zero. Most users
		 * should pretty much never have to care about this, outside of
		 * some special use cases.
		 *
		 * By default most objects will start out as visible to the
		 * shrinker (if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the
		 * backing pages are attached to the object, like in
		 * __i915_gem_object_set_pages(). They will then be removed
		 * from the shrinker list once the pages are released.
		 *
		 * The @shrink_pin is incremented by calling
		 * i915_gem_object_make_unshrinkable(), which will also remove
		 * the object from the shrinker list, if the pin count was zero.
		 *
		 * Callers will then typically call
		 * i915_gem_object_make_shrinkable() or
		 * i915_gem_object_make_purgeable() to decrement the pin count,
		 * and make the pages visible again.
		 */
		atomic_t shrink_pin;
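
		/*
		 * Illustrative sketch of the pinning dance described above
		 * (not part of this header): code that must temporarily keep
		 * the pages away from the shrinker brackets its critical
		 * section with:
		 *
		 *	i915_gem_object_make_unshrinkable(obj);
		 *	... touch the pages without shrinker interference ...
		 *	i915_gem_object_make_shrinkable(obj);
		 */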

		/**
		 * @ttm_shrinkable: True when the object is using shmem pages
		 * underneath. Protected by the object lock.
		 */
		bool ttm_shrinkable;

		/**
		 * @unknown_state: Indicate that the object is effectively
		 * borked. This is write-once and set if we somehow encounter a
		 * fatal error when moving/clearing the pages, and we are not
		 * able to fallback to memcpy/memset, like on small-BAR systems.
		 * The GPU should also be wedged (or in the process) at this
		 * point.
		 *
		 * Only valid to read this after acquiring the dma-resv lock and
		 * waiting for all DMA_RESV_USAGE_KERNEL fences to be signalled,
		 * or if we otherwise know that the moving fence has signalled,
		 * and we are certain the pages underneath are valid for
		 * immediate access (under normal operation), like just prior to
		 * binding the object or when setting up the CPU fault handler.
		 * See i915_gem_object_has_unknown_state().
		 */
		bool unknown_state;

		/**
		 * Priority list of potential placements for this object.
		 */
		struct intel_memory_region **placements;
		int n_placements;

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;

		/**
		 * Memory manager resource allocated for this object. Only
		 * needed for the mock region.
		 */
		struct ttm_resource *res;

		/**
		 * Element within memory_region->objects or region->purgeable
		 * if the object is marked as DONTNEED. Access is protected by
		 * region->obj_lock.
		 */
		struct list_head region_link;

		struct i915_refct_sgt *rsgt;
		struct sg_table *pages;
		void *mapping;

		struct i915_page_sizes page_sizes;

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter get_page;
		struct i915_gem_object_page_iter get_dma_page;

		/**
		 * Element within i915->mm.shrink_list or i915->mm.purge_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		u32 tlb;
	} mm;

	struct {
		struct i915_refct_sgt *cached_io_rsgt;
		struct i915_gem_object_page_iter get_io_page;
		struct drm_i915_gem_object *backup;
		bool created:1;
	} ttm;

	/*
	 * Record which PXP key instance this object was created against (if
	 * any), so we can use it to determine if the encryption is valid by
	 * comparing against the current key instance.
	 */
	u32 pxp_key_instance;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
#ifdef CONFIG_MMU_NOTIFIER
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned long notifier_seq;

			struct mmu_interval_notifier notifier;
			struct page **pvec;
			int page_ref;
		} userptr;
#endif

		struct drm_mm_node *stolen;

		resource_size_t bo_offset;

		unsigned long scratch;
		u64 encode;

		void *gvt_info;
	};
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}
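
/*
 * Illustrative sketch: code holding a plain &drm_gem_object, such as a GEM
 * callback, recovers the i915 object with to_intel_bo(). The foo_* name is
 * made up for this example (not part of this header):
 *
 *	static void foo_handle_gem(struct drm_gem_object *gem)
 *	{
 *		struct drm_i915_gem_object *obj = to_intel_bo(gem);
 *
 *		... operate on obj ...
 *	}
 */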

#endif