/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};
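
/*
 * Illustrative sketch, not part of this header: closing an object walks its
 * lut_list (declared in struct drm_i915_gem_object below) and drops every
 * handle that still points at it. This assumes struct i915_gem_context
 * exposes its handles_vma radix tree; the real close path also has to tear
 * down the vma itself, which is omitted here.
 */
#if 0
static void example_revoke_handles(struct drm_i915_gem_object *obj)
{
	struct i915_lut_handle *lut, *ln;

	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		/* Undo the handle -> vma translation for this context. */
		radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
		list_del(&lut->obj_link);
	}
	spin_unlock(&obj->lut_lock);
}
#endif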

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
#define I915_GEM_OBJECT_IS_PROXY BIT(3)
#define I915_GEM_OBJECT_NO_MMAP BIT(4)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	void (*truncate)(struct drm_i915_gem_object *obj);
	void (*writeback)(struct drm_i915_gem_object *obj);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
	void (*release)(struct drm_i915_gem_object *obj);

	const char *name; /* friendly name for debug, e.g. lockdep classes */
};
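
/*
 * Illustrative sketch, not part of this header: a backend describes itself
 * to the core by filling in a drm_i915_gem_object_ops. The function names
 * below are hypothetical placeholders for a shmem-like backend that has
 * struct pages and can be shrunk under memory pressure.
 */
#if 0
static int example_get_pages(struct drm_i915_gem_object *obj);
static void example_put_pages(struct drm_i915_gem_object *obj,
			      struct sg_table *pages);

static const struct drm_i915_gem_object_ops example_ops = {
	.name = "example-gem-object",
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = example_get_pages,
	.put_pages = example_put_pages,
};
#endif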

enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_WC,
	I915_MMAP_TYPE_WB,
	I915_MMAP_TYPE_UC,
};

struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
};
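
/*
 * Illustrative sketch, not part of this header: the mmap_type stored in an
 * i915_mmap_offset tells the fault handler which caching mode to apply to
 * the userspace mapping. A hypothetical helper might translate it like so;
 * GTT mappings go through the aperture and keep the default protection.
 */
#if 0
static pgprot_t example_mmap_pgprot(struct vm_area_struct *vma,
				    enum i915_mmap_type type)
{
	pgprot_t prot = vm_get_page_prot(vma->vm_flags);

	switch (type) {
	case I915_MMAP_TYPE_WC:
		return pgprot_writecombine(prot);
	case I915_MMAP_TYPE_UC:
		return pgprot_noncached(prot);
	default: /* I915_MMAP_TYPE_GTT, I915_MMAP_TYPE_WB */
		return prot;
	}
}
#endif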

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type: all GGTT VMAs
		 * are placed at the head and all ppGTT VMAs are placed at
		 * the tail. The different types of GGTT VMAs are unordered
		 * between themselves, so use @vma.tree (which has a defined
		 * order between all VMAs) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMAs created for this object are placed in @vma.tree
		 * for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list for
		 * easy iteration.
		 */
		struct rb_root tree;
	} vma;

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMAs from
	 * the fast lookup index in the associated contexts; @lut_list
	 * provides this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
	spinlock_t lut_lock; /* guards lut_list */

	/**
	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
	 *
	 * When we lock this object through i915_gem_object_lock() with a
	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() is
	 * called (see the locking sketch after this struct).
	 */
	struct list_head obj_link;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
#define I915_BO_READONLY BIT(2)
#define I915_BO_WAS_BOUND_BIT 3

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant PTE bit.
	 */
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;

	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer __rcu *frontbuffer;

	/**
	 * Current tiling mode and stride for the object, if it's tiled:
	 * the tiling mode lives in the low bits (TILING_MASK) and the
	 * stride in the rest (see the example accessors after this struct).
	 */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

	struct {
		/*
		 * Protects the pages and their use. Do not use directly, but
		 * instead go through the pin/unpin interfaces (see the
		 * pinning sketch at the end of this header).
		 */
		struct mutex lock;
		atomic_t pages_pin_count;
		atomic_t shrink_pin;

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;
		/**
		 * List of memory region blocks allocated for this object.
		 */
		struct list_head blocks;
		/**
		 * Element within memory_region->objects or region->purgeable
		 * if the object is marked as DONTNEED. Access is protected by
		 * region->obj_lock.
		 */
		struct list_head region_link;

		struct sg_table *pages;
		void *mapping;

		struct i915_page_sizes {
			/**
			 * The sg mask of the pages' sg_table, i.e. the mask
			 * of the lengths of each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple vma associated with this object we need to
			 * prevent any trampling of state, hence a copy of
			 * this struct also lives in each vma. The gtt value
			 * here should therefore only be read/written through
			 * the vma.
			 */
			unsigned int gtt;
		} page_sizes;

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;
		u64 encode;

		void *gvt_info;
	};
};
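
/*
 * Illustrative sketch, not part of this header: the canonical pattern for
 * taking the object lock under a ww context, as referenced by the @obj_link
 * comment above. On -EDEADLK the ww machinery asks us to back off and
 * retry; i915_gem_ww_ctx_fini() unlocks every object left on ww.obj_list.
 */
#if 0
static int example_use_object_locked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err) {
		/* obj is locked and linked into ww.obj_list via obj_link */
	}
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}
#endif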
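
/*
 * Illustrative sketch, not the driver's real accessors: tiling_and_stride
 * packs both values, with the tiling mode in the low bits (TILING_MASK)
 * and the stride in the rest (STRIDE_MASK); tiled strides are multiples
 * of FENCE_MINIMUM_STRIDE, which leaves the low bits free. Hypothetical
 * helpers would decompose the field like so:
 */
static inline unsigned int
example_object_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline unsigned int
example_object_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}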

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}
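
/*
 * Illustrative sketch, not part of this header: mm.lock is never taken
 * directly; callers pin the backing store around use, as the mm.lock
 * comment above prescribes. This assumes the driver's usual pinning
 * interfaces, i915_gem_object_pin_pages() and i915_gem_object_unpin_pages().
 */
#if 0
static int example_touch_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	/* obj->mm.pages is now safe to dereference. */

	i915_gem_object_unpin_pages(obj);
	return 0;
}
#endif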

#endif