/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include <drm/i915_drm.h>

#include "i915_gem_request.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct list_head ctx_link;
	struct i915_gem_context *ctx;
	u32 handle;
};
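
/*
 * Illustrative sketch, not part of the driver code: when execbuf first
 * resolves a handle for a context, a translation entry is linked on both
 * the object and the context so that either side can tear the lookup down
 * on close. `obj', `ctx', `lut' and `handle' are assumed locals here, and
 * the context is assumed to keep a handles_list of its entries:
 *
 *	lut->ctx = ctx;
 *	lut->handle = handle;
 *	list_add(&lut->obj_link, &obj->lut_list);
 *	list_add(&lut->ctx_link, &ctx->handles_list);
 */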

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE   BIT(1)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*pwrite)(struct drm_i915_gem_object *,
		      const struct drm_i915_gem_pwrite *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
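
/*
 * A minimal sketch of how a backing store might implement this contract,
 * purely for illustration: the allocator helpers (my_alloc_st(),
 * my_free_st()) are hypothetical and stand in for whatever actually
 * builds and releases the scatterlist.
 *
 *	static struct sg_table *my_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		return my_alloc_st(obj->base.size);
 *	}
 *
 *	static void my_put_pages(struct drm_i915_gem_object *obj,
 *				 struct sg_table *pages)
 *	{
 *		my_free_st(pages);
 *	}
 *
 *	static const struct drm_i915_gem_object_ops my_ops = {
 *		.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */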

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/**
	 * @vma_list: List of VMAs backed by this object
	 *
	 * The VMAs on this list are ordered by type: all GGTT vma are placed
	 * at the head and all ppGTT vma are placed at the tail. The different
	 * types of GGTT vma are unordered between themselves; use the
	 * @vma_tree (which has a defined order between all VMAs) to find an
	 * exact match.
	 */
	struct list_head vma_list;
	/**
	 * @vma_tree: Ordered tree of VMAs backed by this object
	 *
	 * All VMAs created for this object are placed in the @vma_tree for
	 * fast retrieval via a binary search in i915_vma_instance().
	 * They are also added to @vma_list for easy iteration.
	 */
	struct rb_root vma_tree;
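
	/*
	 * Sketch of the lookup split described above (illustrative only,
	 * assuming the usual obj_link member of struct i915_vma): any GGTT
	 * binding can be found by walking @vma_list from the head, whereas
	 * an exact match goes through i915_vma_instance() and @vma_tree.
	 *
	 *	struct i915_vma *vma;
	 *
	 *	list_for_each_entry(vma, &obj->vma_list, obj_link) {
	 *		if (!i915_vma_is_ggtt(vma))
	 *			break; (only ppGTT vma follow)
	 *		(inspect the GGTT vma here)
	 *	}
	 */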

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMAs from
	 * the fast lookup index in the associated contexts; @lut_list
	 * provides this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
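
	/*
	 * A simplified sketch of that close-time teardown (illustrative
	 * only; the real path also closes the VMA it finds and uses the
	 * driver's slab cache rather than kfree()):
	 *
	 *	struct i915_lut_handle *lut, *ln;
	 *
	 *	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
	 *		radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
	 *		list_del(&lut->obj_link);
	 *		list_del(&lut->ctx_link);
	 *		kfree(lut);
	 *	}
	 */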

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_link;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	struct list_head userfault_link;

	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant PTE bit; the state
	 * itself lives in base.vma_node.readonly (see
	 * i915_gem_object_set_readonly() below).
	 */
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling mode and stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
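
	/*
	 * The packing works because FENCE_MINIMUM_STRIDE is a power of two
	 * larger than any tiling mode value: e.g. a Y-tiled object with a
	 * 4096 byte stride is stored as 4096 | I915_TILING_Y, and the
	 * i915_gem_object_get_tiling()/i915_gem_object_get_stride() helpers
	 * below mask the two halves apart again.
	 */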

	/** Count of VMAs actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	unsigned int pin_display;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;
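
	/*
	 * Typical use of the page state above (an illustrative sketch with
	 * error handling elided, using the pin/unpin helpers declared
	 * elsewhere in the driver): pin the pages before touching them and
	 * unpin afterwards; the shrinker may only reap them once
	 * pages_pin_count has dropped to zero.
	 *
	 *	err = i915_gem_object_pin_pages(obj);
	 *	if (err)
	 *		return err;
	 *	(access obj->mm.pages / obj->mm.mapping here)
	 *	i915_gem_object_unpin_pages(obj);
	 */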

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring where the write request is older than the
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;
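
	/*
	 * A sketch of how that asymmetry is used (illustrative only): a CPU
	 * read need only wait on the exclusive (write) fence, whereas a CPU
	 * write must wait for all fences. `write' selects wait_all below:
	 *
	 *	long ret;
	 *
	 *	ret = reservation_object_wait_timeout_rcu(obj->resv,
	 *						  write, true,
	 *						  MAX_SCHEDULE_TIMEOUT);
	 */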

	/** References from framebuffers, locks out tiling changes. */
	unsigned int framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;
	};

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
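
/*
 * Typical caller pattern, sketched for illustration (`args' stands for a
 * hypothetical ioctl argument struct carrying the userspace handle): the
 * lookup takes a reference that must be dropped with i915_gem_object_put()
 * once the caller is done.
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *	(use obj)
 *	i915_gem_object_put(obj);
 */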

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

__deprecated
extern void drm_gem_object_reference(struct drm_gem_object *);

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);

__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}
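
/*
 * A sketch of the expected pairing (illustrative only); these wrap the
 * object's reservation lock around any update of its fence state:
 *
 *	i915_gem_object_lock(obj);
 *	(update reservation / fence state here)
 *	i915_gem_object_unlock(obj);
 */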

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->base.vma_node.readonly = true;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->base.vma_node.readonly;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}

static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
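
/*
 * Worked example (for illustration): a Y-tiled object with a 4096 byte
 * stride has tiles 32 lines tall, so one full tile row spans
 * 4096 * 32 = 131072 bytes.
 */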

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

#endif