/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

#include "gt/intel_gt.h"

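/*
 * Install a freshly acquired sg_table as the object's backing store: flush
 * the pages for GPU coherency if needed, reset the cached sg iterators,
 * record which GTT page sizes the layout supports and, if the object is
 * shrinkable, put it on the shrinker's lists.
 */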
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

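/*
 * Ask the backend (obj->ops->get_pages()) to acquire the backing pages,
 * refusing objects that have been marked as purgeable. The caller must
 * hold the object lock.
 */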
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before the pages are released by a single call to
 * i915_gem_object_unpin_pages() - the pages are only freed once they are
 * no longer referenced, either as a result of memory pressure (reaping
 * pages under the shrinker) or when the object itself is released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

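/*
 * As i915_gem_object_pin_pages(), but takes the object lock for the caller,
 * backing off and retrying on ww-mutex deadlock.
 */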
int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

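/* Empty the cached sg-lookup radix trees ready for a new set of pages */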
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

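/*
 * Undo the kernel mapping created by vmap()/vmap_pfn(); single-page
 * write-back mappings use page_address() directly and need no teardown.
 */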
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

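/*
 * Detach the object from its backing pages, undoing the bookkeeping done by
 * __i915_gem_object_set_pages(): remove any kernel mapping, reset the cached
 * sg lookups, take the object off the shrinker lists and, if the pages were
 * ever bound into the GTT, invalidate the GT TLBs. Returns the sg_table so
 * the caller can hand it back to the backend.
 */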
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		intel_wakeref_t wakeref;

		with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
			intel_gt_invalidate_tlbs(&i915->gt);
	}

	return pages;
}

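/*
 * Release the (unpinned) backing pages back to the backend via
 * obj->ops->put_pages().
 */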
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

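/*
 * As i915_gem_object_map_page(), but for objects without struct pages
 * (e.g. device local-memory): build a write-combined vmap from the PFNs
 * of the backing I/O region.
 */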
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	/*
	 * For discrete our CPU mappings need to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 *	- If the object can be placed in device local-memory, then the
	 *	  pages should be allocated and mapped as write-combined only.
	 *
	 *	- Everything else is always allocated and mapped as write-back,
	 *	  with the guarantee that everything is also coherent with the
	 *	  GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so no
	 * fudging needed there.
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		if (GEM_WARN_ON(type == I915_MAP_WC &&
				!static_cpu_has(X86_FEATURE_PAT)))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}

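/* As i915_gem_object_pin_map(), but taking the object lock for the caller */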
void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

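/*
 * Flush CPU writes made through the pinned kernel mapping so they are
 * visible to the GPU, clflushing the range when the mapping is write-back
 * and the object is not already coherent for writes.
 */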
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

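/*
 * Find the scatterlist entry containing page (or, for dma == true, dma page)
 * n of the object, and return the page's offset within that entry.
 */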
struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
			 bool dma)
{
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

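/* Return the struct page backing page index n of the object */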
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

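/*
 * Return the dma address of page n and, optionally, the number of bytes
 * remaining in its sg entry from that page onwards.
 */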
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

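/* As i915_gem_object_get_dma_address_len(), but without returning the length */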
dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}