/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

#include "gt/intel_gt.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
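	/*
	 * Worked example (illustrative; assumes a platform whose supported
	 * mask is 4K | 64K | 2M): with sg_page_sizes = 64K | 4K, the loop
	 * below sets sg = 64K | 4K. The 2M bit stays clear because
	 * phys & (~0u << ilog2(2M)) == 0, i.e. no chunk in the sg_table is
	 * large enough to back a 2M GTT page.
	 */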
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}
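/*
 * Illustrative usage sketch (not part of this file; assumes the inline
 * helpers declared in i915_gem_object.h): callers normally pin through
 * i915_gem_object_pin_pages(), which falls back to
 * __i915_gem_object_get_pages() on the slow path, and drop their
 * reference with i915_gem_object_unpin_pages():
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... access obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);
 */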

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		intel_wakeref_t wakeref;

		with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
			intel_gt_invalidate_tlbs(&i915->gt);
	}

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
		enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTEs (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);
	return vaddr;
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
		enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	if (type != I915_MAP_WC)
		return NULL;

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return NULL;
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);
	return vaddr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		if (GEM_WARN_ON(type == I915_MAP_WC &&
				!static_cpu_has(X86_FEATURE_PAT)))
			ptr = NULL;
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}
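/*
 * Illustrative usage sketch (illustrative only; "data" and "size" stand in
 * for a caller's own buffer and length): pin + map, write through the CPU
 * mapping, flush with __i915_gem_object_flush_map() below, then drop the
 * mapping reference:
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	__i915_gem_object_flush_map(obj, 0, size);
 *	i915_gem_object_unpin_map(obj);
 */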

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
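	/*
	 * Illustrative layout (hypothetical indices): an sg entry covering
	 * pages 8..11 stores the sg pointer at index 8 and xa_mk_value(8)
	 * at indices 9-11, so a lookup of n == 10 first finds the value
	 * entry, re-reads index 8 and returns *offset = 2.
	 */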
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}