/*
 * Copyright © 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define QUIET (__GFP_NORETRY | __GFP_NOWARN)

/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
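
/*
 * A worked example, assuming the kernel's usual IO_TLB_SEGSIZE of 128
 * slabs of 2KiB each (IO_TLB_SHIFT == 11) and 4KiB pages: 128 << 11 is
 * a 256KiB segment, which >> PAGE_SHIFT comes to 64 pages.
 */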

static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int npages;
	int max_order;
	gfp_t gfp;

	max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		unsigned int max_segment;
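
		/*
		 * Cap the allocation order at the swiotlb segment size, so
		 * that every sg chunk we build can still be bounced through
		 * the swiotlb if the DMA mapping requires it.
		 */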
		max_segment = swiotlb_max_segment();
		if (max_segment) {
			max_segment = max_t(unsigned int, max_segment,
					    PAGE_SIZE) >> PAGE_SHIFT;
			max_order = min(max_order, ilog2(max_segment));
		}
	}
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	npages = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;
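
	/*
	 * Greedily fill the object with the largest chunks available: try
	 * the highest order that fits the remainder first, then fall back
	 * to smaller orders (ultimately single pages) as allocations fail.
	 */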
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : 0), order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map, try again with single page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;
	return st;

err:
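	/*
	 * Leave the failing entry empty and terminate the table here, so
	 * that internal_free_pages() releases only what was allocated.
	 */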
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);
	return ERR_PTR(-ENOMEM);
}

static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);
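
	/*
	 * Reset the object to its initial, clean state: the backing store
	 * is gone, and the next get_pages must repopulate it from scratch.
	 */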
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

/**
 * i915_gem_object_create_internal - create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_object_internal_ops);

	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}
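
/*
 * An illustrative usage sketch (not called from this file): a typical
 * caller, such as a ringbuffer allocator, creates the object, pins its
 * pages for as long as the hardware may access them, and never assumes
 * the contents survive an unpin (nor that they start out zeroed):
 *
 *	struct drm_i915_gem_object *obj;
 *	int err;
 *
 *	obj = i915_gem_object_create_internal(i915, size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err) {
 *		i915_gem_object_put(obj);
 *		return err;
 *	}
 *
 *	... access the pages; contents are valid only whilst pinned ...
 *
 *	i915_gem_object_unpin_pages(obj);
 *	i915_gem_object_put(obj);
 */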