/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_vgpu.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

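/*
 * Allocate a node from the stolen drm_mm allocator, restricted to the
 * requested address range. The first page of stolen is skipped on gen8+
 * (WaSkipStolenMemoryFirstPage) and the insertion is serialised by
 * stolen_lock.
 */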
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

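/*
 * Convenience wrapper around i915_gem_stolen_insert_node_in_range() that
 * allocates anywhere in stolen memory above I915_GEM_STOLEN_BIAS.
 */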
int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

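/* Return a previously allocated node to the stolen drm_mm allocator. */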
void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

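/*
 * Sanity-check and trim the stolen region reported by the firmware: carve out
 * the GTT if it happens to live inside stolen on old platforms, and request
 * the resulting range so that nothing else in the system claims it.
 */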
static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (IS_GEN(i915, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && !IS_GEN(i915, 3)) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

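/* Tear down the stolen memory allocator, if it was ever initialised. */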
static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

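/*
 * The *_get_stolen_reserved() helpers decode the per-platform STOLEN_RESERVED
 * register into the base and size of the portion of stolen memory that is
 * reserved and must be excluded from the usable stolen space. On G4x the base
 * comes from the ADDR2 field and the reservation extends to the top of stolen.
 */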
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, IS_GEN(i915, 5),
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

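/*
 * On gen6 the reserved base address and an encoded size are both read from
 * GEN6_STOLEN_RESERVED.
 */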
static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

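/*
 * Valleyview encodes only the reserved size; the base is implied as
 * (top of stolen - size).
 */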
static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

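/*
 * Gen7 parts other than Valleyview use the gen7 address and size fields of
 * GEN6_STOLEN_RESERVED.
 */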
static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

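/*
 * LP parts (e.g. Cherryview) combine the gen6 address mask with the gen8
 * size encoding.
 */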
static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

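/*
 * On gen8-gen10 parts other than the LP variants only the reserved base is
 * encoded; the reservation extends to the top of stolen.
 */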
static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

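/*
 * From gen11 onwards GEN6_STOLEN_RESERVED is read as a 64-bit register; the
 * base uses the wider gen11 address mask while the size keeps the gen8
 * encoding.
 */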
static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

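/*
 * Discover the stolen memory handed to the GPU by the firmware, subtract the
 * reserved portion at its top, and seed the drm_mm allocator with the
 * remaining usable space.
 */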
static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	i915->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		fallthrough;
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(i915));
		fallthrough;
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}

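/*
 * Build a single-entry sg_table covering a chunk of stolen memory; there are
 * no struct pages behind it, only a DMA address pointing into stolen.
 */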
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_object_release_memory_region(obj);

	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

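/*
 * Wrap an already-reserved stolen range (drm_mm node) in a GEM object and
 * immediately pin its pages.
 */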
static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct intel_memory_region *mem,
				struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	i915_gem_object_init_memory_region(obj, mem, 0);

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}

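/*
 * Memory-region backend for object creation: allocate a fresh range from the
 * stolen allocator and wrap it in a GEM object.
 */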
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct intel_memory_region *mem,
			       resource_size_t size,
			       unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}

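/* Allocate a contiguous GEM object from the stolen memory region. */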
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
					     size, I915_BO_ALLOC_CONTIGUOUS);
}

static int init_stolen(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "stolen");

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem->i915);
}

static void release_stolen(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_ops = {
	.init = init_stolen,
	.release = release_stolen,
	.create_object = _i915_gem_object_create_stolen,
};

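/* Register stolen memory as an intel_memory_region using the ops above. */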
struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915,
					  intel_graphics_stolen_res.start,
					  resource_size(&intel_graphics_stolen_res),
					  PAGE_SIZE, 0,
					  &i915_region_stolen_ops);
}

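/*
 * Create a GEM object around a range of stolen memory that the firmware has
 * already populated (e.g. the boot framebuffer), reserving exactly the given
 * offset and size instead of allocating a new range.
 */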
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	drm_dbg(&i915->drm,
		"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
		&stolen_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (GEM_WARN_ON(size == 0) ||
	    GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_stolen;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	return obj;

err_stolen:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}
740