/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/bsd/helpers.h>
#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>
#include <types.h>

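/*
 * Note: prefetchable resources also carry IORESOURCE_MEM, so the
 * IORESOURCE_PREFETCH check below must come before the plain mem check.
 */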
static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

static void print_domain_res(const struct device *dev,
			     const struct resource *res, const char *suffix)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %u gran: %u limit: %llx%s\n",
	       dev_path(dev), resource2str(res), res->base, res->size,
	       res->align, res->gran, res->limit, suffix);
}

#define res_printk(depth, str, ...)	printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)

static void print_bridge_res(const struct device *dev, const struct resource *res,
			     int depth, const char *suffix)
{
	res_printk(depth, "%s %s: size: %llx align: %u gran: %u limit: %llx%s\n", dev_path(dev),
		   resource2str(res), res->size, res->align, res->gran, res->limit, suffix);
}

static void print_child_res(const struct device *dev, const struct resource *res, int depth)
{
	res_printk(depth + 1, "%s %02lx *  [0x%llx - 0x%llx] %s\n", dev_path(dev),
		   res->index, res->base, res->base + res->size - 1, resource2str(res));
}

static void print_fixed_res(const struct device *dev,
			    const struct resource *res, const char *prefix)
{
	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       prefix, dev_path(dev), res->index, res->base, res->base + res->size - 1,
	       resource2str(res));
}

static void print_assigned_res(const struct device *dev, const struct resource *res)
{
	printk(BIOS_DEBUG, "  %s %02lx *  [0x%llx - 0x%llx] limit: %llx %s\n",
	       dev_path(dev), res->index, res->base, res->limit, res->limit, resource2str(res));
}

static void print_failed_res(const struct device *dev, const struct resource *res)
{
	printk(BIOS_DEBUG, "  %s %02lx *  size: 0x%llx limit: %llx %s\n",
	       dev_path(dev), res->index, res->size, res->limit, resource2str(res));
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->downstream;
	return bus && bus->children;
}

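/*
 * Unless a resource is explicitly flagged IORESOURCE_ABOVE_4G (or the
 * ALWAYS_ALLOW_ABOVE_4G_ALLOCATION quirk is enabled), clamp its effective
 * limit to UINT32_MAX so that it ends up below 4G. For example, a 64-bit
 * BAR with limit UINT64_MAX but no IORESOURCE_ABOVE_4G flag is treated as
 * if its limit were 0xffffffff. Bridge windows are never clamped here.
 */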
static resource_t effective_limit(const struct resource *const res)
{
	if (CONFIG(ALWAYS_ALLOW_ABOVE_4G_ALLOCATION))
		return res->limit;

	/* Always allow bridge resources above 4G. */
	if (res->flags & IORESOURCE_BRIDGE)
		return res->limit;

	const resource_t quirk_4g_limit =
		res->flags & IORESOURCE_ABOVE_4G ? UINT64_MAX : UINT32_MAX;
	return MIN(res->limit, quirk_4g_limit);
}

/*
 * During pass 1, once all the requirements for downstream devices of a
 * bridge are gathered, this function calculates the overall resource
 * requirement for the bridge. It starts by picking the largest resource
 * requirement downstream for the given resource type and works by
 * adding requirements in descending order.
 *
 * Additionally, it takes alignment and limits of the downstream devices
 * into consideration and ensures that they get propagated to the bridge
 * resource. This is required to guarantee that the upstream bridge/
 * domain honors the limit and alignment requirements for this bridge
 * based on the tightest constraints downstream.
 *
 * Last but not least, it stores the offset inside the bridge resource
 * for each child resource in its base field. This simplifies pass 2
 * for resources behind a bridge, as we only have to add offsets to the
 * allocated base of the bridge resource.
 */
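/*
 * Worked example (hypothetical sizes): consider a bridge mem window with
 * gran 20 (PCI bridges forward memory in 1MiB-granular windows) and two
 * children: BAR A of size 0x4000/align 14 and BAR B of size 0x1000/align 12.
 * The loop below visits them largest-first:
 *   base = ALIGN_UP(0x0, 1 << 14)    = 0x0;    A's offset = 0x0,    base = 0x4000
 *   base = ALIGN_UP(0x4000, 1 << 12) = 0x4000; B's offset = 0x4000, base = 0x5000
 * The final requirement is ALIGN_UP(0x5000, 1 << 20) = 0x100000, i.e. one
 * full 1MiB window is requested from upstream, with align 14 and the
 * children's tightest limit propagated to the bridge resource.
 */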
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   int print_depth)
{
	const struct device *child;
	struct resource *child_res;
	resource_t base;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	const unsigned long type_match = bridge_res->flags & type_mask;
	struct bus *bus = bridge->downstream;

	child_res = NULL;

	/*
	 * `base` keeps track of where the next allocation for child resources
	 * can take place from within the bridge resource window. Since the
	 * bridge resource window allocation is not performed yet, it can start
	 * at 0. Base gets updated every time a resource requirement is
	 * accounted for in the loop below. After scanning all these resources,
	 * base will indicate the total size requirement for the current bridge
	 * resource window.
	 */
	base = 0;

	print_bridge_res(bridge, bridge_res, print_depth, "");

	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {
		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/* Resources with 0 limit can't be assigned anything. */
		if (!child_res->limit)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource. The
		 * condition can only be true for the first (largest) resource. For all
		 * other child resources, alignment is taken care of by rounding their
		 * base up.
		 */
		if (child_res->align > bridge_res->align)
			bridge_res->align = child_res->align;

		/*
		 * Propagate the resource limit to the bridge resource. If a downstream
		 * device has stricter requirements w.r.t. limits for any resource, that
		 * constraint needs to be propagated back up to the bridges downstream
		 * of the domain. This way, the whole bridge resource fulfills the limit.
		 */
		if (effective_limit(child_res) < bridge_res->limit)
			bridge_res->limit = effective_limit(child_res);

		/*
		 * Alignment value of 0 means that the child resource has no alignment
		 * requirements and so the base value remains unchanged here.
		 */
		base = ALIGN_UP(base, POWER_OF_2(child_res->align));

		/*
		 * Store the relative offset inside the bridge resource for later
		 * consumption in allocate_bridge_resources(), and invalidate flags
		 * related to the base.
		 */
		child_res->base = base;
		child_res->flags &= ~(IORESOURCE_ASSIGNED | IORESOURCE_STORED);

		print_child_res(child, child_res, print_depth);

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents
	 * the total size requirement for the current bridge resource window.
	 * This size needs to be rounded up to the granularity requirement of
	 * the bridge to ensure that the upstream bridge/domain allocates a big
	 * enough window.
	 */
	bridge_res->size = ALIGN_UP(base, POWER_OF_2(bridge_res->gran));

	print_bridge_res(bridge, bridge_res, print_depth, " done");
}

/*
 * During pass 1, at the bridge level, the resource allocator gathers
 * requirements from downstream devices and updates its own resource
 * windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->downstream;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges are
		 * gathered before updating the window for the current bridge resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;
			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for the current bridge resource now that all
		 * downstream requirements are gathered.
		 */
		update_bridge_resource(bridge, res, print_depth);
	}
}

/*
 * During pass 1, the resource allocator walks down the entire sub-tree
 * of a domain. It gathers resource requirements for every downstream
 * bridge by looking at the resource requests of its children. Thus, the
 * requirement gathering begins at the leaf devices and is propagated
 * back up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks
 * down that bridge to gather requirements for each resource type, i.e.
 * i/o, mem and prefmem. Since bridges have separate windows for mem and
 * prefmem, requirements for each need to be collected separately.
 *
 * Domain resource windows are fixed ranges and hence requirement
 * gathering does not result in any changes to these fixed ranges.
 */
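/*
 * For instance, given a hypothetical topology
 *   domain 0 -> bridge A -> { NIC, bridge B -> GPU }
 * the recursion in compute_bridge_resources() sizes bridge B's windows
 * from the GPU's requests first, and only then sizes bridge A's windows
 * from the NIC's requests plus bridge B's windows, once per resource
 * type (io, mem, prefmem).
 */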
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->downstream == NULL)
		return;

	for (child = domain->downstream->children; child; child = child->sibling) {
		/* Skip if this is not a bridge or has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

/*
 * Scan the entire tree to identify any fixed resources allocated by
 * any device to ensure that the address map for domain resources is
 * appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address space.
 * So, this function punches holes in the address space for all fixed
 * resources that are already defined. Both I/O and normal memory
 * resources are added as fixed. Both need to be removed from the address
 * space where dynamic resource allocations are sourced.
 */
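/*
 * Example with made-up addresses: if the domain provides a mem window
 * [0x80000000 - 0xdfffffff] and a device reports a fixed resource at
 * [0xa0000000 - 0xa0ffffff], punching the hole below splits the range
 * into [0x80000000 - 0x9fffffff] and [0xa1000000 - 0xdfffffff], so
 * dynamic allocations can never overlap the fixed range.
 */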
static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
				  unsigned long mask_match)
{
	const struct resource *res;
	const struct device *child;
	const struct bus *bus;

	for (res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & mask_match) != mask_match)
			continue;
		if (!res->size)
			continue;
		print_fixed_res(dev, res, __func__);
		memranges_create_hole(ranges, res->base, res->size);
	}

	bus = dev->downstream;
	if (bus == NULL)
		return;

	for (child = bus->children; child != NULL; child = child->sibling)
		avoid_fixed_resources(ranges, child, mask_match);
}

/*
 * This function creates a list of memranges of a given type using the
 * resource that is provided. It applies additional constraints to
 * ensure that the memranges do not overlap any of the fixed resources
 * under the domain. The domain typically provides a memrange for the
 * entire address space. Thus, it is up to the chipset to add DRAM and
 * all other windows which cannot be used for resource allocation as
 * fixed resources.
 */
static void setup_resource_ranges(const struct device *const domain,
				  const unsigned long type,
				  struct memranges *const ranges)
{
	/* Align mem resources to 2^12 (4KiB pages) at a minimum, so they
	   can be memory-mapped individually (e.g. for virtualization guests). */
	const unsigned char alignment = type == IORESOURCE_MEM ? 12 : 0;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_FIXED;

	memranges_init_empty_with_alignment(ranges, NULL, 0, alignment);

	for (struct resource *res = domain->resource_list; res != NULL; res = res->next) {
		if ((res->flags & type_mask) != type)
			continue;
		print_domain_res(domain, res, "");
		memranges_insert(ranges, res->base, res->limit - res->base + 1, type);
	}

	if (type == IORESOURCE_IO) {
		/*
		 * Don't allow allocations in the VGA I/O range. PCI has special
		 * cases for that.
		 */
		memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);

		/*
		 * The resource allocator no longer supports the legacy behavior where
		 * I/O resource allocation is guaranteed to avoid aliases over legacy
		 * PCI expansion card addresses.
		 */
	}

	avoid_fixed_resources(ranges, domain, type | IORESOURCE_FIXED);

	print_resource_ranges(domain, ranges);
}

static void cleanup_domain_resource_ranges(const struct device *dev, struct memranges *ranges,
					   unsigned long type)
{
	memranges_teardown(ranges);
	for (struct resource *res = dev->resource_list; res != NULL; res = res->next) {
		if (res->flags & IORESOURCE_FIXED)
			continue;
		if ((res->flags & IORESOURCE_TYPE_MASK) != type)
			continue;
		print_domain_res(dev, res, " done");
	}
}

static void assign_resource(struct resource *const res, const resource_t base,
			    const struct device *const dev)
{
	res->base = base;
	res->limit = res->base + res->size - 1;
	res->flags |= IORESOURCE_ASSIGNED;
	res->flags &= ~IORESOURCE_STORED;

	print_assigned_res(dev, res);
}

/*
 * This is where the actual allocation of resources happens during
 * pass 2. We construct a list of memory ranges corresponding to the
 * resources of a given type, then look for the biggest unallocated
 * resource on the downstream bus. This continues in descending order
 * until all resources of a given type have space allocated within the
 * domain's resource window.
 */
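/*
 * Sketch (hypothetical requests): with three mem resources of sizes
 * 0x100000, 0x20000 and 0x1000 below the domain, largest_resource()
 * returns them in exactly that order, and each memranges_steal() call
 * carves the request out of a free range that satisfies the resource's
 * alignment and effective limit, from the top of the address space if
 * RESOURCE_ALLOCATION_TOP_DOWN is enabled, else from the bottom.
 */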
static void allocate_toplevel_resources(const struct device *const domain,
					const unsigned long type)
{
	const unsigned long type_mask = IORESOURCE_TYPE_MASK;
	struct resource *res = NULL;
	const struct device *dev;
	struct memranges ranges;
	resource_t base;

	if (!dev_has_children(domain))
		return;

	setup_resource_ranges(domain, type, &ranges);

	while ((dev = largest_resource(domain->downstream, &res, type_mask, type))) {
		if (!res->size)
			continue;

		if (!memranges_steal(&ranges, effective_limit(res), res->size, res->align,
				     type, &base, CONFIG(RESOURCE_ALLOCATION_TOP_DOWN))) {
			printk(BIOS_ERR, "Resource didn't fit!!!\n");
			print_failed_res(dev, res);
			continue;
		}

		assign_resource(res, base, dev);
	}

	cleanup_domain_resource_ranges(domain, &ranges, type);
}

/*
 * Pass 2 of the resource allocator at the bridge level loops through
 * all the resources for the bridge and assigns all the base addresses
 * of its children's resources of the same type. update_bridge_resource()
 * of pass 1 pre-calculated the offsets of these bases inside the bridge
 * resource. Now that the bridge resource is allocated, all we have to
 * do is to add its final base to these offsets.
 *
 * Once allocation at the current bridge is complete, the resource
 * allocator continues walking down the downstream bridges until it
 * hits the leaf devices.
 */
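/*
 * Example (hypothetical numbers): if pass 1 stored offsets 0x0 and 0x4000
 * in two child resources of a bridge mem window, and that window was just
 * assigned base 0xc0000000, the callback below rebases the children to
 * 0xc0000000 and 0xc0004000, respectively.
 */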
static void assign_resource_cb(void *param, struct device *dev, struct resource *res)
{
	/* We have to filter the same resources as update_bridge_resource(). */
	if (!res->size || !res->limit)
		return;

	assign_resource(res, *(const resource_t *)param + res->base, dev);
}

static void allocate_bridge_resources(const struct device *bridge)
{
	const unsigned long type_mask =
		IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH | IORESOURCE_FIXED;
	struct bus *const bus = bridge->downstream;
	struct resource *res;
	struct device *child;

	for (res = bridge->resource_list; res != NULL; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if (!(res->flags & IORESOURCE_ASSIGNED))
			continue;

		/* Run assign_resource_cb() for all downstream resources of the same type. */
		search_bus_resources(bus, type_mask, res->flags & type_mask,
				     assign_resource_cb, &res->base);
	}

	for (child = bus->children; child != NULL; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}

/*
 * Pass 2 of the resource allocator begins at the domain level. Every domain
 * has two types of resources: io and mem. For each of these resources,
 * this function creates a list of memory ranges that can be used for
 * downstream resource allocation. This list is constrained to remove
 * any fixed resources in the domain sub-tree of the given resource
 * type. It then uses the memory ranges to apply best fit on the
 * resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain,
 * it walks down each downstream bridge to finish resource assignment
 * of its children resources within its own window.
 */
static void allocate_domain_resources(const struct device *domain)
{
	/* Resource type I/O */
	allocate_toplevel_resources(domain, IORESOURCE_IO);

	/*
	 * Resource type Mem:
	 * The domain does not distinguish between mem and prefmem resources. Thus,
	 * the resource allocation at the domain level considers mem and prefmem
	 * together when finding the best fit based on the biggest resource
	 * requirement.
	 */
	allocate_toplevel_resources(domain, IORESOURCE_MEM);

	struct device *child;
	for (child = domain->downstream->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}

/*
 * This function forms the guts of the resource allocator. It walks
 * through the entire device tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be
 * relaxed based on the requirements of the downstream devices. They
 * represent the available windows from which resources can be allocated
 * to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the
 * resource allocator walks in a DFS fashion. It gathers the requirements
 * from leaf devices and propagates those back up to their upstream
 * bridges until the requirements for all the downstream devices of the
 * domain are gathered. This is referred to as pass 1 of the resource
 * allocator.
 *
 * Once the requirements for all the devices under the domain are
 * gathered, the resource allocator walks a second time to allocate
 * resources to downstream devices as per the requirements. It always
 * picks the biggest resource request per type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream
 * device of the domain. In order to accomplish best fit for the
 * resources, a list of ranges is maintained per resource type (i/o
 * and mem). At the domain level we don't differentiate between mem and
 * prefmem. Since they are allocated space from the same window, the
 * resource allocator at the domain level ensures that the biggest
 * requirement is selected independently of the prefetch type. Once the
 * resource allocation for all immediate downstream devices is complete
 * at the domain level, the resource allocator walks down the subtree
 * for each downstream bridge to continue the allocation process at the
 * bridge level. Since bridges have either their whole window allocated
 * or nothing, we only need to place downstream resources inside these
 * windows by re-using offsets that were pre-calculated in pass 1. This
 * continues until resource allocation is realized for all downstream
 * bridges in the domain sub-tree. This is referred to as pass 2 of the
 * resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *  - Allocate resource locations for every device as long as
 *    the requirements can be satisfied.
 *  - Don't overlap with resources in fixed locations.
 *  - Don't overlap and follow the rules of bridges -- downstream
 *    devices of bridges should use parts of the address space
 *    allocated to the bridge.
 */
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->downstream == NULL))
		return;

	for (child = root->downstream->children; child; child = child->sibling) {
		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Relative placement. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (relative placement) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}