/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. Replacing it with an RB-tree would
 * be an easy improvement, at least under heavy fragmentation.
 *
 * Aligned allocations could also be made more efficient.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
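
/*
 * Minimal usage sketch (hypothetical driver code, not part of this file):
 * a struct drm_mm_node is embedded in a driver object, the manager is set
 * up once for the address range it controls, and nodes are inserted and
 * removed as buffers come and go.
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// embedded allocation
 *	};
 *
 *	struct drm_mm mm;
 *	drm_mm_init(&mm, 0, 1024 * 1024);	// manage [0, 1 MiB)
 *
 *	struct my_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *	if (buf && drm_mm_insert_node(&mm, &buf->node, 4096, 0) == 0) {
 *		// the range [buf->node.start, buf->node.start + 4096)
 *		// now belongs to buf
 *		drm_mm_remove_node(&buf->node);
 *	}
 *	kfree(buf);
 *	drm_mm_takedown(&mm);
 */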

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm:	memory manager we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
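
/*
 * Sketch of the intended pre-allocation pattern (hypothetical caller;
 * driver->lock is a driver-private lock): top up the cache of unused nodes
 * while sleeping is still allowed, then allocate with atomic = 1 under the
 * spinlock, where GFP_KERNEL would be illegal and drm_mm_kmalloc() falls
 * back to the cache on GFP_ATOMIC failure.
 *
 *	struct drm_mm_node *node;
 *
 *	if (drm_mm_pre_get(mm))
 *		return -ENOMEM;
 *	spin_lock(&driver->lock);
 *	node = drm_mm_get_block_generic(hole_node, size, alignment, 0, 1);
 *	spin_unlock(&driver->lock);
 */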

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
					unsigned long start,
					unsigned long size,
					bool atomic)
{
	struct drm_mm_node *hole, *node;
	unsigned long end = start + size;
	unsigned long hole_start;
	unsigned long hole_end;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > start || hole_end < end)
			continue;

		node = drm_mm_kmalloc(mm, atomic);
		if (unlikely(node == NULL))
			return NULL;

		node->start = start;
		node->size = size;
		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return node;
	}

	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
	return NULL;
}
EXPORT_SYMBOL(drm_mm_create_block);

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
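
/*
 * Example (hypothetical caller): the node is caller-supplied and must be
 * zeroed before insertion, which is why kzalloc (or an embedded, zeroed
 * member) is used:
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	if (!node)
 *		return -ENOMEM;
 *	int ret = drm_mm_insert_node_generic(mm, node, size, alignment, color);
 *	if (ret) {			// -ENOSPC: no hole large enough
 *		kfree(node);
 *		return ret;
 *	}
 */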

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
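
/*
 * Example (hypothetical): keep an allocation inside the first 256 MiB of
 * the managed range, e.g. for hardware that cannot address beyond that
 * limit:
 *
 *	int ret = drm_mm_insert_node_in_range(mm, node, size, PAGE_SIZE,
 *					      0, 256 << 20);
 *	if (ret == -ENOSPC)
 *		return ret;	// no fitting hole inside [0, 256 MiB)
 */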

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}
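
/*
 * Worked example of the rounding above: for start = 0x1003, end = 0x3000,
 * size = 0x1000 and alignment = 0x1000, start % alignment = 3, so start is
 * bumped by 0x1000 - 3 to 0x2000; 0x3000 >= 0x2000 + 0x1000 still holds, so
 * the hole is usable and the function returns 1.
 */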

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
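
/*
 * Sketch of a node handover (hypothetical caller): the replacement node
 * takes over the old node's range in place, so the space never becomes
 * visible as a hole in between, which a remove + insert cycle would risk.
 *
 *	struct drm_mm_node *fresh = kzalloc(sizeof(*fresh), GFP_KERNEL);
 *	if (!fresh)
 *		return -ENOMEM;
 *	drm_mm_replace_node(old, fresh);	// fresh now owns the range
 *	kfree(old);				// assuming old was kmalloc'ed
 */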

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
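
/*
 * Sketch of the eviction loop the scan API is built for (hypothetical
 * driver code; obj, tmp, scan_link, evict_link, driver_lru and evict() are
 * all driver-side names). Blocks are added from an LRU until a hole of the
 * requested size appears, then every scanned block is removed again in
 * reverse order and only the ones the scan selected are evicted:
 *
 *	LIST_HEAD(scan_list);
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_init_scan(mm, size, alignment, color);
 *	list_for_each_entry(obj, &driver_lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node))
 *			break;			// a suitable hole exists
 *	}
 *
 *	// list_add() prepended, so walking scan_list forward is the exact
 *	// reverse of the addition order, as required above.
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *		list_del(&obj->scan_link);
 *		if (drm_mm_scan_remove_block(&obj->node))
 *			list_add(&obj->evict_link, &evict_list);
 *	}
 *
 *	// Only now, with the scan list empty, may nodes actually be freed.
 *	list_for_each_entry_safe(obj, tmp, &evict_list, evict_link)
 *		evict(obj);		// ends up in drm_mm_remove_node()
 */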

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/*
	 * Clever trick to avoid a special case in the free hole tracking:
	 * the sentinel head_node sits at start + size with a negative
	 * (wrapping) size, so the hole that "follows" it spans the entire
	 * managed range [start, start + size).
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif