Lines Matching full:object (mm/kmemleak.c)
50 * Note that the kmemleak_object.use_count is incremented when an object is
58 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
60 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
119 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
144 * object->lock. Insertions or deletions from object_list, gray_list or
151 unsigned int flags; /* object status flags */
156 /* object usage count; object freed when use_count == 0 */
164 /* the total number of pointers found pointing to this object */
168 /* memory ranges to be scanned inside an object (empty for all) */
179 /* flag set after the first reporting of an unreferenced object */
181 /* flag set to not scan the object */
197 /* search tree for object boundaries */
239 * Early object allocation/freeing logging. Kmemleak is initialized after the
306 * with the object->lock held.
309 struct kmemleak_object *object) in hex_dump_object() argument
311 const u8 *ptr = (const u8 *)object->pointer; in hex_dump_object()
315 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); in hex_dump_object()
325 * Object colors, encoded with count and min_count:
326 * - white - orphan object, not enough references to it (count < min_count)
331 * Newly created objects don't have any color assigned (object->count == -1)
334 static bool color_white(const struct kmemleak_object *object) in color_white() argument
336 return object->count != KMEMLEAK_BLACK && in color_white()
337 object->count < object->min_count; in color_white()
340 static bool color_gray(const struct kmemleak_object *object) in color_gray() argument
342 return object->min_count != KMEMLEAK_BLACK && in color_gray()
343 object->count >= object->min_count; in color_gray()
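To make the count/min_count encoding concrete, here is a minimal userspace sketch of the same rules (illustrative only; the KMEMLEAK_GREY = 0 and KMEMLEAK_BLACK = -1 values are taken from kmemleak.c):

#include <stdbool.h>
#include <stdio.h>

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	(-1)

struct obj { int count, min_count; };

/* white: not enough references found yet -> leak candidate */
static bool is_white(const struct obj *o)
{
	return o->count != KMEMLEAK_BLACK && o->count < o->min_count;
}

/* gray: enough references, or forced gray with min_count == KMEMLEAK_GREY */
static bool is_gray(const struct obj *o)
{
	return o->min_count != KMEMLEAK_BLACK && o->count >= o->min_count;
}

int main(void)
{
	struct obj leaked  = { .count = 0, .min_count = 1 };              /* white */
	struct obj healthy = { .count = 2, .min_count = 1 };              /* gray  */
	struct obj ignored = { .count = 0, .min_count = KMEMLEAK_BLACK }; /* black */

	printf("%d %d %d\n", is_white(&leaked), is_gray(&healthy), is_gray(&ignored));
	return 0;
}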
351 static bool unreferenced_object(struct kmemleak_object *object) in unreferenced_object() argument
353 return (color_white(object) && object->flags & OBJECT_ALLOCATED) && in unreferenced_object()
354 time_before_eq(object->jiffies + jiffies_min_age, in unreferenced_object()
360 * print_unreferenced function must be called with the object->lock held.
363 struct kmemleak_object *object) in print_unreferenced() argument
366 unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies); in print_unreferenced()
368 seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", in print_unreferenced()
369 object->pointer, object->size); in print_unreferenced()
371 object->comm, object->pid, object->jiffies, in print_unreferenced()
373 hex_dump_object(seq, object); in print_unreferenced()
376 for (i = 0; i < object->trace_len; i++) { in print_unreferenced()
377 void *ptr = (void *)object->trace[i]; in print_unreferenced()
385 * the object->lock held.
387 static void dump_object_info(struct kmemleak_object *object) in dump_object_info() argument
391 trace.nr_entries = object->trace_len; in dump_object_info()
392 trace.entries = object->trace; in dump_object_info()
394 pr_notice("Object 0x%08lx (size %zu):\n", in dump_object_info()
395 object->pointer, object->size); in dump_object_info()
397 object->comm, object->pid, object->jiffies); in dump_object_info()
398 pr_notice(" min_count = %d\n", object->min_count); in dump_object_info()
399 pr_notice(" count = %d\n", object->count); in dump_object_info()
400 pr_notice(" flags = 0x%x\n", object->flags); in dump_object_info()
401 pr_notice(" checksum = %u\n", object->checksum); in dump_object_info()
407 * Look up a memory block's metadata (kmemleak_object) in the object search
417 struct kmemleak_object *object = in lookup_object() local
419 if (ptr < object->pointer) in lookup_object()
420 rb = object->rb_node.rb_left; in lookup_object()
421 else if (object->pointer + object->size <= ptr) in lookup_object()
422 rb = object->rb_node.rb_right; in lookup_object()
423 else if (object->pointer == ptr || alias) in lookup_object()
424 return object; in lookup_object()
426 kmemleak_warn("Found object by alias at 0x%08lx\n", in lookup_object()
428 dump_object_info(object); in lookup_object()
436 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
437 * that once an object's use_count has reached 0, the RCU freeing has already been
438 * registered and the object should no longer be used. This function must be
441 static int get_object(struct kmemleak_object *object) in get_object() argument
443 return atomic_inc_not_zero(&object->use_count); in get_object()
453 struct kmemleak_object *object = in free_object_rcu() local
458 * code accessing this object, hence no need for locking. in free_object_rcu()
460 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { in free_object_rcu()
464 kmem_cache_free(object_cache, object); in free_object_rcu()
468 * Decrement the object use_count. Once the count is 0, free the object using
474 static void put_object(struct kmemleak_object *object) in put_object() argument
476 if (!atomic_dec_and_test(&object->use_count)) in put_object()
480 WARN_ON(object->flags & OBJECT_ALLOCATED); in put_object()
482 call_rcu(&object->rcu, free_object_rcu); in put_object()
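get_object()/put_object() follow the common "increment only if still non-zero" refcounting pattern; a userspace sketch of the idea (illustrative only, not the kernel's atomic API):

#include <stdatomic.h>
#include <stdbool.h>

struct ref { atomic_int use_count; };	/* set to 1 when the object is created */

/* Mirrors get_object()/atomic_inc_not_zero(): pin the object only while live. */
bool ref_get(struct ref *r)
{
	int old = atomic_load(&r->use_count);

	while (old != 0)
		if (atomic_compare_exchange_weak(&r->use_count, &old, old + 1))
			return true;	/* pinned */
	return false;			/* already queued for freeing, do not touch */
}

/* Mirrors put_object(): dropping the last reference triggers the free
 * (kmemleak defers the actual free to an RCU callback via call_rcu()). */
bool ref_put(struct ref *r)
{
	return atomic_fetch_sub(&r->use_count, 1) == 1;
}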
486 * Look up an object in the object search tree and increase its use_count.
491 struct kmemleak_object *object; in find_and_get_object() local
495 object = lookup_object(ptr, alias); in find_and_get_object()
498 /* check whether the object is still available */ in find_and_get_object()
499 if (object && !get_object(object)) in find_and_get_object()
500 object = NULL; in find_and_get_object()
503 return object; in find_and_get_object()
507 * Look up an object in the object search tree and remove it from both
508 * object_tree_root and object_list. The returned object's use_count should be
514 struct kmemleak_object *object; in find_and_remove_object() local
517 object = lookup_object(ptr, alias); in find_and_remove_object()
518 if (object) { in find_and_remove_object()
519 rb_erase(&object->rb_node, &object_tree_root); in find_and_remove_object()
520 list_del_rcu(&object->object_list); in find_and_remove_object()
524 return object; in find_and_remove_object()
551 struct kmemleak_object *object, *parent; in create_object() local
554 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); in create_object()
555 if (!object) { in create_object()
561 INIT_LIST_HEAD(&object->object_list); in create_object()
562 INIT_LIST_HEAD(&object->gray_list); in create_object()
563 INIT_HLIST_HEAD(&object->area_list); in create_object()
564 spin_lock_init(&object->lock); in create_object()
565 atomic_set(&object->use_count, 1); in create_object()
566 object->flags = OBJECT_ALLOCATED; in create_object()
567 object->pointer = ptr; in create_object()
568 object->size = size; in create_object()
569 object->excess_ref = 0; in create_object()
570 object->min_count = min_count; in create_object()
571 object->count = 0; /* white color initially */ in create_object()
572 object->jiffies = jiffies; in create_object()
573 object->checksum = 0; in create_object()
577 object->pid = 0; in create_object()
578 strncpy(object->comm, "hardirq", sizeof(object->comm)); in create_object()
580 object->pid = 0; in create_object()
581 strncpy(object->comm, "softirq", sizeof(object->comm)); in create_object()
583 object->pid = current->pid; in create_object()
590 strncpy(object->comm, current->comm, sizeof(object->comm)); in create_object()
594 object->trace_len = __save_stack_trace(object->trace); in create_object()
610 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n", in create_object()
617 kmem_cache_free(object_cache, object); in create_object()
618 object = NULL; in create_object()
622 rb_link_node(&object->rb_node, rb_parent, link); in create_object()
623 rb_insert_color(&object->rb_node, &object_tree_root); in create_object()
625 list_add_tail_rcu(&object->object_list, &object_list); in create_object()
628 return object; in create_object()
632 * Mark the object as not allocated and schedule RCU freeing via put_object().
634 static void __delete_object(struct kmemleak_object *object) in __delete_object() argument
638 WARN_ON(!(object->flags & OBJECT_ALLOCATED)); in __delete_object()
639 WARN_ON(atomic_read(&object->use_count) < 1); in __delete_object()
645 spin_lock_irqsave(&object->lock, flags); in __delete_object()
646 object->flags &= ~OBJECT_ALLOCATED; in __delete_object()
647 spin_unlock_irqrestore(&object->lock, flags); in __delete_object()
648 put_object(object); in __delete_object()
657 struct kmemleak_object *object; in delete_object_full() local
659 object = find_and_remove_object(ptr, 0); in delete_object_full()
660 if (!object) { in delete_object_full()
662 kmemleak_warn("Freeing unknown object at 0x%08lx\n", in delete_object_full()
667 __delete_object(object); in delete_object_full()
677 struct kmemleak_object *object; in delete_object_part() local
680 object = find_and_remove_object(ptr, 1); in delete_object_part()
681 if (!object) { in delete_object_part()
683 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n", in delete_object_part()
696 start = object->pointer; in delete_object_part()
697 end = object->pointer + object->size; in delete_object_part()
699 create_object(start, ptr - start, object->min_count, in delete_object_part()
702 create_object(ptr + size, end - ptr - size, object->min_count, in delete_object_part()
705 __delete_object(object); in delete_object_part()
708 static void __paint_it(struct kmemleak_object *object, int color) in __paint_it() argument
710 object->min_count = color; in __paint_it()
712 object->flags |= OBJECT_NO_SCAN; in __paint_it()
715 static void paint_it(struct kmemleak_object *object, int color) in paint_it() argument
719 spin_lock_irqsave(&object->lock, flags); in paint_it()
720 __paint_it(object, color); in paint_it()
721 spin_unlock_irqrestore(&object->lock, flags); in paint_it()
726 struct kmemleak_object *object; in paint_ptr() local
728 object = find_and_get_object(ptr, 0); in paint_ptr()
729 if (!object) { in paint_ptr()
730 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n", in paint_ptr()
736 paint_it(object, color); in paint_ptr()
737 put_object(object); in paint_ptr()
741 * Mark an object permanently as gray-colored so that it can no longer be
750 * Mark the object as black-colored so that it is ignored from scans and
759 * Add a scanning area to the object. If at least one such area is added,
765 struct kmemleak_object *object; in add_scan_area() local
768 object = find_and_get_object(ptr, 1); in add_scan_area()
769 if (!object) { in add_scan_area()
770 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n", in add_scan_area()
781 spin_lock_irqsave(&object->lock, flags); in add_scan_area()
783 size = object->pointer + object->size - ptr; in add_scan_area()
784 } else if (ptr + size > object->pointer + object->size) { in add_scan_area()
785 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); in add_scan_area()
786 dump_object_info(object); in add_scan_area()
795 hlist_add_head(&area->node, &object->area_list); in add_scan_area()
797 spin_unlock_irqrestore(&object->lock, flags); in add_scan_area()
799 put_object(object); in add_scan_area()
803 * Any surplus references (object already gray) to 'ptr' are passed to
805 * vm_struct may be used as an alternative reference to the vmalloc'ed object
811 struct kmemleak_object *object; in object_set_excess_ref() local
813 object = find_and_get_object(ptr, 0); in object_set_excess_ref()
814 if (!object) { in object_set_excess_ref()
815 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n", in object_set_excess_ref()
820 spin_lock_irqsave(&object->lock, flags); in object_set_excess_ref()
821 object->excess_ref = excess_ref; in object_set_excess_ref()
822 spin_unlock_irqrestore(&object->lock, flags); in object_set_excess_ref()
823 put_object(object); in object_set_excess_ref()
827 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
828 * pointer. Such an object will not be scanned by kmemleak, but references to it
834 struct kmemleak_object *object; in object_no_scan() local
836 object = find_and_get_object(ptr, 0); in object_no_scan()
837 if (!object) { in object_no_scan()
838 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr); in object_no_scan()
842 spin_lock_irqsave(&object->lock, flags); in object_no_scan()
843 object->flags |= OBJECT_NO_SCAN; in object_no_scan()
844 spin_unlock_irqrestore(&object->lock, flags); in object_no_scan()
845 put_object(object); in object_no_scan()
890 struct kmemleak_object *object; in early_alloc() local
898 * RCU locking needed to ensure object is not freed via put_object(). in early_alloc()
901 object = create_object((unsigned long)log->ptr, log->size, in early_alloc()
903 if (!object) in early_alloc()
905 spin_lock_irqsave(&object->lock, flags); in early_alloc()
907 object->trace[i] = log->trace[i]; in early_alloc()
908 object->trace_len = log->trace_len; in early_alloc()
909 spin_unlock_irqrestore(&object->lock, flags); in early_alloc()
929 * kmemleak_alloc - register a newly allocated object
930 * @ptr: pointer to beginning of the object
931 * @size: size of the object
932 * @min_count: minimum number of references to this object. If during memory
934 * the object is reported as a memory leak. If @min_count is 0,
935 * the object is never reported as a leak. If @min_count is -1,
936 * the object is ignored (not scanned and not reported as a leak)
939 * This function is called from the kernel allocators when a new object
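As an illustration of the registration side, a hypothetical driver helper that allocates whole pages (which kmemleak does not track by itself) could register and unregister the block manually; my_buf_alloc()/my_buf_free() are made-up names:

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>

/* Hypothetical page-backed buffer, registered with kmemleak by hand. */
static void *my_buf_alloc(size_t size, gfp_t gfp)
{
	void *ptr = (void *)__get_free_pages(gfp, get_order(size));

	if (ptr)
		/* min_count = 1: report a leak if no references are found */
		kmemleak_alloc(ptr, size, 1, gfp);
	return ptr;
}

static void my_buf_free(void *ptr, size_t size)
{
	kmemleak_free(ptr);
	free_pages((unsigned long)ptr, get_order(size));
}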
955 * kmemleak_alloc_percpu - register a newly allocated __percpu object
956 * @ptr: __percpu pointer to beginning of the object
957 * @size: size of the object
960 * This function is called from the kernel percpu allocator when a new object
984 * kmemleak_vmalloc - register a newly vmalloc'ed object
986 * @size: size of the object
990 * object (memory block) is allocated.
1014 * kmemleak_free - unregister a previously registered object
1015 * @ptr: pointer to beginning of the object
1017 * This function is called from the kernel allocators when an object (memory
1032 * kmemleak_free_part - partially unregister a previously registered object
1033 * @ptr: pointer to the beginning or inside the object. This also
1052 * kmemleak_free_percpu - unregister a previously registered __percpu object
1053 * @ptr: __percpu pointer to beginning of the object
1055 * This function is called from the kernel percpu allocator when an object
1074 * kmemleak_update_trace - update object allocation stack trace
1075 * @ptr: pointer to beginning of the object
1077 * Override the object allocation stack trace for cases where the actual
1082 struct kmemleak_object *object; in kmemleak_update_trace() local
1090 object = find_and_get_object((unsigned long)ptr, 1); in kmemleak_update_trace()
1091 if (!object) { in kmemleak_update_trace()
1093 kmemleak_warn("Updating stack trace for unknown object at %p\n", in kmemleak_update_trace()
1099 spin_lock_irqsave(&object->lock, flags); in kmemleak_update_trace()
1100 object->trace_len = __save_stack_trace(object->trace); in kmemleak_update_trace()
1101 spin_unlock_irqrestore(&object->lock, flags); in kmemleak_update_trace()
1103 put_object(object); in kmemleak_update_trace()
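A hedged example of where this helps: a pre-allocated pool whose elements all share the pool-initialisation backtrace. Refreshing the trace when an element is handed out makes a later leak report point at the real consumer (struct my_pool and pool_take() are made-up names):

#include <linux/kmemleak.h>

struct my_pool { void *slots[16]; int top; };

static void *pool_take(struct my_pool *pool)
{
	void *elem = pool->top > 0 ? pool->slots[--pool->top] : NULL;

	if (elem)
		kmemleak_update_trace(elem);	/* record this call site instead */
	return elem;
}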
1108 * kmemleak_not_leak - mark an allocated object as false positive
1109 * @ptr: pointer to beginning of the object
1111 * Calling this function on an object will cause the memory block to no longer
1126 * kmemleak_ignore - ignore an allocated object
1127 * @ptr: pointer to beginning of the object
1129 * Calling this function on an object will cause the memory block to be
1146 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1147 * @ptr: pointer to beginning or inside the object. This also
1152 * This function is used when it is known that only certain parts of an object
1168 * kmemleak_no_scan - do not scan an allocated object
1169 * @ptr: pointer to beginning of the object
1172 * in situations where it is known that the given object does not contain any
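Taken together, kmemleak_not_leak(), kmemleak_ignore(), kmemleak_scan_area() and kmemleak_no_scan() cover the common false-positive/false-reference cases. A hypothetical driver snippet (buffer names invented for illustration):

#include <linux/gfp.h>
#include <linux/kmemleak.h>

static void annotate_buffers(void *fw_blob, void *dma_ring,
			     void *id_table, void *big_struct, size_t hdr_len)
{
	/* Only reachable from firmware/hardware: never report it as a leak,
	 * but keep scanning its contents for pointers to other objects. */
	kmemleak_not_leak(fw_blob);

	/* Holds bus addresses rather than kernel pointers: scanning it would
	 * only create false references, so skip its contents. */
	kmemleak_no_scan(dma_ring);

	/* Neither scan it nor report it. */
	kmemleak_ignore(id_table);

	/* Only the first hdr_len bytes hold real pointers: restricting the
	 * scan keeps random data in the rest from pinning other objects. */
	kmemleak_scan_area(big_struct, hdr_len, GFP_KERNEL);
}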
1190 * @phys: physical address of the object
1191 * @size: size of the object
1192 * @min_count: minimum number of references to this object.
1207 * @phys: physical address of the beginning or inside an object. This
1221 * @phys: physical address of the object
1233 * @phys: physical address of the object
1243 * Update an object's checksum and return true if it was modified.
1245 static bool update_checksum(struct kmemleak_object *object) in update_checksum() argument
1247 u32 old_csum = object->checksum; in update_checksum()
1250 object->checksum = crc32(0, (void *)object->pointer, object->size); in update_checksum()
1253 return object->checksum != old_csum; in update_checksum()
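The checksum exists for change detection: a white (candidate leak) object whose contents changed since the previous scan is temporarily colored gray and scanned again rather than reported, deferring the report until its contents are stable. A userspace sketch of the idea, assuming zlib's crc32() in place of the kernel's:

#include <stdio.h>
#include <zlib.h>		/* link with -lz */

int main(void)
{
	unsigned char block[64] = "some tracked object";
	uLong before = crc32(0L, block, sizeof(block));

	block[0] = 'S';		/* object written to between scans */

	if (crc32(0L, block, sizeof(block)) != before)
		printf("modified since last scan: defer reporting\n");
	else
		printf("contents stable: report if still unreferenced\n");
	return 0;
}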
1257 * Update an object's references. object->lock must be held by the caller.
1259 static void update_refs(struct kmemleak_object *object) in update_refs() argument
1261 if (!color_white(object)) { in update_refs()
1267 * Increase the object's reference count (number of pointers to the in update_refs()
1269 * object's color will become gray and it will be added to the in update_refs()
1272 object->count++; in update_refs()
1273 if (color_gray(object)) { in update_refs()
1275 WARN_ON(!get_object(object)); in update_refs()
1276 list_add_tail(&object->gray_list, &gray_list); in update_refs()
1315 struct kmemleak_object *object; in scan_block() local
1331 * object->use_count cannot be dropped to 0 while the object in scan_block()
1335 object = lookup_object(pointer, 1); in scan_block()
1336 if (!object) in scan_block()
1338 if (object == scanned) in scan_block()
1343 * Avoid the lockdep recursive warning on object->lock being in scan_block()
1347 spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in scan_block()
1348 /* only pass surplus references (object already gray) */ in scan_block()
1349 if (color_gray(object)) { in scan_block()
1350 excess_ref = object->excess_ref; in scan_block()
1351 /* no need for update_refs() if object already gray */ in scan_block()
1354 update_refs(object); in scan_block()
1356 spin_unlock(&object->lock); in scan_block()
1359 object = lookup_object(excess_ref, 0); in scan_block()
1360 if (!object) in scan_block()
1362 if (object == scanned) in scan_block()
1365 spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); in scan_block()
1366 update_refs(object); in scan_block()
1367 spin_unlock(&object->lock); in scan_block()
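scan_block() is the heart of the detector: every aligned word in a scanned range is treated as a potential pointer and looked up among the tracked objects. A compact userspace sketch of the idea (a plain array stands in for the kernel's red-black tree, and there is no locking):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tracked { uintptr_t start, end; int count; };

static struct tracked *lookup(struct tracked *objs, size_t n, uintptr_t val)
{
	for (size_t i = 0; i < n; i++)
		if (val >= objs[i].start && val < objs[i].end)
			return &objs[i];	/* a pointer anywhere inside counts */
	return NULL;
}

static void scan_range(const void *start, const void *end,
		       struct tracked *objs, size_t n)
{
	for (const uintptr_t *p = start; (const void *)(p + 1) <= end; p++) {
		struct tracked *t = lookup(objs, n, *p);

		if (t)
			t->count++;	/* one more reference found */
	}
}

int main(void)
{
	char heap_a[32], heap_b[32];
	struct tracked objs[] = {
		{ (uintptr_t)heap_a, (uintptr_t)heap_a + sizeof(heap_a), 0 },
		{ (uintptr_t)heap_b, (uintptr_t)heap_b + sizeof(heap_b), 0 },
	};
	/* a "root" block containing one pointer into heap_a and none into heap_b */
	uintptr_t roots[2] = { (uintptr_t)&heap_a[8], 0 };

	scan_range(roots, roots + 2, objs, 2);
	printf("heap_a: %d refs, heap_b: %d refs (leak candidate)\n",
	       objs[0].count, objs[1].count);
	return 0;
}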
1392 * that object->use_count >= 1.
1394 static void scan_object(struct kmemleak_object *object) in scan_object() argument
1400 * Once the object->lock is acquired, the corresponding memory block in scan_object()
1403 spin_lock_irqsave(&object->lock, flags); in scan_object()
1404 if (object->flags & OBJECT_NO_SCAN) in scan_object()
1406 if (!(object->flags & OBJECT_ALLOCATED)) in scan_object()
1407 /* already freed object */ in scan_object()
1409 if (hlist_empty(&object->area_list)) { in scan_object()
1410 void *start = (void *)object->pointer; in scan_object()
1411 void *end = (void *)(object->pointer + object->size); in scan_object()
1416 scan_block(start, next, object); in scan_object()
1422 spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1424 spin_lock_irqsave(&object->lock, flags); in scan_object()
1425 } while (object->flags & OBJECT_ALLOCATED); in scan_object()
1427 hlist_for_each_entry(area, &object->area_list, node) in scan_object()
1430 object); in scan_object()
1432 spin_unlock_irqrestore(&object->lock, flags); in scan_object()
1441 struct kmemleak_object *object, *tmp; in scan_gray_list() local
1448 object = list_entry(gray_list.next, typeof(*object), gray_list); in scan_gray_list()
1449 while (&object->gray_list != &gray_list) { in scan_gray_list()
1454 scan_object(object); in scan_gray_list()
1456 tmp = list_entry(object->gray_list.next, typeof(*object), in scan_gray_list()
1459 /* remove the object from the list and release it */ in scan_gray_list()
1460 list_del(&object->gray_list); in scan_gray_list()
1461 put_object(object); in scan_gray_list()
1463 object = tmp; in scan_gray_list()
1476 struct kmemleak_object *object; in kmemleak_scan() local
1484 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1485 spin_lock_irqsave(&object->lock, flags); in kmemleak_scan()
1489 * 1 reference to any object at this point. in kmemleak_scan()
1491 if (atomic_read(&object->use_count) > 1) { in kmemleak_scan()
1492 pr_debug("object->use_count = %d\n", in kmemleak_scan()
1493 atomic_read(&object->use_count)); in kmemleak_scan()
1494 dump_object_info(object); in kmemleak_scan()
1497 /* reset the reference count (whiten the object) */ in kmemleak_scan()
1498 object->count = 0; in kmemleak_scan()
1499 if (color_gray(object) && get_object(object)) in kmemleak_scan()
1500 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1502 spin_unlock_irqrestore(&object->lock, flags); in kmemleak_scan()
1566 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1567 spin_lock_irqsave(&object->lock, flags); in kmemleak_scan()
1568 if (color_white(object) && (object->flags & OBJECT_ALLOCATED) in kmemleak_scan()
1569 && update_checksum(object) && get_object(object)) { in kmemleak_scan()
1571 object->count = object->min_count; in kmemleak_scan()
1572 list_add_tail(&object->gray_list, &gray_list); in kmemleak_scan()
1574 spin_unlock_irqrestore(&object->lock, flags); in kmemleak_scan()
1593 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_scan()
1594 spin_lock_irqsave(&object->lock, flags); in kmemleak_scan()
1595 if (unreferenced_object(object) && in kmemleak_scan()
1596 !(object->flags & OBJECT_REPORTED)) { in kmemleak_scan()
1597 object->flags |= OBJECT_REPORTED; in kmemleak_scan()
1600 spin_unlock_irqrestore(&object->lock, flags); in kmemleak_scan()
1678 * Iterate over the object_list and return the first valid object at or after
1684 struct kmemleak_object *object; in kmemleak_seq_start() local
1693 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_seq_start()
1696 if (get_object(object)) in kmemleak_seq_start()
1699 object = NULL; in kmemleak_seq_start()
1701 return object; in kmemleak_seq_start()
1705 * Return the next object in the object_list. The function decrements the
1706 * use_count of the previous object and increases that of the next one.
1728 * Decrement the use_count of the last object required, if any.
1745 * Print the information for an unreferenced object to the seq file.
1749 struct kmemleak_object *object = v; in kmemleak_seq_show() local
1752 spin_lock_irqsave(&object->lock, flags); in kmemleak_seq_show()
1753 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) in kmemleak_seq_show()
1754 print_unreferenced(seq, object); in kmemleak_seq_show()
1755 spin_unlock_irqrestore(&object->lock, flags); in kmemleak_seq_show()
1774 struct kmemleak_object *object; in dump_str_object_info() local
1779 object = find_and_get_object(addr, 0); in dump_str_object_info()
1780 if (!object) { in dump_str_object_info()
1781 pr_info("Unknown object at 0x%08lx\n", addr); in dump_str_object_info()
1785 spin_lock_irqsave(&object->lock, flags); in dump_str_object_info()
1786 dump_object_info(object); in dump_str_object_info()
1787 spin_unlock_irqrestore(&object->lock, flags); in dump_str_object_info()
1789 put_object(object); in dump_str_object_info()
1801 struct kmemleak_object *object; in kmemleak_clear() local
1805 list_for_each_entry_rcu(object, &object_list, object_list) { in kmemleak_clear()
1806 spin_lock_irqsave(&object->lock, flags); in kmemleak_clear()
1807 if ((object->flags & OBJECT_REPORTED) && in kmemleak_clear()
1808 unreferenced_object(object)) in kmemleak_clear()
1809 __paint_it(object, KMEMLEAK_GREY); in kmemleak_clear()
1810 spin_unlock_irqrestore(&object->lock, flags); in kmemleak_clear()
1833 * dump=... - dump information about the object found at the given address
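For example, writing "dump=<address>" to /sys/kernel/debug/kmemleak makes dump_str_object_info() look up that address and print the dump_object_info() details for the object to the kernel log.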
1913 struct kmemleak_object *object; in __kmemleak_do_cleanup() local
1916 list_for_each_entry_rcu(object, &object_list, object_list) in __kmemleak_do_cleanup()
1917 delete_object_full(object->pointer); in __kmemleak_do_cleanup()
1933 * longer track object freeing. Ordering of the scan thread stopping and in kmemleak_do_cleanup()