Lines matching refs: curr
1620 struct task_struct *curr = current; in print_circular_bug_header() local
1631 curr->comm, task_pid_nr(curr)); in print_circular_bug_header()
1653 struct task_struct *curr = current; in print_circular_bug() local
1681 lockdep_print_held_locks(curr); in print_circular_bug()
2024 print_bad_irq_dependency(struct task_struct *curr, in print_bad_irq_dependency() argument
2045 curr->comm, task_pid_nr(curr), in print_bad_irq_dependency()
2046 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, in print_bad_irq_dependency()
2047 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, in print_bad_irq_dependency()
2048 curr->hardirqs_enabled, in print_bad_irq_dependency()
2049 curr->softirqs_enabled); in print_bad_irq_dependency()
2078 lockdep_print_held_locks(curr); in print_bad_irq_dependency()
2223 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, in check_irq_usage() argument
2292 print_bad_irq_dependency(curr, &this, &that, in check_irq_usage()
2315 static inline int check_irq_usage(struct task_struct *curr, in check_irq_usage() argument
2348 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, in print_deadlock_bug() argument
2360 curr->comm, task_pid_nr(curr)); in print_deadlock_bug()
2367 lockdep_print_held_locks(curr); in print_deadlock_bug()
2382 check_deadlock(struct task_struct *curr, struct held_lock *next) in check_deadlock() argument
2388 for (i = 0; i < curr->lockdep_depth; i++) { in check_deadlock()
2389 prev = curr->held_locks + i; in check_deadlock()
2411 print_deadlock_bug(curr, prev, next); in check_deadlock()
2440 check_prev_add(struct task_struct *curr, struct held_lock *prev, in check_prev_add() argument
2479 if (!check_irq_usage(curr, prev, next)) in check_prev_add()
2550 check_prevs_add(struct task_struct *curr, struct held_lock *next) in check_prevs_add() argument
2553 int depth = curr->lockdep_depth; in check_prevs_add()
2567 if (curr->held_locks[depth].irq_context != in check_prevs_add()
2568 curr->held_locks[depth-1].irq_context) in check_prevs_add()
2572 int distance = curr->lockdep_depth - depth + 1; in check_prevs_add()
2573 hlock = curr->held_locks + depth - 1; in check_prevs_add()
2580 int ret = check_prev_add(curr, hlock, next, distance, in check_prevs_add()
2604 if (curr->held_locks[depth].irq_context != in check_prevs_add()
2605 curr->held_locks[depth-1].irq_context) in check_prevs_add()
2636 static inline int get_first_held_lock(struct task_struct *curr, in get_first_held_lock() argument
2642 for (i = curr->lockdep_depth - 1; i >= 0; i--) { in get_first_held_lock()
2643 hlock_curr = curr->held_locks + i; in get_first_held_lock()
2667 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next) in print_chain_keys_held_locks() argument
2671 int depth = curr->lockdep_depth; in print_chain_keys_held_locks()
2672 int i = get_first_held_lock(curr, hlock_next); in print_chain_keys_held_locks()
2677 hlock = curr->held_locks + i; in print_chain_keys_held_locks()
2703 static void print_collision(struct task_struct *curr, in print_collision() argument
2716 print_chain_keys_held_locks(curr, hlock_next); in print_collision()
2732 static int check_no_collision(struct task_struct *curr, in check_no_collision() argument
2739 i = get_first_held_lock(curr, hlock); in check_no_collision()
2741 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) { in check_no_collision()
2742 print_collision(curr, hlock, chain); in check_no_collision()
2747 id = curr->held_locks[i].class_idx; in check_no_collision()
2750 print_collision(curr, hlock, chain); in check_no_collision()
2792 static inline int add_chain_cache(struct task_struct *curr, in add_chain_cache() argument
2820 i = get_first_held_lock(curr, hlock); in add_chain_cache()
2821 chain->depth = curr->lockdep_depth + 1 - i; in add_chain_cache()
2824 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); in add_chain_cache()
2830 int lock_id = curr->held_locks[i].class_idx; in add_chain_cache()
2875 static inline int lookup_chain_cache_add(struct task_struct *curr, in lookup_chain_cache_add() argument
2884 if (!check_no_collision(curr, hlock, chain)) in lookup_chain_cache_add()
2914 if (!add_chain_cache(curr, hlock, chain_key)) in lookup_chain_cache_add()
2920 static int validate_chain(struct task_struct *curr, in validate_chain() argument
2935 lookup_chain_cache_add(curr, hlock, chain_key)) { in validate_chain()
2954 int ret = check_deadlock(curr, hlock); in validate_chain()
2970 if (!check_prevs_add(curr, hlock)) in validate_chain()
2984 static inline int validate_chain(struct task_struct *curr, in validate_chain() argument
2996 static void check_chain_key(struct task_struct *curr) in check_chain_key() argument
3003 for (i = 0; i < curr->lockdep_depth; i++) { in check_chain_key()
3004 hlock = curr->held_locks + i; in check_chain_key()
3012 curr->lockdep_depth, i, in check_chain_key()
3031 if (chain_key != curr->curr_chain_key) { in check_chain_key()
3038 curr->lockdep_depth, i, in check_chain_key()
3040 (unsigned long long)curr->curr_chain_key); in check_chain_key()
3046 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3067 print_usage_bug(struct task_struct *curr, struct held_lock *this, in print_usage_bug() argument
3083 curr->comm, task_pid_nr(curr), in print_usage_bug()
3084 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, in print_usage_bug()
3085 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, in print_usage_bug()
3086 trace_hardirqs_enabled(curr), in print_usage_bug()
3087 trace_softirqs_enabled(curr)); in print_usage_bug()
3093 print_irqtrace_events(curr); in print_usage_bug()
3097 lockdep_print_held_locks(curr); in print_usage_bug()
3107 valid_state(struct task_struct *curr, struct held_lock *this, in valid_state() argument
3111 print_usage_bug(curr, this, bad_bit, new_bit); in valid_state()
3122 print_irq_inversion_bug(struct task_struct *curr, in print_irq_inversion_bug() argument
3140 curr->comm, task_pid_nr(curr)); in print_irq_inversion_bug()
3169 lockdep_print_held_locks(curr); in print_irq_inversion_bug()
3186 check_usage_forwards(struct task_struct *curr, struct held_lock *this, in check_usage_forwards() argument
3203 print_irq_inversion_bug(curr, &root, target_entry, in check_usage_forwards()
3213 check_usage_backwards(struct task_struct *curr, struct held_lock *this, in check_usage_backwards() argument
3230 print_irq_inversion_bug(curr, &root, target_entry, in check_usage_backwards()
3235 void print_irqtrace_events(struct task_struct *curr) in print_irqtrace_events() argument
3237 printk("irq event stamp: %u\n", curr->irq_events); in print_irqtrace_events()
3239 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip, in print_irqtrace_events()
3240 (void *)curr->hardirq_enable_ip); in print_irqtrace_events()
3242 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip, in print_irqtrace_events()
3243 (void *)curr->hardirq_disable_ip); in print_irqtrace_events()
3245 curr->softirq_enable_event, (void *)curr->softirq_enable_ip, in print_irqtrace_events()
3246 (void *)curr->softirq_enable_ip); in print_irqtrace_events()
3248 curr->softirq_disable_event, (void *)curr->softirq_disable_ip, in print_irqtrace_events()
3249 (void *)curr->softirq_disable_ip); in print_irqtrace_events()
3287 mark_lock_irq(struct task_struct *curr, struct held_lock *this, in mark_lock_irq() argument
3308 if (!valid_state(curr, this, new_bit, excl_bit)) in mark_lock_irq()
3316 !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK))) in mark_lock_irq()
3323 if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK)) in mark_lock_irq()
3327 !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK, in mark_lock_irq()
3342 mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit) in mark_held_locks() argument
3347 for (i = 0; i < curr->lockdep_depth; i++) { in mark_held_locks()
3349 hlock = curr->held_locks + i; in mark_held_locks()
3359 if (!mark_lock(curr, hlock, hlock_bit)) in mark_held_locks()
3371 struct task_struct *curr = current; in __trace_hardirqs_on_caller() local
3374 curr->hardirqs_enabled = 1; in __trace_hardirqs_on_caller()
3380 if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ)) in __trace_hardirqs_on_caller()
3387 if (curr->softirqs_enabled) in __trace_hardirqs_on_caller()
3388 if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ)) in __trace_hardirqs_on_caller()
3391 curr->hardirq_enable_ip = ip; in __trace_hardirqs_on_caller()
3392 curr->hardirq_enable_event = ++curr->irq_events; in __trace_hardirqs_on_caller()
3443 struct task_struct *curr = current; in lockdep_hardirqs_off() local
3455 if (curr->hardirqs_enabled) { in lockdep_hardirqs_off()
3459 curr->hardirqs_enabled = 0; in lockdep_hardirqs_off()
3460 curr->hardirq_disable_ip = ip; in lockdep_hardirqs_off()
3461 curr->hardirq_disable_event = ++curr->irq_events; in lockdep_hardirqs_off()
3473 struct task_struct *curr = current; in trace_softirqs_on() local
3485 if (curr->softirqs_enabled) { in trace_softirqs_on()
3494 curr->softirqs_enabled = 1; in trace_softirqs_on()
3495 curr->softirq_enable_ip = ip; in trace_softirqs_on()
3496 curr->softirq_enable_event = ++curr->irq_events; in trace_softirqs_on()
3503 if (curr->hardirqs_enabled) in trace_softirqs_on()
3504 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); in trace_softirqs_on()
3513 struct task_struct *curr = current; in trace_softirqs_off() local
3524 if (curr->softirqs_enabled) { in trace_softirqs_off()
3528 curr->softirqs_enabled = 0; in trace_softirqs_off()
3529 curr->softirq_disable_ip = ip; in trace_softirqs_off()
3530 curr->softirq_disable_event = ++curr->irq_events; in trace_softirqs_off()
3541 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) in mark_usage() argument
3552 if (curr->hardirq_context) in mark_usage()
3553 if (!mark_lock(curr, hlock, in mark_usage()
3556 if (curr->softirq_context) in mark_usage()
3557 if (!mark_lock(curr, hlock, in mark_usage()
3561 if (curr->hardirq_context) in mark_usage()
3562 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) in mark_usage()
3564 if (curr->softirq_context) in mark_usage()
3565 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) in mark_usage()
3571 if (!mark_lock(curr, hlock, in mark_usage()
3574 if (curr->softirqs_enabled) in mark_usage()
3575 if (!mark_lock(curr, hlock, in mark_usage()
3579 if (!mark_lock(curr, hlock, in mark_usage()
3582 if (curr->softirqs_enabled) in mark_usage()
3583 if (!mark_lock(curr, hlock, in mark_usage()
3591 if (!mark_lock(curr, hlock, LOCK_USED)) in mark_usage()
3602 static int separate_irq_context(struct task_struct *curr, in separate_irq_context() argument
3605 unsigned int depth = curr->lockdep_depth; in separate_irq_context()
3613 prev_hlock = curr->held_locks + depth-1; in separate_irq_context()
3628 static int mark_lock(struct task_struct *curr, struct held_lock *this, in mark_lock() argument
3665 ret = mark_lock_irq(curr, this, new_bit); in mark_lock()
3678 print_irqtrace_events(curr); in mark_lock()
3688 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) in mark_usage() argument
3698 static inline int separate_irq_context(struct task_struct *curr, in separate_irq_context() argument
3770 print_lock_nested_lock_not_held(struct task_struct *curr, in print_lock_nested_lock_not_held() argument
3785 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); in print_lock_nested_lock_not_held()
3795 lockdep_print_held_locks(curr); in print_lock_nested_lock_not_held()
3816 struct task_struct *curr = current; in __lock_acquire() local
3856 depth = curr->lockdep_depth; in __lock_acquire()
3866 hlock = curr->held_locks + depth - 1; in __lock_acquire()
3884 hlock = curr->held_locks + depth; in __lock_acquire()
3895 hlock->irq_context = task_irq_context(curr); in __lock_acquire()
3908 if (!mark_usage(curr, hlock, check)) in __lock_acquire()
3927 chain_key = curr->curr_chain_key; in __lock_acquire()
3938 if (separate_irq_context(curr, hlock)) { in __lock_acquire()
3945 print_lock_nested_lock_not_held(curr, hlock, ip); in __lock_acquire()
3954 if (!validate_chain(curr, hlock, chain_head, chain_key)) in __lock_acquire()
3957 curr->curr_chain_key = chain_key; in __lock_acquire()
3958 curr->lockdep_depth++; in __lock_acquire()
3959 check_chain_key(curr); in __lock_acquire()
3964 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { in __lock_acquire()
3968 curr->lockdep_depth, MAX_LOCK_DEPTH); in __lock_acquire()
3977 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) in __lock_acquire()
3978 max_lockdep_depth = curr->lockdep_depth; in __lock_acquire()
3983 static void print_unlock_imbalance_bug(struct task_struct *curr, in print_unlock_imbalance_bug() argument
3998 curr->comm, task_pid_nr(curr)); in print_unlock_imbalance_bug()
4004 lockdep_print_held_locks(curr); in print_unlock_imbalance_bug()
4047 static struct held_lock *find_held_lock(struct task_struct *curr, in find_held_lock() argument
4055 hlock = curr->held_locks + i; in find_held_lock()
4082 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, in reacquire_held_locks() argument
4091 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { in reacquire_held_locks()
4119 struct task_struct *curr = current; in __lock_set_class() local
4128 depth = curr->lockdep_depth; in __lock_set_class()
4136 hlock = find_held_lock(curr, lock, depth, &i); in __lock_set_class()
4138 print_unlock_imbalance_bug(curr, lock, ip); in __lock_set_class()
4146 curr->lockdep_depth = i; in __lock_set_class()
4147 curr->curr_chain_key = hlock->prev_chain_key; in __lock_set_class()
4149 if (reacquire_held_locks(curr, depth, i, &merged)) in __lock_set_class()
4156 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged)) in __lock_set_class()
4163 struct task_struct *curr = current; in __lock_downgrade() local
4171 depth = curr->lockdep_depth; in __lock_downgrade()
4179 hlock = find_held_lock(curr, lock, depth, &i); in __lock_downgrade()
4181 print_unlock_imbalance_bug(curr, lock, ip); in __lock_downgrade()
4185 curr->lockdep_depth = i; in __lock_downgrade()
4186 curr->curr_chain_key = hlock->prev_chain_key; in __lock_downgrade()
4192 if (reacquire_held_locks(curr, depth, i, &merged)) in __lock_downgrade()
4203 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) in __lock_downgrade()
4219 struct task_struct *curr = current; in __lock_release() local
4227 depth = curr->lockdep_depth; in __lock_release()
4233 print_unlock_imbalance_bug(curr, lock, ip); in __lock_release()
4241 hlock = find_held_lock(curr, lock, depth, &i); in __lock_release()
4243 print_unlock_imbalance_bug(curr, lock, ip); in __lock_release()
4270 curr->lockdep_depth = i; in __lock_release()
4271 curr->curr_chain_key = hlock->prev_chain_key; in __lock_release()
4280 if (reacquire_held_locks(curr, depth, i + 1, &merged)) in __lock_release()
4288 DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged); in __lock_release()
4301 struct task_struct *curr = current; in __lock_is_held() local
4304 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_is_held()
4305 struct held_lock *hlock = curr->held_locks + i; in __lock_is_held()
4321 struct task_struct *curr = current; in __lock_pin_lock() local
4327 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_pin_lock()
4328 struct held_lock *hlock = curr->held_locks + i; in __lock_pin_lock()
4348 struct task_struct *curr = current; in __lock_repin_lock() local
4354 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_repin_lock()
4355 struct held_lock *hlock = curr->held_locks + i; in __lock_repin_lock()
4368 struct task_struct *curr = current; in __lock_unpin_lock() local
4374 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_unpin_lock()
4375 struct held_lock *hlock = curr->held_locks + i; in __lock_unpin_lock()
4588 static void print_lock_contention_bug(struct task_struct *curr, in print_lock_contention_bug() argument
4603 curr->comm, task_pid_nr(curr)); in print_lock_contention_bug()
4609 lockdep_print_held_locks(curr); in print_lock_contention_bug()
4618 struct task_struct *curr = current; in __lock_contended() local
4624 depth = curr->lockdep_depth; in __lock_contended()
4632 hlock = find_held_lock(curr, lock, depth, &i); in __lock_contended()
4634 print_lock_contention_bug(curr, lock, ip); in __lock_contended()
4659 struct task_struct *curr = current; in __lock_acquired() local
4666 depth = curr->lockdep_depth; in __lock_acquired()
4674 hlock = find_held_lock(curr, lock, depth, &i); in __lock_acquired()
4676 print_lock_contention_bug(curr, lock, _RET_IP_); in __lock_acquired()
5245 print_freed_lock_bug(struct task_struct *curr, const void *mem_from, in print_freed_lock_bug() argument
5259 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); in print_freed_lock_bug()
5261 lockdep_print_held_locks(curr); in print_freed_lock_bug()
5281 struct task_struct *curr = current; in debug_check_no_locks_freed() local
5290 for (i = 0; i < curr->lockdep_depth; i++) { in debug_check_no_locks_freed()
5291 hlock = curr->held_locks + i; in debug_check_no_locks_freed()
5297 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); in debug_check_no_locks_freed()
5372 struct task_struct *curr = current; in lockdep_sys_exit() local
5374 if (unlikely(curr->lockdep_depth)) { in lockdep_sys_exit()
5383 curr->comm, curr->pid); in lockdep_sys_exit()
5384 lockdep_print_held_locks(curr); in lockdep_sys_exit()
5396 struct task_struct *curr = current; in lockdep_rcu_suspicious() local
5435 lockdep_print_held_locks(curr); in lockdep_rcu_suspicious()
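Nearly every hit above follows the same shape: curr is either the running task (current) captured as a local, or passed in as a struct task_struct * argument, and the function walks curr->held_locks[0 .. lockdep_depth) examining each held_lock (as in find_held_lock(), check_chain_key(), __lock_is_held() and friends). The following is a minimal, self-contained userspace sketch of that traversal; the struct definitions are simplified stand-ins, not the kernel's real task_struct and held_lock, and the field set is trimmed to what the loop touches.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures (assumption: fields
 * reduced to those the held-lock walk actually reads). */
struct held_lock {
	unsigned int class_idx;
	unsigned int irq_context;
};

#define MAX_LOCK_DEPTH 48

struct task_struct {
	char comm[16];
	int pid;
	unsigned int lockdep_depth;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
};

/* The recurring pattern from the listing: iterate the per-task
 * held-lock stack from 0 to lockdep_depth and inspect each entry. */
static void walk_held_locks(struct task_struct *curr)
{
	struct held_lock *hlock;
	unsigned int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;
		printf("%s/%d: lock %u (irq_context %u)\n",
		       curr->comm, curr->pid, hlock->class_idx,
		       hlock->irq_context);
	}
}

int main(void)
{
	struct task_struct t = {
		.comm = "demo", .pid = 1, .lockdep_depth = 2,
		.held_locks = { { .class_idx = 3 }, { .class_idx = 7 } },
	};

	walk_held_locks(&t);
	return 0;
}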