Lines Matching refs:krcp

3129 struct kfree_rcu_cpu *krcp; member
3195 struct kfree_rcu_cpu *krcp; in krc_this_cpu_lock() local
3198 krcp = this_cpu_ptr(&krc); in krc_this_cpu_lock()
3199 raw_spin_lock(&krcp->lock); in krc_this_cpu_lock()
3201 return krcp; in krc_this_cpu_lock()
3205 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags) in krc_this_cpu_unlock() argument
3207 raw_spin_unlock_irqrestore(&krcp->lock, flags); in krc_this_cpu_unlock()
3211 get_cached_bnode(struct kfree_rcu_cpu *krcp) in get_cached_bnode() argument
3213 if (!krcp->nr_bkv_objs) in get_cached_bnode()
3216 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1); in get_cached_bnode()
3218 llist_del_first(&krcp->bkvcache); in get_cached_bnode()
3222 put_cached_bnode(struct kfree_rcu_cpu *krcp, in put_cached_bnode() argument
3226 if (krcp->nr_bkv_objs >= rcu_min_cached_objs) in put_cached_bnode()
3229 llist_add((struct llist_node *) bnode, &krcp->bkvcache); in put_cached_bnode()
3230 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1); in put_cached_bnode()
3235 drain_page_cache(struct kfree_rcu_cpu *krcp) in drain_page_cache() argument
3241 raw_spin_lock_irqsave(&krcp->lock, flags); in drain_page_cache()
3242 page_list = llist_del_all(&krcp->bkvcache); in drain_page_cache()
3243 WRITE_ONCE(krcp->nr_bkv_objs, 0); in drain_page_cache()
3244 raw_spin_unlock_irqrestore(&krcp->lock, flags); in drain_page_cache()
3263 struct kfree_rcu_cpu *krcp; in kfree_rcu_work() local
3269 krcp = krwp->krcp; in kfree_rcu_work()
3271 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_work()
3281 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_work()
3308 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_work()
3309 if (put_cached_bnode(krcp, bkvhead[i])) in kfree_rcu_work()
3311 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_work()
3345 need_offload_krc(struct kfree_rcu_cpu *krcp) in need_offload_krc() argument
3350 if (krcp->bkvhead[i]) in need_offload_krc()
3353 return !!krcp->head; in need_offload_krc()
3373 struct kfree_rcu_cpu *krcp = container_of(work, in kfree_rcu_monitor() local
3378 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_monitor()
3382 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]); in kfree_rcu_monitor()
3390 if (need_offload_krc(krcp)) { in kfree_rcu_monitor()
3395 krwp->bkvhead_free[j] = krcp->bkvhead[j]; in kfree_rcu_monitor()
3396 krcp->bkvhead[j] = NULL; in kfree_rcu_monitor()
3403 krwp->head_free = krcp->head; in kfree_rcu_monitor()
3404 krcp->head = NULL; in kfree_rcu_monitor()
3407 WRITE_ONCE(krcp->count, 0); in kfree_rcu_monitor()
3423 if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) in kfree_rcu_monitor()
3424 krcp->monitor_todo = false; in kfree_rcu_monitor()
3426 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES); in kfree_rcu_monitor()
3428 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_monitor()
3434 struct kfree_rcu_cpu *krcp = in schedule_page_work_fn() local
3437 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0); in schedule_page_work_fn()
3444 struct kfree_rcu_cpu *krcp = in fill_page_cache_func() local
3452 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ? in fill_page_cache_func()
3462 raw_spin_lock_irqsave(&krcp->lock, flags); in fill_page_cache_func()
3463 pushed = put_cached_bnode(krcp, bnode); in fill_page_cache_func()
3464 raw_spin_unlock_irqrestore(&krcp->lock, flags); in fill_page_cache_func()
3472 atomic_set(&krcp->work_in_progress, 0); in fill_page_cache_func()
3473 atomic_set(&krcp->backoff_page_cache_fill, 0); in fill_page_cache_func()
3477 run_page_cache_worker(struct kfree_rcu_cpu *krcp) in run_page_cache_worker() argument
3480 !atomic_xchg(&krcp->work_in_progress, 1)) { in run_page_cache_worker()
3481 if (atomic_read(&krcp->backoff_page_cache_fill)) { in run_page_cache_worker()
3483 &krcp->page_cache_work, in run_page_cache_worker()
3486 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in run_page_cache_worker()
3487 krcp->hrtimer.function = schedule_page_work_fn; in run_page_cache_worker()
3488 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL); in run_page_cache_worker()
3500 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp, in add_ptr_to_bulk_krc_lock() argument
3506 *krcp = krc_this_cpu_lock(flags); in add_ptr_to_bulk_krc_lock()
3507 if (unlikely(!(*krcp)->initialized)) in add_ptr_to_bulk_krc_lock()
3513 if (!(*krcp)->bkvhead[idx] || in add_ptr_to_bulk_krc_lock()
3514 (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) { in add_ptr_to_bulk_krc_lock()
3515 bnode = get_cached_bnode(*krcp); in add_ptr_to_bulk_krc_lock()
3517 krc_this_cpu_unlock(*krcp, *flags); in add_ptr_to_bulk_krc_lock()
3532 *krcp = krc_this_cpu_lock(flags); in add_ptr_to_bulk_krc_lock()
3540 bnode->next = (*krcp)->bkvhead[idx]; in add_ptr_to_bulk_krc_lock()
3543 (*krcp)->bkvhead[idx] = bnode; in add_ptr_to_bulk_krc_lock()
3547 (*krcp)->bkvhead[idx]->records in add_ptr_to_bulk_krc_lock()
3548 [(*krcp)->bkvhead[idx]->nr_records++] = ptr; in add_ptr_to_bulk_krc_lock()
3568 struct kfree_rcu_cpu *krcp; in kvfree_call_rcu() local
3597 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head); in kvfree_call_rcu()
3599 run_page_cache_worker(krcp); in kvfree_call_rcu()
3606 head->next = krcp->head; in kvfree_call_rcu()
3607 krcp->head = head; in kvfree_call_rcu()
3611 WRITE_ONCE(krcp->count, krcp->count + 1); in kvfree_call_rcu()
3623 !krcp->monitor_todo) { in kvfree_call_rcu()
3624 krcp->monitor_todo = true; in kvfree_call_rcu()
3625 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES); in kvfree_call_rcu()
3629 krc_this_cpu_unlock(krcp, flags); in kvfree_call_rcu()
3652 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_shrink_count() local
3654 count += READ_ONCE(krcp->count); in kfree_rcu_shrink_count()
3655 count += READ_ONCE(krcp->nr_bkv_objs); in kfree_rcu_shrink_count()
3656 atomic_set(&krcp->backoff_page_cache_fill, 1); in kfree_rcu_shrink_count()
3669 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_shrink_scan() local
3671 count = krcp->count; in kfree_rcu_shrink_scan()
3672 count += drain_page_cache(krcp); in kfree_rcu_shrink_scan()
3673 kfree_rcu_monitor(&krcp->monitor_work.work); in kfree_rcu_shrink_scan()
3698 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_scheduler_running() local
3700 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_scheduler_running()
3701 if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) || in kfree_rcu_scheduler_running()
3702 krcp->monitor_todo) { in kfree_rcu_scheduler_running()
3703 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_scheduler_running()
3706 krcp->monitor_todo = true; in kfree_rcu_scheduler_running()
3707 schedule_delayed_work_on(cpu, &krcp->monitor_work, in kfree_rcu_scheduler_running()
3709 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_scheduler_running()
4785 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_batch_init() local
4788 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); in kfree_rcu_batch_init()
4789 krcp->krw_arr[i].krcp = krcp; in kfree_rcu_batch_init()
4792 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); in kfree_rcu_batch_init()
4793 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func); in kfree_rcu_batch_init()
4794 krcp->initialized = true; in kfree_rcu_batch_init()