Lines matching full:work in virt/kvm/async_pf.c (Linux KVM async page fault code); the number before each match is its source line:
33 struct kvm_async_pf *work) in kvm_async_page_present_sync() argument
36 kvm_arch_async_page_present(vcpu, work); in kvm_async_page_present_sync()
40 struct kvm_async_pf *work) in kvm_async_page_present_async() argument
43 kvm_arch_async_page_present(vcpu, work); in kvm_async_page_present_async()
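The two helpers above differ only in their config guard: "page present" is delivered synchronously from the worker on architectures that select CONFIG_KVM_ASYNC_PF_SYNC, and lazily from vCPU context otherwise. A minimal reconstruction; the CONFIG_KVM_ASYNC_PF_SYNC guard and its polarity are inferred from the upstream file rather than shown in the matches:

static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
                                               struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
        kvm_arch_async_page_present(vcpu, work);
#endif
}

static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
                                                struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
        kvm_arch_async_page_present(vcpu, work);
#endif
}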
72 static void async_pf_execute(struct work_struct *work) in async_pf_execute() argument
75 container_of(work, struct kvm_async_pf, work); in async_pf_execute()
85 * This work is run asynchronously to the task which owns in async_pf_execute()
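These fragments are from the workqueue callback: it recovers its kvm_async_pf from the embedded work_struct via container_of(), faults the page in through the owning task's mm, moves the item onto the vCPU's done list, and wakes the vCPU. A sketch assuming a roughly v5.9-era tree; the get_user_pages_remote() signature and the mmap locking helpers have changed across kernel versions, and kvm_vcpu_wake_up() stands in for the exact wakeup call used upstream:

static void async_pf_execute(struct work_struct *work)
{
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct mm_struct *mm = apf->mm;
        struct kvm_vcpu *vcpu = apf->vcpu;
        unsigned long addr = apf->addr;
        int locked = 1;

        might_sleep();

        /*
         * This work is run asynchronously to the task which owns mm and
         * may run in another context, so the page is faulted in through
         * the remote GUP path rather than via current->mm.
         */
        mmap_read_lock(mm);
        get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL, &locked);
        if (locked)
                mmap_read_unlock(mm);

        kvm_async_page_present_sync(vcpu, apf);

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        apf->vcpu = NULL;       /* the flush path tests !work->vcpu for "done" */
        spin_unlock(&vcpu->async_pf.lock);

        /* apf may be freed by kvm_check_async_pf_completion() from here on */
        kvm_vcpu_wake_up(vcpu);

        mmput(mm);              /* pairs with mmget() in kvm_setup_async_pf() */
        kvm_put_kvm(vcpu->kvm); /* pairs with kvm_get_kvm() there */
}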
120 /* cancel outstanding work queue item */ in kvm_clear_async_pf_completion_queue()
122 struct kvm_async_pf *work = in kvm_clear_async_pf_completion_queue() local
124 typeof(*work), queue); in kvm_clear_async_pf_completion_queue()
125 list_del(&work->queue); in kvm_clear_async_pf_completion_queue()
131 if (!work->vcpu) in kvm_clear_async_pf_completion_queue()
136 flush_work(&work->work); in kvm_clear_async_pf_completion_queue()
138 if (cancel_work_sync(&work->work)) { in kvm_clear_async_pf_completion_queue()
139 mmput(work->mm); in kvm_clear_async_pf_completion_queue()
140 kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */ in kvm_clear_async_pf_completion_queue()
141 kmem_cache_free(async_pf_cache, work); in kvm_clear_async_pf_completion_queue()
148 struct kvm_async_pf *work = in kvm_clear_async_pf_completion_queue() local
150 typeof(*work), link); in kvm_clear_async_pf_completion_queue()
151 list_del(&work->link); in kvm_clear_async_pf_completion_queue()
152 kmem_cache_free(async_pf_cache, work); in kvm_clear_async_pf_completion_queue()
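Reassembled, the flush path drains both lists. Still-queued items are either flushed (sync configs) or cancelled, and a successful cancel means this path must drop the mm and kvm references the worker would otherwise have dropped; items already on the done list are simply freed. The spinlock choreography and the final queued = 0 reset are assumptions taken from the upstream file:

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->async_pf.lock);

        /* cancel outstanding work queue item */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);

                /*
                 * The worker clears work->vcpu once the item is on the
                 * done list, so it will be freed by the loop below.
                 */
                if (!work->vcpu)
                        continue;

                spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
                flush_work(&work->work);
#else
                if (cancel_work_sync(&work->work)) {
                        mmput(work->mm);
                        kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
                }
#endif
                spin_lock(&vcpu->async_pf.lock);
        }

        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued = 0;
}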
161 struct kvm_async_pf *work; in kvm_check_async_pf_completion() local
166 work = list_first_entry(&vcpu->async_pf.done, typeof(*work), in kvm_check_async_pf_completion()
168 list_del(&work->link); in kvm_check_async_pf_completion()
171 kvm_arch_async_page_ready(vcpu, work); in kvm_check_async_pf_completion()
172 kvm_async_page_present_async(vcpu, work); in kvm_check_async_pf_completion()
174 list_del(&work->queue); in kvm_check_async_pf_completion()
176 kmem_cache_free(async_pf_cache, work); in kvm_check_async_pf_completion()
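The completion check runs in vCPU context: while completed items are pending and the architecture will accept a "page ready" event, it pops one under the lock, notifies the arch code, unlinks the item from the pending queue, and frees it. The predicate name kvm_arch_can_dequeue_async_page_present, the list_empty_careful() check, and the queued-- accounting are assumptions from kernels of roughly this vintage:

void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
               kvm_arch_can_dequeue_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                        link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                kvm_arch_async_page_ready(vcpu, work);
                kvm_async_page_present_async(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                kmem_cache_free(async_pf_cache, work);
        }
}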
183 struct kvm_async_pf *work; in kvm_setup_async_pf() local
188 /* setup delayed work */ in kvm_setup_async_pf()
194 work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN); in kvm_setup_async_pf()
195 if (!work) in kvm_setup_async_pf()
198 work->wakeup_all = false; in kvm_setup_async_pf()
199 work->vcpu = vcpu; in kvm_setup_async_pf()
200 work->cr2_or_gpa = cr2_or_gpa; in kvm_setup_async_pf()
201 work->addr = hva; in kvm_setup_async_pf()
202 work->arch = *arch; in kvm_setup_async_pf()
203 work->mm = current->mm; in kvm_setup_async_pf()
204 mmget(work->mm); in kvm_setup_async_pf()
205 kvm_get_kvm(work->vcpu->kvm); in kvm_setup_async_pf()
209 if (unlikely(kvm_is_error_hva(work->addr))) in kvm_setup_async_pf()
212 INIT_WORK(&work->work, async_pf_execute); in kvm_setup_async_pf()
213 if (!schedule_work(&work->work)) in kvm_setup_async_pf()
216 list_add_tail(&work->queue, &vcpu->async_pf.queue); in kvm_setup_async_pf()
218 kvm_arch_async_page_not_present(vcpu, work); in kvm_setup_async_pf()
221 kvm_put_kvm(work->vcpu->kvm); in kvm_setup_async_pf()
222 mmput(work->mm); in kvm_setup_async_pf()
223 kmem_cache_free(async_pf_cache, work); in kvm_setup_async_pf()
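Putting the setup fragments in order: the function allocates without sleeping (if the allocation would have to sleep, the caller may as well take the fault synchronously), pins mm and kvm for the worker, schedules it, tracks it on the per-vCPU queue, and injects "page not present" into the guest; any failure after the references are taken unwinds them. The ASYNC_PF_PER_VCPU limit check, the retry_sync label name, and the comment wording are assumptions from upstream:

bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return false;

        /*
         * do alloc nowait, because if we are going to sleep anyway we
         * may as well sleep faulting in page
         */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return false;

        work->wakeup_all = false;
        work->vcpu = vcpu;
        work->cr2_or_gpa = cr2_or_gpa;
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
        mmget(work->mm);                /* worker drops this with mmput() */
        kvm_get_kvm(work->vcpu->kvm);   /* and this with kvm_put_kvm() */

        /* should not happen if the caller already resolved hva */
        if (unlikely(kvm_is_error_hva(work->addr)))
                goto retry_sync;

        INIT_WORK(&work->work, async_pf_execute);
        if (!schedule_work(&work->work))
                goto retry_sync;

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        kvm_arch_async_page_not_present(vcpu, work);
        return true;

retry_sync:
        kvm_put_kvm(work->vcpu->kvm);
        mmput(work->mm);
        kmem_cache_free(async_pf_cache, work);
        return false;
}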
229 struct kvm_async_pf *work; in kvm_async_pf_wakeup_all() local
234 work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC); in kvm_async_pf_wakeup_all()
235 if (!work) in kvm_async_pf_wakeup_all()
238 work->wakeup_all = true; in kvm_async_pf_wakeup_all()
239 INIT_LIST_HEAD(&work->queue); /* for list_del to work */ in kvm_async_pf_wakeup_all()
242 list_add_tail(&work->link, &vcpu->async_pf.done); in kvm_async_pf_wakeup_all()
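Finally, the wakeup-all path fabricates a dummy completion: a wakeup_all-flagged item placed directly on the done list so the next completion check wakes every guest waiter. It never goes through the pending queue or a worker, hence the self-initialized queue head, which makes the later list_del() in kvm_check_async_pf_completion() harmless. The early exit when completions are already pending and the final queued++ follow upstream:

int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;

        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued++;
        return 0;
}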