Lines matching refs:work in virt/kvm/async_pf.c (Linux KVM's async page fault code); each match is prefixed with its line number in that file.

45 static void async_pf_execute(struct work_struct *work)
48 container_of(work, struct kvm_async_pf, work);
63 * work item is fully processed.
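These first matches (45-63) are from the workqueue callback that actually resolves the fault. As orientation, a minimal sketch of its shape, reconstructed around the matched lines; the body that faults the page in and wakes the vCPU is elided, and the local name apf is an assumption:

static void async_pf_execute(struct work_struct *work)
{
        /* Recover the async-#PF descriptor embedding this work item. */
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);

        /*
         * ... fault the page in, move apf to the vCPU's done list, and
         * wake the vCPU (elided).  The match at line 63 is the tail of a
         * comment in this function warning about object lifetimes once
         * the work item is fully processed.
         */
}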
100 static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
103 * The async #PF is "done", but KVM must wait for the work item itself,
106 * after the last call to module_put(). Note, flushing the work item
112 * need to be flushed (but sanity check that the work wasn't queued).
114 if (work->wakeup_all)
115 WARN_ON_ONCE(work->work.func);
117 flush_work(&work->work);
118 kmem_cache_free(async_pf_cache, work);
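The matches at 100-118 cover the flush-and-free helper almost in full. A reconstruction that fills the small gaps; the else pairing and the abridged comment are inferred from the matched lines:

static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
{
        /*
         * The async #PF is "done", but KVM must wait for the work item
         * itself, i.e. async_pf_execute(), to finish before freeing it,
         * e.g. so that no kvm.ko code can run after the last call to
         * module_put() [comment abridged; see lines 103-112].
         *
         * Synthetic "wakeup all" items are never handed to the
         * workqueue, so they don't need to be flushed (but sanity check
         * that the work wasn't queued: INIT_WORK() would have set a
         * non-NULL work->work.func).
         */
        if (work->wakeup_all)
                WARN_ON_ONCE(work->work.func);
        else
                flush_work(&work->work);

        kmem_cache_free(async_pf_cache, work);
}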
125 /* cancel outstanding work queue item */
127 struct kvm_async_pf *work =
129 typeof(*work), queue);
130 list_del(&work->queue);
136 if (!work->vcpu)
141 flush_work(&work->work);
143 if (cancel_work_sync(&work->work))
144 kmem_cache_free(async_pf_cache, work);
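Lines 125-144 are the first loop of the teardown path, kvm_clear_async_pf_completion_queue(), which cancels work items still on the per-vCPU queue. A sketch; the lock handling and the CONFIG_KVM_ASYNC_PF_SYNC split are assumptions consistent with flush_work() and cancel_work_sync() appearing on adjacent matched lines:

        /* cancel outstanding work queue item */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);

                /*
                 * A NULL vcpu marks an item the worker has already
                 * completed and moved to the done list; the loop below
                 * will flush and free it.
                 */
                if (!work->vcpu)
                        continue;

                spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
                flush_work(&work->work);
#else
                /*
                 * Free the item only if cancellation won the race with
                 * the worker; otherwise the worker has put the item on
                 * the done list and the completion path frees it.
                 */
                if (cancel_work_sync(&work->work))
                        kmem_cache_free(async_pf_cache, work);
#endif
                spin_lock(&vcpu->async_pf.lock);
        }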
150 struct kvm_async_pf *work =
152 typeof(*work), link);
153 list_del(&work->link);
156 kvm_flush_and_free_async_pf_work(work);
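Lines 150-156 are the second teardown loop, which drains vcpu->async_pf.done. The lock must be dropped around the helper because flush_work() can sleep; that lock handling is an assumption here:

        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);

                /* flush_work() sleeps, so don't hold the spinlock. */
                spin_unlock(&vcpu->async_pf.lock);
                kvm_flush_and_free_async_pf_work(work);
                spin_lock(&vcpu->async_pf.lock);
        }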
166 struct kvm_async_pf *work;
171 work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
173 list_del(&work->link);
176 kvm_arch_async_page_ready(vcpu, work);
178 kvm_arch_async_page_present(vcpu, work);
180 list_del(&work->queue);
182 kvm_flush_and_free_async_pf_work(work);
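Lines 166-182 belong to the function the vCPU calls to consume completed faults. A sketch of the loop; the surrounding signature, the termination condition, and the wakeup_all test guarding the "page present" notification are reconstructions:

void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
               kvm_arch_can_dequeue_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                        link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                /*
                 * Tell the arch code the page is ready, and, for real
                 * (non-wakeup_all) faults, inject "page present".
                 */
                kvm_arch_async_page_ready(vcpu, work);
                if (!work->wakeup_all)
                        kvm_arch_async_page_present(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                kvm_flush_and_free_async_pf_work(work);
        }
}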
193 struct kvm_async_pf *work;
206 work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
207 if (!work)
210 work->wakeup_all = false;
211 work->vcpu = vcpu;
212 work->cr2_or_gpa = cr2_or_gpa;
213 work->addr = hva;
214 work->arch = *arch;
216 INIT_WORK(&work->work, async_pf_execute);
218 list_add_tail(&work->queue, &vcpu->async_pf.queue);
220 work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
222 schedule_work(&work->work);
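Lines 193-222 map onto the setup path that allocates, queues, and kicks off a new async #PF. A reconstruction; the signature, the capacity check against ASYNC_PF_PER_VCPU, and the return values are assumptions:

bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return false;

        /* GFP_NOWAIT: this runs in the fault path and must not sleep. */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return false;

        work->wakeup_all = false;
        work->vcpu = vcpu;
        work->cr2_or_gpa = cr2_or_gpa;
        work->addr = hva;
        work->arch = *arch;

        INIT_WORK(&work->work, async_pf_execute);

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

        schedule_work(&work->work);

        return true;
}

Note the ordering visible in the matches: "page not present" is injected (line 220) before schedule_work() (line 222), so the worker can never deliver "page present" to the guest ahead of the corresponding "not present" event.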
229 struct kvm_async_pf *work;
235 work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
236 if (!work)
239 work->wakeup_all = true;
240 INIT_LIST_HEAD(&work->queue); /* for list_del to work */
244 list_add_tail(&work->link, &vcpu->async_pf.done);
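The final cluster (229-244) is the synthetic "wakeup all" path, which fabricates an already-completed item to flush the guest's waiters. A sketch; the early-out, the locking, and the "first" notification are reconstructions around the matched lines:

int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;
        bool first;

        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        /* No INIT_WORK(): this item never goes near the workqueue. */
        work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */

        spin_lock(&vcpu->async_pf.lock);
        first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        if (first)
                kvm_arch_async_page_present_queued(vcpu);

        vcpu->async_pf.queued++;
        return 0;
}

Because the item goes straight onto the done list with work->vcpu left NULL by kmem_cache_zalloc(), both teardown and completion recognize it as synthetic, which is what the WARN_ON_ONCE() in kvm_flush_and_free_async_pf_work() sanity checks.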