Lines Matching refs:vc

163 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
170 vcpu = READ_ONCE(vc->runnable_threads[i]);
180 #define for_each_runnable_thread(i, vcpu, vc) \
181 for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
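The two fragments above (source lines 163-181) are the NULL-skipping iterator used throughout this file: next_runnable_thread() returns the next populated slot of the vcore's runnable_threads[] array, and for_each_runnable_thread() wraps it in a for loop that starts at index -1. Below is a minimal self-contained sketch of the same pattern; MAX_THREADS, struct vcpu and struct vcore are illustrative stand-ins, and the kernel version additionally uses READ_ONCE() because slots are published and cleared concurrently (see source lines 3188 and 4606).

#include <stddef.h>

#define MAX_THREADS 8				/* illustrative; not the kernel constant */

struct vcpu;					/* opaque for the sketch */

struct vcore {
	struct vcpu *runnable_threads[MAX_THREADS];	/* NULL = empty slot */
};

/* Return the next non-NULL entry after *ip, updating *ip; NULL when exhausted. */
static inline struct vcpu *next_runnable_thread(struct vcore *vc, int *ip)
{
	int i;

	for (i = *ip + 1; i < MAX_THREADS; ++i) {
		struct vcpu *v = vc->runnable_threads[i];

		if (v) {
			*ip = i;
			return v;
		}
	}
	return NULL;
}

/* Start at -1 so the first call examines slot 0; the loop ends on NULL. */
#define for_each_runnable_thread(i, vcpu, vc) \
	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
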
267 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
281 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
293 static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb)
299 spin_lock_irqsave(&vc->stoltb_lock, flags);
300 vc->preempt_tb = tb;
301 spin_unlock_irqrestore(&vc->stoltb_lock, flags);
304 static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb)
310 spin_lock_irqsave(&vc->stoltb_lock, flags);
311 if (vc->preempt_tb != TB_NIL) {
312 vc->stolen_tb += tb - vc->preempt_tb;
313 vc->preempt_tb = TB_NIL;
315 spin_unlock_irqrestore(&vc->stoltb_lock, flags);
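Source lines 293-315 are the stolen-time bookkeeping: when a vcore is preempted, kvmppc_core_start_stolen() stamps preempt_tb with the current timebase; when it runs again, kvmppc_core_end_stolen() adds the elapsed ticks to stolen_tb and resets preempt_tb to the TB_NIL sentinel, all under stoltb_lock. A compact userspace sketch of that pattern, with a pthread mutex standing in for the kernel spinlock and TB_NIL assumed to be an all-ones sentinel:

#include <stdint.h>
#include <pthread.h>

#define TB_NIL	(~(uint64_t)0)		/* sentinel: "not currently preempted" */

struct vcore_stolen {
	pthread_mutex_t stoltb_lock;	/* protects the two fields below */
	uint64_t stolen_tb;		/* accumulated stolen timebase ticks */
	uint64_t preempt_tb;		/* timebase value when preemption began */
};

/* Preemption begins at timebase value tb: remember when. */
static void core_start_stolen(struct vcore_stolen *vc, uint64_t tb)
{
	pthread_mutex_lock(&vc->stoltb_lock);
	vc->preempt_tb = tb;
	pthread_mutex_unlock(&vc->stoltb_lock);
}

/* Preemption ends at timebase value tb: accumulate the elapsed ticks. */
static void core_end_stolen(struct vcore_stolen *vc, uint64_t tb)
{
	pthread_mutex_lock(&vc->stoltb_lock);
	if (vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += tb - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	pthread_mutex_unlock(&vc->stoltb_lock);
}

The reader at source lines 773-785 (vcore_stolen_time()) follows the same locking: it returns stolen_tb and, if the vcore is not inactive and a preemption is still in progress, adds the ticks elapsed since preempt_tb.
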
320 struct kvmppc_vcore *vc = vcpu->arch.vcore;
327 vc->stolen_tb += mftb() - vcpu->arch.busy_preempt;
336 * We can test vc->runner without taking the vcore lock,
337 * because only this task ever sets vc->runner to this
341 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
342 kvmppc_core_end_stolen(vc, now);
355 struct kvmppc_vcore *vc = vcpu->arch.vcore;
377 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
378 kvmppc_core_start_stolen(vc, now);
415 struct kvmppc_vcore *vc = vcpu->arch.vcore;
470 spin_lock(&vc->lock);
471 vc->arch_compat = arch_compat;
477 vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
478 spin_unlock(&vc->lock);
773 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
780 spin_lock_irqsave(&vc->stoltb_lock, flags);
781 p = vc->stolen_tb;
782 if (vc->vcore_state != VCORE_INACTIVE &&
783 vc->preempt_tb != TB_NIL)
784 p += now - vc->preempt_tb;
785 spin_unlock_irqrestore(&vc->stoltb_lock, flags);
824 struct kvmppc_vcore *vc)
838 core_stolen = vcore_stolen_time(vc, now);
848 __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + kvmppc_get_tb_offset(vcpu), stolen);
854 struct kvmppc_vcore *vc,
865 stolen = vc->stolen_tb;
871 __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta);
880 struct kvmppc_vcore *vc;
892 vc = vcpu->arch.vcore;
893 thr = vcpu->vcpu_id - vc->first_vcpuid;
894 return !!(vc->dpdes & (1 << thr));
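Source lines 892-894 derive a vcpu's thread index within its vcore (vcpu_id minus the vcore's first_vcpuid) and test the matching bit of the dpdes doorbell bitmap. The index/bit arithmetic in isolation, with an illustrative struct:

#include <stdbool.h>
#include <stdint.h>

struct mini_vcore {
	uint32_t first_vcpuid;		/* vcpu id of thread 0 in this vcore */
	uint64_t dpdes;			/* one pending-doorbell bit per thread */
};

/* True if a doorbell is pending for the given vcpu of this vcore. */
static bool doorbell_pending(const struct mini_vcore *vc, uint32_t vcpu_id)
{
	uint32_t thr = vcpu_id - vc->first_vcpuid;	/* thread index within the vcore */

	return (vc->dpdes >> thr) & 1;
}
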
2201 struct kvmppc_vcore *vc = vcpu->arch.vcore;
2204 spin_lock(&vc->lock);
2220 (vc->lpcr & ~mask) | (new_lpcr & mask));
2226 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
2231 if (vcpu->arch.vcore != vc)
2240 vc->lpcr = new_lpcr;
2243 spin_unlock(&vc->lock);
3173 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
3182 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
3187 --vc->n_runnable;
3188 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
3332 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
3337 cpu = vc->pcpu;
3344 vcpu->cpu = vc->pcpu;
3349 tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
3353 tpaca->kvm_hstate.kvm_vcore = vc;
3442 static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
3448 vc->vcore_state = VCORE_PREEMPT;
3449 vc->pcpu = smp_processor_id();
3450 if (vc->num_threads < threads_per_vcore(vc->kvm)) {
3452 list_add_tail(&vc->preempt_list, &lp->list);
3457 kvmppc_core_start_stolen(vc, mftb());
3460 static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
3466 kvmppc_core_end_stolen(vc, mftb());
3467 if (!list_empty(&vc->preempt_list)) {
3468 lp = &per_cpu(preempted_vcores, vc->pcpu);
3470 list_del_init(&vc->preempt_list);
3473 vc->vcore_state = VCORE_INACTIVE;
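Source lines 3442-3473 park a preempted vcore on a per-CPU list (and start stolen-time accounting), then unlink it when the preemption ends. The sketch below shows only the list bookkeeping, with a hand-rolled circular intrusive list in place of the kernel's list_head, a plain array in place of per-CPU data, and no locking; the kernel also takes the per-CPU list's lock and only queues vcores that do not occupy a full physical core.

#include <stdbool.h>

#define NR_CPUS 4			/* illustrative */

/* Minimal circular intrusive list node, standing in for struct list_head. */
struct node {
	struct node *prev, *next;
};

static void list_init(struct node *n)		{ n->prev = n->next = n; }
static bool list_empty(const struct node *n)	{ return n->next == n; }

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

/* One list of preempted vcores per physical CPU; list_init() each head once at startup. */
static struct node preempted_vcores[NR_CPUS];

struct mini_vcore {
	int pcpu;			/* CPU this vcore was preempted on */
	struct node preempt_list;	/* membership in that CPU's list; list_init()ed at creation */
};

/* Preemption begins: remember the CPU and queue the vcore there. */
static void vcore_preempt(struct mini_vcore *vc, int this_cpu)
{
	vc->pcpu = this_cpu;
	list_add_tail(&vc->preempt_list, &preempted_vcores[this_cpu]);
}

/* Preemption ends: unlink from whichever CPU's list holds the vcore. */
static void vcore_end_preempt(struct mini_vcore *vc)
{
	if (!list_empty(&vc->preempt_list))
		list_del_init(&vc->preempt_list);
}
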
3485 struct kvmppc_vcore *vc[MAX_SUBCORES];
3494 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
3498 cip->max_subcore_threads = vc->num_threads;
3499 cip->total_threads = vc->num_threads;
3500 cip->subcore_threads[0] = vc->num_threads;
3501 cip->vc[0] = vc;
3528 static void init_vcore_to_run(struct kvmppc_vcore *vc)
3530 vc->entry_exit_map = 0;
3531 vc->in_guest = 0;
3532 vc->napping_threads = 0;
3533 vc->conferring_threads = 0;
3534 vc->tb_offset_applied = 0;
3537 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
3539 int n_threads = vc->num_threads;
3546 if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
3557 cip->total_threads += vc->num_threads;
3558 cip->subcore_threads[sub] = vc->num_threads;
3559 cip->vc[sub] = vc;
3560 init_vcore_to_run(vc);
3561 list_del_init(&vc->preempt_list);
3579 static void prepare_threads(struct kvmppc_vcore *vc)
3584 for_each_runnable_thread(i, vcpu, vc) {
3593 kvmppc_remove_runnable(vc, vcpu, mftb());
3633 struct kvmppc_vcore *vc;
3636 vc = cip->vc[sub];
3637 if (!vc->kvm->arch.mmu_ready)
3639 for_each_runnable_thread(i, vcpu, vc)
3646 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
3653 spin_lock(&vc->lock);
3655 for_each_runnable_thread(i, vcpu, vc) {
3663 spin_unlock(&vc->lock);
3679 spin_lock(&vc->lock);
3688 kvmppc_remove_runnable(vc, vcpu, mftb());
3694 kvmppc_vcore_preempt(vc);
3695 } else if (vc->runner) {
3696 vc->vcore_state = VCORE_PREEMPT;
3697 kvmppc_core_start_stolen(vc, mftb());
3699 vc->vcore_state = VCORE_INACTIVE;
3701 if (vc->n_runnable > 0 && vc->runner == NULL) {
3704 vcpu = next_runnable_thread(vc, &i);
3708 spin_unlock(&vc->lock);
3773 * Called with vc->lock held.
3775 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
3800 prepare_threads(vc);
3803 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
3807 * Initialize *vc.
3809 init_vcore_to_run(vc);
3810 vc->preempt_tb = TB_NIL;
3817 controlled_threads = threads_per_vcore(vc->kvm);
3825 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
3826 for_each_runnable_thread(i, vcpu, vc) {
3828 kvmppc_remove_runnable(vc, vcpu, mftb());
3838 init_core_info(&core_info, vc);
3843 if (vc->num_threads < target_threads)
3858 vc->vcore_state = VCORE_INACTIVE;
3861 pvc = core_info.vc[sub];
3884 split_info.vc[sub] = core_info.vc[sub];
3938 int n_online = atomic_read(&vc->online_count);
3956 pvc = core_info.vc[sub];
4000 vc->vcore_state = VCORE_RUNNING;
4003 trace_kvmppc_run_core(vc, 0);
4006 spin_unlock(&core_info.vc[sub]->lock);
4010 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
4020 srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
4024 spin_lock(&vc->lock);
4026 vc->vcore_state = VCORE_EXITING;
4076 spin_unlock(&vc->lock);
4084 pvc = core_info.vc[sub];
4085 post_guest_process(pvc, pvc == vc);
4088 spin_lock(&vc->lock);
4091 vc->vcore_state = VCORE_INACTIVE;
4092 trace_kvmppc_run_core(vc, 1);
4365 static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
4372 spin_unlock(&vc->lock);
4374 spin_lock(&vc->lock);
4379 static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
4384 vc->halt_poll_ns *= halt_poll_ns_grow;
4385 if (vc->halt_poll_ns < halt_poll_ns_grow_start)
4386 vc->halt_poll_ns = halt_poll_ns_grow_start;
4389 static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
4392 vc->halt_poll_ns = 0;
4394 vc->halt_poll_ns /= halt_poll_ns_shrink;
4432 static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
4437 for_each_runnable_thread(i, vcpu, vc) {
4447 * or external interrupt to one of the vcpus. vc->lock is held.
4449 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
4459 if (vc->halt_poll_ns) {
4460 ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
4461 ++vc->runner->stat.generic.halt_attempted_poll;
4463 vc->vcore_state = VCORE_POLLING;
4464 spin_unlock(&vc->lock);
4467 if (kvmppc_vcore_check_block(vc)) {
4474 spin_lock(&vc->lock);
4475 vc->vcore_state = VCORE_INACTIVE;
4478 ++vc->runner->stat.generic.halt_successful_poll;
4483 prepare_to_rcuwait(&vc->wait);
4485 if (kvmppc_vcore_check_block(vc)) {
4486 finish_rcuwait(&vc->wait);
4489 if (vc->halt_poll_ns)
4490 ++vc->runner->stat.generic.halt_successful_poll;
4496 vc->vcore_state = VCORE_SLEEPING;
4497 trace_kvmppc_vcore_blocked(vc->runner, 0);
4498 spin_unlock(&vc->lock);
4500 finish_rcuwait(&vc->wait);
4501 spin_lock(&vc->lock);
4502 vc->vcore_state = VCORE_INACTIVE;
4503 trace_kvmppc_vcore_blocked(vc->runner, 1);
4504 ++vc->runner->stat.halt_successful_wait;
4513 vc->runner->stat.generic.halt_wait_ns +=
4516 vc->runner->stat.generic.halt_wait_hist,
4519 if (vc->halt_poll_ns) {
4520 vc->runner->stat.generic.halt_poll_fail_ns +=
4524 vc->runner->stat.generic.halt_poll_fail_hist,
4530 if (vc->halt_poll_ns) {
4531 vc->runner->stat.generic.halt_poll_success_ns +=
4535 vc->runner->stat.generic.halt_poll_success_hist,
4542 if (block_ns <= vc->halt_poll_ns)
4545 else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
4546 shrink_halt_poll_ns(vc);
4548 else if (vc->halt_poll_ns < halt_poll_ns &&
4550 grow_halt_poll_ns(vc);
4551 if (vc->halt_poll_ns > halt_poll_ns)
4552 vc->halt_poll_ns = halt_poll_ns;
4554 vc->halt_poll_ns = 0;
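Source lines 4379-4394 and 4519-4554 implement adaptive halt polling per vcore: the runner polls for up to halt_poll_ns before sleeping, records the poll/wait statistics, and then retunes the window. The window is kept when the block fit inside it, shrunk when the sleep overshot the global halt_poll_ns cap, grown when a larger window looks like it would have avoided sleeping, and always clamped to the cap. A condensed sketch of that retuning policy, using the tunables visible in the fragments (the values assigned here are purely illustrative; in the kernel they are module parameters, and the exact grow condition is paraphrased):

#include <stdint.h>

/* Tunables; module parameters in the kernel (values here are illustrative). */
static uint64_t halt_poll_ns            = 500000;	/* global cap on the polling window */
static uint64_t halt_poll_ns_grow       = 2;		/* growth multiplier (0 disables growth) */
static uint64_t halt_poll_ns_grow_start = 50000;	/* floor once growth kicks in */
static uint64_t halt_poll_ns_shrink     = 2;		/* shrink divisor (0 means reset to 0) */

struct poll_state {
	uint64_t halt_poll_ns;		/* current per-vcore polling window */
};

static void grow_poll(struct poll_state *vc)
{
	if (!halt_poll_ns_grow)
		return;
	vc->halt_poll_ns *= halt_poll_ns_grow;
	if (vc->halt_poll_ns < halt_poll_ns_grow_start)
		vc->halt_poll_ns = halt_poll_ns_grow_start;
}

static void shrink_poll(struct poll_state *vc)
{
	if (!halt_poll_ns_shrink)
		vc->halt_poll_ns = 0;
	else
		vc->halt_poll_ns /= halt_poll_ns_shrink;
}

/* Retune the window after the vcore blocked for block_ns nanoseconds. */
static void adjust_poll(struct poll_state *vc, uint64_t block_ns)
{
	if (!halt_poll_ns) {				/* polling disabled globally */
		vc->halt_poll_ns = 0;
		return;
	}
	if (block_ns <= vc->halt_poll_ns)
		;					/* window was adequate: keep it */
	else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
		shrink_poll(vc);			/* slept far too long: back off */
	else if (vc->halt_poll_ns < halt_poll_ns && block_ns < halt_poll_ns)
		grow_poll(vc);				/* a larger window might have avoided sleeping */
	if (vc->halt_poll_ns > halt_poll_ns)
		vc->halt_poll_ns = halt_poll_ns;	/* clamp to the global cap */
}
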
4586 struct kvmppc_vcore *vc;
4599 vc = vcpu->arch.vcore;
4600 spin_lock(&vc->lock);
4603 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
4606 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
4607 ++vc->n_runnable;
4615 if ((vc->vcore_state == VCORE_PIGGYBACK ||
4616 vc->vcore_state == VCORE_RUNNING) &&
4617 !VCORE_IS_EXITING(vc)) {
4618 kvmppc_update_vpa_dispatch(vcpu, vc);
4619 kvmppc_start_thread(vcpu, vc);
4621 } else if (vc->vcore_state == VCORE_SLEEPING) {
4622 rcuwait_wake_up(&vc->wait);
4631 spin_unlock(&vc->lock);
4633 spin_lock(&vc->lock);
4643 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
4644 kvmppc_vcore_end_preempt(vc);
4646 if (vc->vcore_state != VCORE_INACTIVE) {
4647 kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
4650 for_each_runnable_thread(i, v, vc) {
4653 kvmppc_remove_runnable(vc, v, mftb());
4660 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
4663 for_each_runnable_thread(i, v, vc) {
4669 vc->runner = vcpu;
4670 if (n_ceded == vc->n_runnable) {
4671 kvmppc_vcore_blocked(vc);
4673 kvmppc_vcore_preempt(vc);
4675 cond_resched_lock(&vc->lock);
4676 if (vc->vcore_state == VCORE_PREEMPT)
4677 kvmppc_vcore_end_preempt(vc);
4679 kvmppc_run_core(vc);
4681 vc->runner = NULL;
4685 (vc->vcore_state == VCORE_RUNNING ||
4686 vc->vcore_state == VCORE_EXITING ||
4687 vc->vcore_state == VCORE_PIGGYBACK))
4688 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
4690 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
4691 kvmppc_vcore_end_preempt(vc);
4694 kvmppc_remove_runnable(vc, vcpu, mftb());
4700 if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
4703 v = next_runnable_thread(vc, &i);
4708 spin_unlock(&vc->lock);
4719 struct kvmppc_vcore *vc;
4731 vc = vcpu->arch.vcore;
4769 vc->pcpu = pcpu;
4812 kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + kvmppc_get_tb_offset(vcpu));
5257 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
5258 if (!vc)
5261 spin_lock(&vc->lock);
5262 vc->lpcr = (vc->lpcr & ~mask) | lpcr;
5263 verify_lpcr(kvm, vc->lpcr);
5264 spin_unlock(&vc->lock);
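Source lines 2220 and 5262 both update LPCR with the same read-modify-write idiom under vc->lock: clear the caller's mask bits from the current value, then OR in the replacement bits (line 2220 masks the new value itself; line 5262 relies on the caller passing only bits inside mask). The arithmetic in isolation, as a hypothetical helper:

#include <stdint.h>

/* Replace only the bits selected by mask; all other bits of old are preserved. */
static inline uint64_t masked_update(uint64_t old, uint64_t new_bits, uint64_t mask)
{
	return (old & ~mask) | (new_bits & mask);
}

/* Example: masked_update(0xF0, 0x08, 0x0F) == 0xF8 -- only the low nibble changes. */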