Lines Matching refs:vcore

140  * online threads in the vcore being run.
179 /* Used to traverse the list of runnable threads for a given vcore */
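
The traversal helper this comment introduces is a macro; a minimal sketch of its likely shape, assuming a companion next_runnable_thread() that scans the vcore's runnable_threads[] array (both names beyond the matched comment are assumptions):

    /* Sketch: find the next populated runnable slot after *ip, or NULL. */
    static struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc, int *ip)
    {
        int i = *ip;
        struct kvm_vcpu *vcpu;

        while (++i < MAX_SMT_THREADS) {
            vcpu = READ_ONCE(vc->runnable_threads[i]);
            if (vcpu) {
                *ip = i;
                return vcpu;
            }
        }
        return NULL;
    }

    /* Used to traverse the list of runnable threads for a given vcore */
    #define for_each_runnable_thread(i, vcpu, vc) \
        for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
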
257 * run as part of a virtual core, but the task running the vcore
262 * of running the core, and the other vcpu tasks in the vcore will
267 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
270 * stolen time for a vcore when it is inactive, or for a vcpu
281 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
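
Taken together, lines 257-281 describe the vcore execution and stolen-time model: one vcpu task runs the whole core on behalf of the others, and time the vcore spends preempted is accounted as stolen. A minimal sketch of the bookkeeping implied here, assuming paired start/end helpers (the helper names are assumptions; the fields and the stoltb_lock rule come from the matched comments):

    /* Sketch: record the timebase at which the vcore stopped running. */
    static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb)
    {
        unsigned long flags;

        spin_lock_irqsave(&vc->stoltb_lock, flags);
        vc->preempt_tb = tb;
        spin_unlock_irqrestore(&vc->stoltb_lock, flags);
    }

    /* Sketch: fold the finished preemption interval into vc->stolen_tb. */
    static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb)
    {
        unsigned long flags;

        spin_lock_irqsave(&vc->stoltb_lock, flags);
        if (vc->preempt_tb != TB_NIL) {
            vc->stolen_tb += tb - vc->preempt_tb;
            vc->preempt_tb = TB_NIL;
        }
        spin_unlock_irqrestore(&vc->stoltb_lock, flags);
    }
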
320 struct kvmppc_vcore *vc = vcpu->arch.vcore;
336 * We can test vc->runner without taking the vcore lock,
355 struct kvmppc_vcore *vc = vcpu->arch.vcore;
415 struct kvmppc_vcore *vc = vcpu->arch.vcore;
512 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
770 * Return the accumulated stolen time for the vcore up until `now'.
771 * The caller should hold the vcore lock.
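
A sketch of an accessor matching that contract, using the stolen_tb/preempt_tb fields and stoltb_lock seen elsewhere in this listing (the function name vcore_stolen_time is an assumption):

    /* Sketch: accumulated stolen time, plus any interval still open. */
    static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
    {
        u64 p;
        unsigned long flags;

        spin_lock_irqsave(&vc->stoltb_lock, flags);
        p = vc->stolen_tb;
        if (vc->vcore_state != VCORE_INACTIVE && vc->preempt_tb != TB_NIL)
            p += now - vc->preempt_tb;  /* vcore is currently preempted */
        spin_unlock_irqrestore(&vc->stoltb_lock, flags);
        return p;
    }
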
887 * Ensure that the read of vcore->dpdes comes after the read
892 vc = vcpu->arch.vcore;
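
The barrier comment at 887 pairs a per-vcpu flag with the vcore-wide dpdes bitmap; a hedged sketch of the read side, with smp_rmb() ordering the two loads and the thread index derived from first_vcpuid as at line 3084 (names beyond the matched lines are assumptions):

    /* Sketch: a doorbell may be flagged per-vcpu or in the shared bitmap. */
    static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
    {
        int thr;
        struct kvmppc_vcore *vc;

        if (vcpu->arch.doorbell_request)
            return true;
        /*
         * Ensure that the read of vcore->dpdes comes after the read
         * of vcpu->doorbell_request.
         */
        smp_rmb();
        vc = vcpu->arch.vcore;
        thr = vcpu->vcpu_id - vc->first_vcpuid;
        return !!(vc->dpdes & (1 << thr));
    }
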
1042 struct kvmppc_vcore *vcore = target->arch.vcore;
1047 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
1051 * In the case of the P9 single vcpu per vcore case, the real
1053 * source vcore.
1056 spin_lock(&vcore->lock);
1058 vcore->vcore_state != VCORE_INACTIVE &&
1059 vcore->runner)
1060 target = vcore->runner;
1061 spin_unlock(&vcore->lock);
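
Lines 1042-1061 are a directed-yield helper: if the target vcpu's vcore is currently being run by another task, the yield is redirected at that runner, since it is the task whose time slice matters. A condensed sketch (the enclosing function name, the runnable-state check, and the final kvm_vcpu_yield_to() call are assumptions):

    /* Sketch: yield to the task actually running the target's vcore. */
    static int kvmppc_yield_to_vcore_runner(struct kvm_vcpu *target)
    {
        struct kvmppc_vcore *vcore = target->arch.vcore;

        spin_lock(&vcore->lock);
        if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
            vcore->vcore_state != VCORE_INACTIVE &&
            vcore->runner)
            target = vcore->runner;
        spin_unlock(&vcore->lock);

        return kvm_vcpu_yield_to(target);
    }
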
1515 * which will update its vcore->dpdes value.
1567 vcpu->arch.vcore->dpdes = 0;
2201 struct kvmppc_vcore *vc = vcpu->arch.vcore;
2224 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
2231 if (vcpu->arch.vcore != vc)
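
Lines 2224-2231 come from an LPCR update path: when the interrupt-little-endian (ILE) bit of the vcore's LPCR changes, the MSR_LE bit of every member vcpu's intr_msr must change with it. A sketch of that propagation, assuming a new_lpcr local and the standard kvm_for_each_vcpu() iterator:

    /* Sketch: keep each member vcpu's interrupt MSR endianness in
     * sync with the vcore's LPCR[ILE]. */
    if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
        struct kvm_vcpu *vcpu;
        unsigned long i;

        kvm_for_each_vcpu(i, vcpu, kvm) {
            if (vcpu->arch.vcore != vc)
                continue;   /* vcpu belongs to a different vcore */
            if (new_lpcr & LPCR_ILE)
                vcpu->arch.intr_msr |= MSR_LE;
            else
                vcpu->arch.intr_msr &= ~MSR_LE;
        }
    }
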
2329 * either vcore->dpdes or doorbell_request.
2335 *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
2578 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
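
Lines 2329-2335 and 2578 are the ONE_REG accessors for the doorbell state; per the comment, a pending doorbell can be recorded in either vcore->dpdes or the per-vcpu doorbell_request depending on CPU generation. A sketch of the get side under that reading (the CPU_FTR_ARCH_300 split is inferred from the comment, not shown in the matches):

    case KVM_REG_PPC_DPDES:
        /* Sketch: report 1 bit per vcpu, from whichever location
         * this CPU generation records doorbells in. */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
            *val = get_reg_val(id, vcpu->arch.doorbell_request);
        else
            *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
        break;
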
2754 atomic_inc(&vcpu->arch.vcore->online_count);
2756 atomic_dec(&vcpu->arch.vcore->online_count);
2789 struct kvmppc_vcore *vcore;
2791 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
2793 if (vcore == NULL)
2796 spin_lock_init(&vcore->lock);
2797 spin_lock_init(&vcore->stoltb_lock);
2798 rcuwait_init(&vcore->wait);
2799 vcore->preempt_tb = TB_NIL;
2800 vcore->lpcr = kvm->arch.lpcr;
2801 vcore->first_vcpuid = id;
2802 vcore->kvm = kvm;
2803 INIT_LIST_HEAD(&vcore->preempt_list);
2805 return vcore;
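
The matches at 2789-2805 cover almost the whole vcore constructor; filling in only the elided control flow (the function signature and the NULL early-return are assumptions), it plausibly reads:

    static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
    {
        struct kvmppc_vcore *vcore;

        vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
        if (vcore == NULL)
            return NULL;

        spin_lock_init(&vcore->lock);
        spin_lock_init(&vcore->stoltb_lock);
        rcuwait_init(&vcore->wait);
        vcore->preempt_tb = TB_NIL;       /* no preemption in progress */
        vcore->lpcr = kvm->arch.lpcr;     /* inherit the VM-wide LPCR */
        vcore->first_vcpuid = id;         /* id of the vcore's first vcpu */
        vcore->kvm = kvm;
        INIT_LIST_HEAD(&vcore->preempt_list);

        return vcore;
    }
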
2963 struct kvmppc_vcore *vcore;
3043 vcore = NULL;
3057 vcore = kvm->arch.vcores[core];
3058 if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
3060 vcore = NULL;
3061 } else if (!vcore) {
3067 vcore = kvmppc_vcore_create(kvm,
3070 kvm->arch.vcores[core] = vcore;
3077 if (!vcore)
3080 spin_lock(&vcore->lock);
3081 ++vcore->num_threads;
3082 spin_unlock(&vcore->lock);
3083 vcpu->arch.vcore = vcore;
3084 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
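
Lines 2963-3084 trace vcpu creation: the vcpu id maps to a slot in kvm->arch.vcores[], an existing vcore is reused where the architecture allows sharing (on ARCH_300 each vcpu gets its own, so a hit is a collision), otherwise one is created; the vcpu then derives its thread id from its offset into the vcore. A condensed sketch with error handling and the ARCH_300 id packing elided (the -ENOMEM path is an assumption):

    /* Sketch: find or create the vcore for this vcpu, then attach. */
    core = id / kvm->arch.smt_mode;
    vcore = NULL;
    if (core < KVM_MAX_VCORES) {
        vcore = kvm->arch.vcores[core];
        if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
            vcore = NULL;   /* one vcpu per vcore here: id collision */
        } else if (!vcore) {
            vcore = kvmppc_vcore_create(kvm,
                    id & ~(kvm->arch.smt_mode - 1));
            kvm->arch.vcores[core] = vcore;
        }
    }
    if (!vcore)
        return -ENOMEM;

    spin_lock(&vcore->lock);
    ++vcore->num_threads;
    spin_unlock(&vcore->lock);
    vcpu->arch.vcore = vcore;
    vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
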
3107 * so we pack smt_mode vcpus per vcore.
3114 * so each vcpu gets its own vcore.
3368 * We set the vcore pointer when starting a thread
3370 * for any threads that still have a non-NULL vcore ptr.
3568 * vcore *pvc onto the execution of the other vcores described in *cip.
3657 * It's safe to unlock the vcore in the loop here, because
3659 * the vcpu, and the vcore state is VCORE_EXITING here,
3859 /* Unlock all except the primary vcore */
3942 * or if the vcore's online count looks bogus.
3984 * the vcore pointer in the PACA of the secondaries.
4429 * Check to see if any of the runnable vcpus on the vcore have pending
4446 * All the vcpus in this vcore are idle, so wait for a decrementer
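
The comments at 4429 and 4446 bracket the idle path: before the runner sleeps waiting for a decrementer or external interrupt, it re-checks every runnable vcpu for pending work. A sketch of that check, reusing the for_each_runnable_thread traversal from line 179 (the function and kvmppc_vcpu_woken() names are assumptions):

    /* Sketch: nonzero if any runnable vcpu should keep the runner awake. */
    static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
    {
        struct kvm_vcpu *vcpu;
        int i;

        for_each_runnable_thread(i, vcpu, vc) {
            if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
                return 1;   /* not ceded, or an interrupt woke it */
        }
        return 0;
    }
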
4599 vc = vcpu->arch.vcore;
4611 * If the vcore is already running, we may be able to start
4731 vc = vcpu->arch.vcore;
4959 atomic_inc(&vcpu->arch.vcore->online_count);
4988 vcpu->arch.waitp = &vcpu->arch.vcore->wait;
4996 vcpu->arch.vcore->lpcr);
5691 * all vCPUs in a vcore have to run on the same (sub)core,