Lines Matching defs:xc

39 static void xive_vm_ack_pending(struct kvmppc_xive_vcpu *xc)
65 xc->pending |= 1 << cppr;
68 if (cppr >= xc->hw_cppr)
70 smp_processor_id(), cppr, xc->hw_cppr);
74 * xc->cppr, this will be done as we scan for interrupts
77 xc->hw_cppr = cppr;
133 static u32 xive_vm_scan_interrupts(struct kvmppc_xive_vcpu *xc,
140 while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
152 if (prio >= xc->cppr || prio > 7) {
153 if (xc->mfrr < xc->cppr) {
154 prio = xc->mfrr;
161 q = &xc->queues[prio];
196 xive_vm_source_eoi(xc->vp_ipi,
197 &xc->vp_ipi_data);
234 if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
235 prio = xc->mfrr;
252 xc->pending = pending;
266 * Note: This can only make xc->cppr smaller as the previous
268 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
272 xc->cppr = prio;
279 if (xc->cppr != xc->hw_cppr) {
280 xc->hw_cppr = xc->cppr;
281 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR);
289 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
295 xc->stat_vm_h_xirr++;
298 xive_vm_ack_pending(xc);
301 xc->pending, xc->hw_cppr, xc->cppr);
304 old_cppr = xive_prio_to_guest(xc->cppr);
307 hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch);
310 hirq, xc->hw_cppr, xc->cppr);
338 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
339 u8 pending = xc->pending;
344 xc->stat_vm_h_ipoll++;
347 if (xc->server_num != server) {
351 xc = vcpu->arch.xive_vcpu;
364 hirq = xive_vm_scan_interrupts(xc, pending, scan_poll);
367 kvmppc_set_gpr(vcpu, 4, hirq | (xc->cppr << 24));
372 static void xive_vm_push_pending_to_hw(struct kvmppc_xive_vcpu *xc)
376 pending = xc->pending;
377 if (xc->mfrr != 0xff) {
378 if (xc->mfrr < 8)
379 pending |= 1 << xc->mfrr;
391 struct kvmppc_xive_vcpu *xc)
396 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
397 struct xive_q *q = &xc->queues[prio];
429 if (xc->server_num == state->act_server)
458 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
464 xc->stat_vm_h_cppr++;
470 old_cppr = xc->cppr;
471 xc->cppr = cppr;
474 * Order the above update of xc->cppr with the subsequent
475 * read of xc->mfrr inside push_pending_to_hw()
486 xive_vm_push_pending_to_hw(xc);
505 xive_vm_scan_for_rerouted_irqs(xive, xc);
509 xc->hw_cppr = cppr;
520 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
529 xc->stat_vm_h_eoi++;
531 xc->cppr = xive_prio_from_guest(new_cppr);
541 * This barrier orders the setting of xc->cppr vs.
542 * subsequent test of xc->mfrr done inside
566 * of xc->cppr vs. subsequent test of xc->mfrr done inside
609 xive_vm_scan_interrupts(xc, xc->pending, scan_eoi);
610 xive_vm_push_pending_to_hw(xc);
611 pr_devel(" after scan pending=%02x\n", xc->pending);
614 xc->hw_cppr = xc->cppr;
615 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR);
623 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
627 xc->stat_vm_h_ipi++;
633 xc = vcpu->arch.xive_vcpu;
636 xc->mfrr = mfrr;
639 * The load of xc->cppr below and the subsequent MMIO store
644 * updating xc->cppr then reading xc->mfrr.
646 * - The target of the IPI sees the xc->mfrr update
651 if (mfrr < xc->cppr)
652 __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data));
665 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
668 return xc->vp_cam & TM_QW1W2_HO;
673 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
674 struct kvmppc_xive *xive = xc->xive;
869 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
870 struct xive_q *q = &xc->queues[prio];
875 if (xc->esc_virq[prio])
879 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
880 if (!xc->esc_virq[prio]) {
882 prio, xc->server_num);
888 vcpu->kvm->arch.lpid, xc->server_num);
891 vcpu->kvm->arch.lpid, xc->server_num, prio);
894 prio, xc->server_num);
899 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
901 rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
905 prio, xc->server_num);
908 xc->esc_virq_names[prio] = name;
919 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
930 irq_dispose_mapping(xc->esc_virq[prio]);
931 xc->esc_virq[prio] = 0;
938 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
939 struct kvmppc_xive *xive = xc->xive;
940 struct xive_q *q = &xc->queues[prio];
951 prio, xc->server_num);
963 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
967 prio, xc->server_num);
1008 struct kvmppc_xive_vcpu *xc;
1017 xc = vcpu->arch.xive_vcpu;
1018 if (WARN_ON(!xc))
1021 q = &xc->queues[prio];
1027 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1031 if (WARN_ON(!xc))
1033 if (!xc->valid)
1036 q = &xc->queues[prio];
1481 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1483 if (!xc)
1487 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
1488 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
1494 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1499 if (!xc || !xive)
1509 xc->server_num, cppr, mfrr, xisr);
1521 xc->hw_cppr = xc->cppr = cppr;
1529 xc->mfrr = mfrr;
1531 xive_irq_trigger(&xc->vp_ipi_data);
1543 xc->delayed_irq = xisr;
1731 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1748 if (state->act_server != xc->server_num)
1806 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1813 if (!xc)
1816 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1819 xc->valid = false;
1823 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1827 if (xc->esc_virq[i]) {
1828 if (kvmppc_xive_has_single_escalation(xc->xive))
1829 xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]);
1830 free_irq(xc->esc_virq[i], vcpu);
1831 irq_dispose_mapping(xc->esc_virq[i]);
1832 kfree(xc->esc_virq_names[i]);
1837 xive_native_disable_vp(xc->vp_id);
1844 struct xive_q *q = &xc->queues[i];
1846 xive_native_disable_queue(xc->vp_id, q, i);
1855 if (xc->vp_ipi) {
1856 xive_cleanup_irq_data(&xc->vp_ipi_data);
1857 xive_native_free_irq(xc->vp_ipi);
1860 kfree(xc);
1907 struct kvmppc_xive_vcpu *xc;
1929 xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1930 if (!xc) {
1935 vcpu->arch.xive_vcpu = xc;
1936 xc->xive = xive;
1937 xc->vcpu = vcpu;
1938 xc->server_num = cpu;
1939 xc->vp_id = vp_id;
1940 xc->mfrr = 0xff;
1941 xc->valid = true;
1943 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1955 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1958 xc->vp_ipi = xive_native_alloc_irq();
1959 if (!xc->vp_ipi) {
1964 pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1966 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1974 r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
1988 struct xive_q *q = &xc->queues[i];
2003 r = xive_native_configure_queue(xc->vp_id,
2019 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
2021 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
2144 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2145 if (!xc)
2148 if (xc->queues[j].qpage)
2149 xive_pre_save_queue(xive, &xc->queues[j]);
2312 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2314 if (!xc)
2317 if (xc->delayed_irq == irq) {
2318 xc->delayed_irq = 0;
2810 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2814 struct xive_q *q = &xc->queues[i];
2817 if (!q->qpage && !xc->esc_virq[i])
2829 if (xc->esc_virq[i]) {
2830 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2836 xc->esc_virq[i],
2906 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2908 if (!xc)
2913 xc->server_num, xc->vp_id, xc->vp_chip_id,
2914 xc->cppr, xc->hw_cppr,
2915 xc->mfrr, xc->pending,
2916 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
2920 t_rm_h_xirr += xc->stat_rm_h_xirr;
2921 t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2922 t_rm_h_cppr += xc->stat_rm_h_cppr;
2923 t_rm_h_eoi += xc->stat_rm_h_eoi;
2924 t_rm_h_ipi += xc->stat_rm_h_ipi;
2925 t_vm_h_xirr += xc->stat_vm_h_xirr;
2926 t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2927 t_vm_h_cppr += xc->stat_vm_h_cppr;
2928 t_vm_h_eoi += xc->stat_vm_h_eoi;
2929 t_vm_h_ipi += xc->stat_vm_h_ipi;
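
Taken together, the matches above describe the per-vCPU state behind xc (struct kvmppc_xive_vcpu in the PowerPC KVM XIVE emulation). As a reading aid, the sketch below reconstructs that structure from nothing but the fields referenced in the listing; field types, ordering, the KVMPPC_XIVE_Q_COUNT value and all comments are assumptions, so treat the kernel's own header as authoritative, not this sketch.

/*
 * Sketch only: mirrors the fields of struct kvmppc_xive_vcpu that the
 * matched lines touch. Types and ordering are guesses reconstructed
 * from the listing, not the kernel's definition.
 */
#include <linux/types.h>	/* bool, u8, u32, u64 */
#include <asm/xive.h>		/* struct xive_q, struct xive_irq_data */

struct kvmppc_xive;		/* owning KVM XIVE device, opaque here */
struct kvm_vcpu;

#define KVMPPC_XIVE_Q_COUNT	8	/* assumed: one queue per priority 0..7 */

struct kvmppc_xive_vcpu_sketch {
	struct kvmppc_xive	*xive;		/* owning device (xc->xive) */
	struct kvm_vcpu		*vcpu;		/* back-pointer to the vCPU */
	bool			valid;		/* set on connect, cleared on cleanup */

	u32			server_num;	/* guest-visible server number */
	u32			vp_id;		/* HW virtual processor ID */
	u32			vp_cam;		/* CAM line value (TM_QW1W2_* bits) */
	u32			vp_chip_id;
	u32			vp_ipi;		/* HW irq used as this vCPU's IPI */
	struct xive_irq_data	vp_ipi_data;	/* ESB data for that IPI */

	/* Software-emulated presentation state */
	u8			cppr;		/* current priority seen by the guest */
	u8			hw_cppr;	/* last CPPR pushed to the TIMA */
	u8			mfrr;		/* IPI priority, 0xff means none */
	u8			pending;	/* bitmap of priorities with work queued */

	struct xive_q		queues[KVMPPC_XIVE_Q_COUNT];
	u32			esc_virq[KVMPPC_XIVE_Q_COUNT];	/* escalation Linux irqs */
	char			*esc_virq_names[KVMPPC_XIVE_Q_COUNT];

	u32			delayed_irq;	/* irq stashed while restoring ICP state */

	/* Hypercall counters (real-mode and virtual-mode paths) */
	u64			stat_rm_h_xirr, stat_rm_h_ipoll, stat_rm_h_cppr,
				stat_rm_h_eoi, stat_rm_h_ipi;
	u64			stat_vm_h_xirr, stat_vm_h_ipoll, stat_vm_h_cppr,
				stat_vm_h_eoi, stat_vm_h_ipi;
};

Two points visible in the listing are worth keeping in mind while reading it: hw_cppr shadows the value last written to the thread management area (lines 281, 615, 1521), and pending together with mfrr drives xive_vm_scan_interrupts(), whose own comment (lines 266-268) notes that the scan can only make xc->cppr smaller.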