Lines Matching refs:xive

6 #define pr_fmt(fmt) "xive-kvm: " fmt
22 #include <asm/xive.h>
23 #include <asm/xive-regs.h>
390 static void xive_vm_scan_for_rerouted_irqs(struct kvmppc_xive *xive,
423 sb = kvmppc_xive_find_source(xive, irq, &src);
459 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
500 * interrupt away, which flushes the xive, followed by the
505 xive_vm_scan_for_rerouted_irqs(xive, xc);
517 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
550 sb = kvmppc_xive_find_source(xive, irq, &src);
674 struct kvmppc_xive *xive = xc->xive;
676 if (xive->flags & KVMPPC_XIVE_FLAG_SAVE_RESTORE)
939 struct kvmppc_xive *xive = xc->xive;
948 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
954 memset(qpage, 0, 1 << xive->q_order);
964 xive->q_order, true);
971 /* Called with xive->lock held */
974 struct kvmppc_xive *xive = kvm->arch.xive;
979 lockdep_assert_held(&xive->lock);
982 if (xive->qmap & (1 << prio))
992 if (rc == 0 && !kvmppc_xive_has_single_escalation(xive))
994 kvmppc_xive_has_single_escalation(xive));
1001 xive->qmap |= (1 << prio);
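
The qmap lines above (971-1001) record, with one bit per priority, which event queues have already been provisioned so that the work is done at most once per priority. A minimal userspace sketch of that bookkeeping, assuming an 8-bit priority space and with the actual per-vCPU queue setup elided:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t qmap;                      /* bit N set => priority N already provisioned */

    static bool provision_queue(unsigned int prio)
    {
            if (qmap & (1u << prio))          /* already provisioned, nothing to do */
                    return true;
            /* ... allocate and configure the queue for every vCPU here ... */
            qmap |= (1u << prio);             /* remember it so the work happens only once */
            return true;
    }

    int main(void)
    {
            provision_queue(5);
            provision_queue(5);               /* second call is a no-op */
            printf("qmap=0x%x\n", qmap);
            return 0;
    }
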
1084 static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
1143 static void xive_finish_unmask(struct kvmppc_xive *xive,
1187 struct kvmppc_xive *xive = kvm->arch.xive;
1224 kvmppc_xive_vp(xive, server),
1271 struct kvmppc_xive *xive = kvm->arch.xive;
1278 if (!xive)
1286 mutex_lock(&xive->lock);
1287 rc = xive_check_provisioning(xive->kvm,
1289 mutex_unlock(&xive->lock);
1296 sb = kvmppc_xive_find_source(xive, irq, &idx);
1316 xive_lock_and_mask(xive, sb, state);
1357 xive_finish_unmask(xive, sb, state, priority);
1372 struct kvmppc_xive *xive = kvm->arch.xive;
1377 if (!xive)
1380 sb = kvmppc_xive_find_source(xive, irq, &idx);
1394 struct kvmppc_xive *xive = kvm->arch.xive;
1399 if (!xive)
1402 sb = kvmppc_xive_find_source(xive, irq, &idx);
1425 xive_finish_unmask(xive, sb, state, state->saved_priority);
1433 struct kvmppc_xive *xive = kvm->arch.xive;
1438 if (!xive)
1441 sb = kvmppc_xive_find_source(xive, irq, &idx);
1451 state->saved_priority = xive_lock_and_mask(xive, sb, state);
1457 static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
1463 sb = kvmppc_xive_find_source(xive, irq, &idx);
1495 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1499 if (!xc || !xive)
1539 * case, we keep that info and increment a counter in the xive to
1542 if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
1544 xive->delayed_irqs++;
1554 struct kvmppc_xive *xive = kvm->arch.xive;
1564 if (!xive)
1570 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1599 prio = xive_lock_and_mask(xive, sb, state);
1610 if (xive->ops && xive->ops->reset_mapped)
1611 xive->ops->reset_mapped(kvm, guest_irq);
1624 kvmppc_xive_vp(xive, state->act_server),
1652 struct kvmppc_xive *xive = kvm->arch.xive;
1659 if (!xive)
1664 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1674 prio = xive_lock_and_mask(xive, sb, state);
1701 if (xive->ops && xive->ops->reset_mapped) {
1702 xive->ops->reset_mapped(kvm, guest_irq);
1707 kvmppc_xive_vp(xive, state->act_server),
1733 struct kvmppc_xive *xive = kvm->arch.xive;
1736 for (i = 0; i <= xive->max_sbid; i++) {
1737 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1807 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1828 if (kvmppc_xive_has_single_escalation(xc->xive))
1849 xive->q_page_order);
1867 static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
1869 /* We have a block of xive->nr_servers VPs. We just need to check
1872 return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
1875 int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
1879 if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
1884 if (xive->vp_base == XIVE_INVALID_VP) {
1885 xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
1886 pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
1888 if (xive->vp_base == XIVE_INVALID_VP)
1892 vp_id = kvmppc_xive_vp(xive, cpu);
1893 if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
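
The kvmppc_xive_compute_vp_id() lines above (1867-1893) check that a vCPU id falls inside the block of xive->nr_servers VPs and allocate that block lazily on first use. A standalone sketch of the same idea, with alloc_vp_block() standing in for xive_native_alloc_vp_block() and the vCPU-id packing step omitted:

    #include <stdint.h>
    #include <stdio.h>

    #define XIVE_INVALID_VP 0xffffffffu

    static uint32_t vp_base = XIVE_INVALID_VP;
    static uint32_t nr_servers = 8;

    /* Stand-in for xive_native_alloc_vp_block(): pretend the hypervisor
     * hands back a contiguous block of VP ids starting at 0x80. */
    static uint32_t alloc_vp_block(uint32_t n)
    {
            (void)n;
            return 0x80;
    }

    static int compute_vp_id(uint32_t server, uint32_t *vp)
    {
            if (server >= nr_servers)         /* outside the reserved block */
                    return -1;
            if (vp_base == XIVE_INVALID_VP)   /* allocate the block lazily */
                    vp_base = alloc_vp_block(nr_servers);
            *vp = vp_base + server;           /* one VP id per server */
            return 0;
    }

    int main(void)
    {
            uint32_t vp;

            if (compute_vp_id(3, &vp) == 0)
                    printf("server 3 -> vp 0x%x\n", vp);
            return 0;
    }
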
1906 struct kvmppc_xive *xive = dev->private;
1917 if (xive->kvm != vcpu->kvm)
1923 mutex_lock(&xive->lock);
1925 r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
1936 xc->xive = xive;
1960 pr_err("Failed to allocate xive irq for VCPU IPI\n");
1974 r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
1991 if (i == 7 && kvmppc_xive_has_single_escalation(xive))
1995 if (xive->qmap & (1 << i)) {
1997 if (r == 0 && !kvmppc_xive_has_single_escalation(xive))
1999 vcpu, i, kvmppc_xive_has_single_escalation(xive));
2014 r = kvmppc_xive_attach_escalation(vcpu, 0, kvmppc_xive_has_single_escalation(xive));
2024 mutex_unlock(&xive->lock);
2037 static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
2043 sb = kvmppc_xive_find_source(xive, irq, &idx);
2067 static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
2077 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
2087 static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
2105 xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
2111 static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
2120 xive_pre_save_set_queued(xive, irq);
2124 static void xive_pre_save_scan(struct kvmppc_xive *xive)
2134 for (i = 0; i <= xive->max_sbid; i++) {
2135 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2139 xive_pre_save_mask_irq(xive, sb, j);
2143 kvm_for_each_vcpu(i, vcpu, xive->kvm) {
2149 xive_pre_save_queue(xive, &xc->queues[j]);
2154 for (i = 0; i <= xive->max_sbid; i++) {
2155 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2159 xive_pre_save_unmask_irq(xive, sb, j);
2163 static void xive_post_save_scan(struct kvmppc_xive *xive)
2168 for (i = 0; i <= xive->max_sbid; i++) {
2169 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2177 xive->saved_src_count = 0;
2183 static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
2191 sb = kvmppc_xive_find_source(xive, irq, &idx);
2218 if (xive->saved_src_count == 0)
2219 xive_pre_save_scan(xive);
2220 xive->saved_src_count++;
2256 if (xive->saved_src_count == xive->src_count)
2257 xive_post_save_scan(xive);
2267 struct kvmppc_xive *xive, int irq)
2274 mutex_lock(&xive->lock);
2277 if (xive->src_blocks[bid])
2295 xive->src_blocks[bid] = sb;
2297 if (bid > xive->max_sbid)
2298 xive->max_sbid = bid;
2301 mutex_unlock(&xive->lock);
2302 return xive->src_blocks[bid];
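
xive_create_src_block() above (2267-2302) grows a table of source blocks on demand, indexed by the upper bits of the interrupt number, and tracks the highest block id in max_sbid so later scans know how far to walk. A rough userspace sketch of that structure, assuming a power-of-two number of sources per block (SRC_SHIFT and MAX_BLOCKS are placeholders, not the kernel's actual constants):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SRC_SHIFT   10                    /* placeholder: sources per block = 1 << SRC_SHIFT */
    #define MAX_BLOCKS  64

    struct src_block {
            uint32_t state[1u << SRC_SHIFT];  /* per-source state words */
    };

    static struct src_block *src_blocks[MAX_BLOCKS];
    static unsigned int max_sbid;

    static struct src_block *find_or_create_block(uint32_t irq, uint32_t *idx)
    {
            uint32_t bid = irq >> SRC_SHIFT;

            if (bid >= MAX_BLOCKS)
                    return NULL;
            if (!src_blocks[bid]) {           /* create the block on first use */
                    src_blocks[bid] = calloc(1, sizeof(struct src_block));
                    if (!src_blocks[bid])
                            return NULL;
                    if (bid > max_sbid)
                            max_sbid = bid;   /* later scans iterate 0..max_sbid */
            }
            *idx = irq & ((1u << SRC_SHIFT) - 1);
            return src_blocks[bid];
    }

    int main(void)
    {
            uint32_t idx;

            if (find_or_create_block(0x1234, &idx))
                    printf("irq 0x1234 -> block %u, idx 0x%x, max_sbid=%u\n",
                           0x1234 >> SRC_SHIFT, idx, max_sbid);
            return 0;
    }
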
2305 static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
2307 struct kvm *kvm = xive->kvm;
2319 xive->delayed_irqs--;
2326 static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
2343 sb = kvmppc_xive_find_source(xive, irq, &idx);
2346 sb = kvmppc_xive_create_src_block(xive, irq);
2388 xive_lock_and_mask(xive, sb, state);
2411 mutex_lock(&xive->lock);
2412 rc = xive_check_provisioning(xive->kvm, act_prio);
2413 mutex_unlock(&xive->lock);
2417 rc = xive_target_interrupt(xive->kvm, state,
2430 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
2477 xive_finish_unmask(xive, sb, state, guest_prio);
2483 xive->src_count++;
2492 struct kvmppc_xive *xive = kvm->arch.xive;
2497 if (!xive)
2500 sb = kvmppc_xive_find_source(xive, irq, &idx);
2526 int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
2540 mutex_lock(&xive->lock);
2541 if (xive->vp_base != XIVE_INVALID_VP)
2554 xive->nr_servers = KVM_MAX_VCPUS;
2556 xive->nr_servers = nr_servers;
2558 mutex_unlock(&xive->lock);
2565 struct kvmppc_xive *xive = dev->private;
2570 return xive_set_source(xive, attr->attr, attr->addr);
2574 return kvmppc_xive_set_nr_servers(xive, attr->addr);
2582 struct kvmppc_xive *xive = dev->private;
2587 return xive_get_source(xive, attr->attr, attr->addr);
2643 struct kvmppc_xive *xive = dev->private;
2644 struct kvm *kvm = xive->kvm;
2648 pr_devel("Releasing xive device\n");
2659 debugfs_remove(xive->dentry);
2681 * against xive code getting called during vcpu execution or
2684 kvm->arch.xive = NULL;
2687 for (i = 0; i <= xive->max_sbid; i++) {
2688 if (xive->src_blocks[i])
2689 kvmppc_xive_free_sources(xive->src_blocks[i]);
2690 kfree(xive->src_blocks[i]);
2691 xive->src_blocks[i] = NULL;
2694 if (xive->vp_base != XIVE_INVALID_VP)
2695 xive_native_free_vp_block(xive->vp_base);
2721 struct kvmppc_xive *xive = *kvm_xive_device;
2723 if (!xive) {
2724 xive = kzalloc(sizeof(*xive), GFP_KERNEL);
2725 *kvm_xive_device = xive;
2727 memset(xive, 0, sizeof(*xive));
2730 return xive;
2738 struct kvmppc_xive *xive;
2741 pr_devel("Creating xive for partition\n");
2744 if (kvm->arch.xive)
2747 xive = kvmppc_xive_get_device(kvm, type);
2748 if (!xive)
2751 dev->private = xive;
2752 xive->dev = dev;
2753 xive->kvm = kvm;
2754 mutex_init(&xive->lock);
2757 xive->q_order = xive_native_default_eq_shift();
2758 if (xive->q_order < PAGE_SHIFT)
2759 xive->q_page_order = 0;
2761 xive->q_page_order = xive->q_order - PAGE_SHIFT;
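
The lines above (2757-2761, together with the allocation at 948/954) derive q_page_order, the power-of-two page count handed to __get_free_pages(), from q_order, the byte-size exponent of an event queue. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages and using aligned_alloc() as a stand-in for the kernel page allocator:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SHIFT 12                     /* assumed 4 KiB pages */

    int main(void)
    {
            unsigned int q_order = 16;        /* example: a 64 KiB event queue */
            unsigned int q_page_order =
                    q_order < PAGE_SHIFT ? 0 : q_order - PAGE_SHIFT;
            size_t bytes = (size_t)1 << q_order;

            /* The kernel asks for 1 << q_page_order pages from
             * __get_free_pages(); aligned_alloc() stands in for it here. */
            void *qpage = aligned_alloc((size_t)1 << PAGE_SHIFT, bytes);
            if (!qpage)
                    return 1;
            memset(qpage, 0, bytes);          /* matches memset(qpage, 0, 1 << q_order) */

            printf("q_order=%u -> q_page_order=%u (%zu bytes)\n",
                   q_order, q_page_order, bytes);
            free(qpage);
            return 0;
    }
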
2764 xive->vp_base = XIVE_INVALID_VP;
2768 xive->nr_servers = KVM_MAX_VCPUS;
2771 xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;
2774 xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE;
2776 kvm->arch.xive = xive;
2885 struct kvmppc_xive *xive = m->private;
2886 struct kvm *kvm = xive->kvm;
2941 for (i = 0; i <= xive->max_sbid; i++) {
2942 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2956 static void xive_debugfs_init(struct kvmppc_xive *xive)
2958 xive->dentry = debugfs_create_file("xive", S_IRUGO, xive->kvm->debugfs_dentry,
2959 xive, &xive_debug_fops);
2966 struct kvmppc_xive *xive = dev->private;
2969 xive_debugfs_init(xive);
2973 .name = "kvm-xive",