Lines matching refs:vpe

266 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
268 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
269 return vpe->col_idx;
272 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
274 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
281 struct its_vpe *vpe = NULL;
285 vpe = irq_data_get_irq_chip_data(d);
289 vpe = map->vpe;
292 if (vpe) {
293 cpu = vpe_to_cpuid_lock(vpe, flags);
307 struct its_vpe *vpe = NULL;
310 vpe = irq_data_get_irq_chip_data(d);
314 vpe = map->vpe;
317 if (vpe)
318 vpe_to_cpuid_unlock(vpe, flags);
329 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
331 if (valid_col(its->collections + vpe->col_idx))
332 return vpe;
390 struct its_vpe *vpe;
394 struct its_vpe *vpe;
400 struct its_vpe *vpe;
408 struct its_vpe *vpe;
415 struct its_vpe *vpe;
422 struct its_vpe *vpe;
426 struct its_vpe *vpe;
778 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
782 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
789 struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
795 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
800 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
806 vpe = NULL;
812 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
822 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
824 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
836 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
841 return vpe;
851 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
857 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
864 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
874 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
880 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
887 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
900 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
905 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
910 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
928 return valid_vpe(its, map->vpe);
946 return valid_vpe(its, map->vpe);
964 return valid_vpe(its, map->vpe);
975 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
979 return valid_vpe(its, desc->its_invdb_cmd.vpe);
990 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
999 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
1282 desc.its_vmapti_cmd.vpe = map->vpe;
1296 desc.its_vmovi_cmd.vpe = map->vpe;
1305 struct its_vpe *vpe, bool valid)
1309 desc.its_vmapp_cmd.vpe = vpe;
1311 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1316 static void its_send_vmovp(struct its_vpe *vpe)
1321 int col_id = vpe->col_idx;
1323 desc.its_vmovp_cmd.vpe = vpe;
1343 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1350 if (!require_its_list_vmovp(vpe->its_vm, its))
1360 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1364 desc.its_vinvall_cmd.vpe = vpe;
1410 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1414 desc.its_invdb_cmd.vpe = vpe;
1490 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1811 struct its_vpe *vpe = vm->vpes[i];
1812 struct irq_data *d = irq_get_irq_data(vpe->irq);
1815 vpe->col_idx = cpumask_first(cpu_online_mask);
1816 its_send_vmapp(its, vpe, true);
1817 its_send_vinvall(its, vpe);
1818 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
3736 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3743 if (vpe->vpe_proxy_event == -1)
3746 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3747 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3757 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3759 vpe->vpe_proxy_event = -1;
3762 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3772 its_vpe_db_proxy_unmap_locked(vpe);
3777 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3784 if (vpe->vpe_proxy_event != -1)
3792 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3793 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3796 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3797 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3800 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3813 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3821 its_vpe_db_proxy_map_locked(vpe);
3824 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3825 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3834 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3848 * protect us, and that we must ensure nobody samples vpe->col_idx
3850 * taken on any vLPI handling path that evaluates vpe->col_idx.
3852 from = vpe_to_cpuid_lock(vpe, &flags);
3872 vpe->col_idx = cpu;
3874 its_send_vmovp(vpe);
3875 its_vpe_db_proxy_move(vpe, from, cpu);
3879 vpe_to_cpuid_unlock(vpe, flags);
3898 static void its_vpe_schedule(struct its_vpe *vpe)
3904 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3913 val = virt_to_phys(page_address(vpe->vpt_page)) &
3922 * easily. So in the end, vpe->pending_last is only an
3929 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3934 static void its_vpe_deschedule(struct its_vpe *vpe)
3941 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3942 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3945 static void its_vpe_invall(struct its_vpe *vpe)
3953 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3960 its_send_vinvall(its, vpe);
3967 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3972 its_vpe_schedule(vpe);
3976 its_vpe_deschedule(vpe);
3984 its_vpe_invall(vpe);
3992 static void its_vpe_send_cmd(struct its_vpe *vpe,
3999 its_vpe_db_proxy_map_locked(vpe);
4000 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
4007 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4012 its_vpe_send_cmd(vpe, its_send_inv);
4038 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4046 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
4048 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
4050 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
4055 its_vpe_send_cmd(vpe, its_send_int);
4057 its_vpe_send_cmd(vpe, its_send_clear);
4069 .name = "GICv4-vpe",
4098 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4108 its_send_invdb(its, vpe);
4123 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4133 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4138 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4157 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4161 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4162 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4171 vpe->pending_last = true;
4175 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4183 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
4186 cpu = vpe_to_cpuid_lock(vpe, &flags);
4193 vpe_to_cpuid_unlock(vpe, flags);
4198 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4203 its_vpe_4_1_schedule(vpe, info);
4207 its_vpe_4_1_deschedule(vpe, info);
4215 its_vpe_4_1_invall(vpe);
4224 .name = "GICv4.1-vpe",
4234 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4237 desc.its_vsgi_cmd.vpe = vpe;
4239 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4240 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4241 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4254 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4256 vpe->sgi_config[d->hwirq].enabled = false;
4262 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4264 vpe->sgi_config[d->hwirq].enabled = true;
4289 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4293 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4306 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4326 cpu = vpe_to_cpuid_lock(vpe, &flags);
4329 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4346 vpe_to_cpuid_unlock(vpe, flags);
4358 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4363 vpe->sgi_config[d->hwirq].priority = info->priority;
4364 vpe->sgi_config[d->hwirq].group = info->group;
4387 struct its_vpe *vpe = args;
4394 vpe->sgi_config[i].priority = 0;
4395 vpe->sgi_config[i].enabled = false;
4396 vpe->sgi_config[i].group = false;
4399 &its_sgi_irq_chip, vpe);
4424 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4436 vpe->sgi_config[d->hwirq].enabled = false;
4458 static int its_vpe_init(struct its_vpe *vpe)
4481 raw_spin_lock_init(&vpe->vpe_lock);
4482 vpe->vpe_id = vpe_id;
4483 vpe->vpt_page = vpt_page;
4485 atomic_set(&vpe->vmapp_count, 0);
4487 vpe->vpe_proxy_event = -1;
4492 static void its_vpe_teardown(struct its_vpe *vpe)
4494 its_vpe_db_proxy_unmap(vpe);
4495 its_vpe_id_free(vpe->vpe_id);
4496 its_free_pending_table(vpe->vpt_page);
4511 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4513 BUG_ON(vm != vpe->its_vm);
4516 its_vpe_teardown(vpe);
4582 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4594 vpe->col_idx = cpumask_first(cpu_online_mask);
4600 its_send_vmapp(its, vpe, true);
4601 its_send_vinvall(its, vpe);
4604 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4612 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4626 its_send_vmapp(its, vpe, false);
4634 if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
4635 gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
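
The matches around source lines 266-274 and 3834-3879, together with the comment fragments at 3848/3850, outline one locking rule: vpe->col_idx may only be sampled or updated while holding vpe->vpe_lock, which vpe_to_cpuid_lock()/vpe_to_cpuid_unlock() wrap. The following is a minimal sketch of that pattern, assuming the driver-internal declarations shown in the listing (struct its_vpe, vpe_to_cpuid_lock(), vpe_to_cpuid_unlock(), its_send_vmovp(), its_vpe_db_proxy_move()); the function name example_move_vpe and its new_cpu parameter are illustrative only, and this is not the kernel's its_vpe_set_affinity().

/* Sketch only: relies on the GIC ITS driver internals listed above. */
static int example_move_vpe(struct its_vpe *vpe, unsigned int new_cpu)
{
	unsigned long flags;
	int from;

	/* Pin the vPE's current CPU; no other path may sample col_idx now. */
	from = vpe_to_cpuid_lock(vpe, &flags);

	if (from != new_cpu) {
		vpe->col_idx = new_cpu;			/* updated under vpe_lock */
		its_send_vmovp(vpe);			/* move the vPE on the ITS(s) */
		its_vpe_db_proxy_move(vpe, from, new_cpu); /* then its doorbell */
	}

	vpe_to_cpuid_unlock(vpe, flags);
	return 0;
}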