Lines matching defs:vcpu

(The entries below appear to come from the KVM RISC-V ONE_REG implementation, arch/riscv/kvm/vcpu_onereg.c; each line is prefixed with its line number in that file.)

177 void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
185 set_bit(host_isa, vcpu->arch.isa);
189 static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
204 reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
207 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
212 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
217 reg_val = vcpu->arch.mvendorid;
220 reg_val = vcpu->arch.marchid;
223 reg_val = vcpu->arch.mimpid;
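The CONFIG handler above services KVM_GET_ONE_REG for the pseudo-registers kvm_riscv_vcpu_get_reg_config() knows about: the base ISA word, the Zicbom/Zicboz block sizes (only present when those extensions are available), and the emulated mvendorid/marchid/mimpid. A minimal userspace sketch of reading two of them, assuming an rv64 host and an already-created vCPU fd (vcpu_fd, read_config_regs, and RISCV_CONFIG_REG are hypothetical names; the KVM_REG_* macros come from <linux/kvm.h> and <asm/kvm.h>):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: build a CONFIG-class register id for rv64. */
    #define RISCV_CONFIG_REG(name)                                  \
            (KVM_REG_RISCV | KVM_REG_SIZE_U64 |                     \
             KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(name))

    static int read_config_regs(int vcpu_fd)
    {
            uint64_t isa, mvendorid;
            struct kvm_one_reg reg;

            reg.id = RISCV_CONFIG_REG(isa);        /* base ISA bitmap */
            reg.addr = (unsigned long)&isa;
            if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                    return -1;

            reg.id = RISCV_CONFIG_REG(mvendorid);  /* emulated vendor id */
            reg.addr = (unsigned long)&mvendorid;
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }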
238 static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
267 if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
270 if (!vcpu->arch.ran_atleast_once) {
287 reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
289 vcpu->arch.isa[0] = reg_val;
290 kvm_riscv_vcpu_fp_reset(vcpu);
296 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
302 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
308 if (reg_val == vcpu->arch.mvendorid)
310 if (!vcpu->arch.ran_atleast_once)
311 vcpu->arch.mvendorid = reg_val;
316 if (reg_val == vcpu->arch.marchid)
318 if (!vcpu->arch.ran_atleast_once)
319 vcpu->arch.marchid = reg_val;
324 if (reg_val == vcpu->arch.mimpid)
326 if (!vcpu->arch.ran_atleast_once)
327 vcpu->arch.mimpid = reg_val;
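The matching set path is deliberately restrictive: the base ISA word and the machine ID registers can only be changed before the vCPU has run (the ran_atleast_once checks at lines 270, 310, 318 and 326), and rewriting the ISA word also resets the FP state (line 290). A sketch of setting marchid during vCPU setup, under the same hypothetical vcpu_fd / rv64 assumptions as above:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper; must run before the first KVM_RUN, since the
     * set_reg_config path rejects changes once ran_atleast_once is set. */
    static int set_marchid(int vcpu_fd, uint64_t marchid)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                            KVM_REG_RISCV_CONFIG |
                            KVM_REG_RISCV_CONFIG_REG(marchid),
                    .addr = (unsigned long)&marchid,
            };
            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }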
342 static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
345 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
375 static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
378 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
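Core registers map the ONE_REG interface straight onto vcpu->arch.guest_context. Under the same assumptions, setting the guest entry point looks roughly like this (set_guest_pc is a hypothetical name):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: point the guest at its entry address by
     * writing the core pc register. */
    static int set_guest_pc(int vcpu_fd, uint64_t entry)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                            KVM_REG_RISCV_CORE |
                            KVM_REG_RISCV_CORE_REG(regs.pc),
                    .addr = (unsigned long)&entry,
            };
            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }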
410 static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
414 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
420 kvm_riscv_vcpu_flush_interrupts(vcpu);
429 static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
433 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
446 WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
451 static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
455 struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
465 static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
469 struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
479 static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
497 rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
500 rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
505 rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
521 static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
542 rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
545 rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
550 rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
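The CSR class is split by subtype: general CSRs, AIA CSRs, and Smstateen CSRs each get their own helper above. Note the two side effects in the general handlers: reading flushes pending interrupts into the shadow CSRs first (line 420), and writing clears the pending-interrupt mask (line 446), so userspace sees and installs consistent values. Reading sstatus under the same assumptions (get_sstatus is a hypothetical name):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: read a general (non-AIA, non-Smstateen) CSR. */
    static int get_sstatus(int vcpu_fd, uint64_t *val)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                            KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL |
                            KVM_REG_RISCV_CSR_REG(sstatus),
                    .addr = (unsigned long)val,
            };
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }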
563 static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
578 if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
584 static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
598 if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
601 if (!vcpu->arch.ran_atleast_once) {
608 set_bit(host_isa_ext, vcpu->arch.isa);
611 clear_bit(host_isa_ext, vcpu->arch.isa);
614 kvm_riscv_vcpu_fp_reset(vcpu);
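Single ISA-extension registers expose one extension per register id: reading reports whether the extension is enabled for the guest (line 578), and writing toggles it, but only for host-supported extensions and only before the first KVM_RUN (line 601). A sketch of hiding one extension (disable_ext is hypothetical; KVM_RISCV_ISA_EXT_SVPBMT is one id from the uapi enum):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: write 0 to a SINGLE isa-ext register to hide
     * the extension from the guest; must happen before the first KVM_RUN. */
    static int disable_ext(int vcpu_fd, uint64_t ext_id)
    {
            uint64_t enable = 0;
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                            KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |
                            ext_id,
                    .addr = (unsigned long)&enable,
            };
            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }

For example, disable_ext(vcpu_fd, KVM_RISCV_ISA_EXT_SVPBMT) would mask Svpbmt; KVM refuses to disable some core extensions, so the write can still fail.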
622 static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
637 riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
645 static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
659 riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
665 static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
685 rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
689 rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
705 static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
726 return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
728 return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
730 return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
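The MULTI_EN/MULTI_DIS variants batch this: each register carries a bitmask covering BITS_PER_LONG extension ids, and the setters simply loop over the mask calling the single-extension helper (lines 637 and 659). Assuming the KVM_REG_RISCV_ISA_MULTI_REG()/KVM_REG_RISCV_ISA_MULTI_MASK() helpers from the uapi header, enabling one extension through the batch view might look like:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: each MULTI register is a bitmask covering
     * BITS_PER_LONG extension ids; set bits are enabled (MULTI_EN) or
     * disabled (MULTI_DIS), clear bits are left untouched. */
    static int enable_ext(int vcpu_fd, uint64_t ext_id)
    {
            uint64_t mask = KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                            KVM_REG_RISCV_ISA_EXT |
                            KVM_REG_RISCV_ISA_MULTI_EN |
                            KVM_REG_RISCV_ISA_MULTI_REG(ext_id),
                    .addr = (unsigned long)&mask,
            };
            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }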
738 static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
753 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
756 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
774 static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
776 return copy_config_reg_indices(vcpu, NULL);
803 static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
807 if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
809 if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
815 static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
836 if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
854 if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
897 static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
899 const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
901 if (riscv_isa_extension_available(vcpu->arch.isa, f))
907 static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
910 int n = num_fp_f_regs(vcpu);
926 static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
928 const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
930 if (riscv_isa_extension_available(vcpu->arch.isa, d))
936 static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
940 int n = num_fp_d_regs(vcpu);
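The FP index helpers above only emit F/D registers when the vCPU's ISA actually includes the f or d extension, and FP_F registers are 32 bits wide regardless of XLEN. Reading f0 under the same assumptions (get_fp_f0 is a hypothetical name):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: FP_F registers are 32-bit and only exist when
     * the vCPU's ISA includes F (see num_fp_f_regs() above). */
    static int get_fp_f0(int vcpu_fd, uint32_t *val)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
                            KVM_REG_RISCV_FP_F |
                            KVM_REG_RISCV_FP_F_REG(f[0]),
                    .addr = (unsigned long)val,
            };
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }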
966 static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
993 static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
995 return copy_isa_ext_reg_indices(vcpu, NULL);
998 static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
1008 if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
1023 static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
1025 return copy_sbi_ext_reg_indices(vcpu, NULL);
1028 static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
1030 struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
1055 static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
1057 return copy_sbi_reg_indices(vcpu, NULL);
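SBI extension registers work like the ISA ones: copy_sbi_ext_reg_indices() only lists extensions the vCPU can support (line 1008), and each can be hidden from the guest by writing 0 to its SINGLE register; as with ISA extensions, the kernel is expected to reject the change once the vCPU has run. A sketch (disable_sbi_ext is hypothetical; KVM_RISCV_SBI_EXT_PMU is one id from the uapi enum):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: write 0 to an SBI_EXT SINGLE register so the
     * guest no longer sees that SBI extension. */
    static int disable_sbi_ext(int vcpu_fd, uint64_t sbi_ext_id)
    {
            uint64_t enable = 0;
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                            KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
                            sbi_ext_id,
                    .addr = (unsigned long)&enable,
            };
            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }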
1060 static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
1062 if (!riscv_isa_extension_available(vcpu->arch.isa, v))
1069 static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
1072 const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
1073 int n = num_vector_regs(vcpu);
1114 unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
1118 res += num_config_regs(vcpu);
1120 res += num_csr_regs(vcpu);
1122 res += num_fp_f_regs(vcpu);
1123 res += num_fp_d_regs(vcpu);
1124 res += num_vector_regs(vcpu);
1125 res += num_isa_ext_regs(vcpu);
1126 res += num_sbi_ext_regs(vcpu);
1127 res += num_sbi_regs(vcpu);
1135 int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
1140 ret = copy_config_reg_indices(vcpu, uindices);
1150 ret = copy_csr_reg_indices(vcpu, uindices);
1160 ret = copy_fp_f_reg_indices(vcpu, uindices);
1165 ret = copy_fp_d_reg_indices(vcpu, uindices);
1170 ret = copy_vector_reg_indices(vcpu, uindices);
1175 ret = copy_isa_ext_reg_indices(vcpu, uindices);
1180 ret = copy_sbi_ext_reg_indices(vcpu, uindices);
1185 ret = copy_sbi_reg_indices(vcpu, uindices);
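kvm_riscv_vcpu_num_regs() and kvm_riscv_vcpu_copy_reg_indices() together implement KVM_GET_REG_LIST: the copy_*_reg_indices() helpers double as counters when called with a NULL buffer (lines 776, 995, 1025, 1057), which is why num_regs is just the sum of the per-class counts. Userspace uses the standard two-call pattern (get_reg_list is a hypothetical helper):

    #include <stdint.h>
    #include <stdlib.h>
    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper for the standard two-call pattern: the first
     * ioctl fails with E2BIG but reports how many ids exist, the second
     * fills the array that copy_*_reg_indices() populate in-kernel. */
    static struct kvm_reg_list *get_reg_list(int vcpu_fd)
    {
            struct kvm_reg_list probe = { .n = 0 }, *list;

            if (!ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) || errno != E2BIG)
                    return NULL;

            list = malloc(sizeof(*list) + probe.n * sizeof(uint64_t));
            if (!list)
                    return NULL;

            list->n = probe.n;
            if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list)) {
                    free(list);
                    return NULL;
            }
            return list;
    }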
1193 int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
1198 return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
1200 return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
1202 return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
1204 return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
1206 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1209 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1212 return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
1214 return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
1216 return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
1218 return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
1226 int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
1231 return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
1233 return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
1235 return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
1237 return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
1239 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1242 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1245 return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
1247 return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
1249 return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
1251 return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
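Finally, kvm_riscv_vcpu_set_reg() and kvm_riscv_vcpu_get_reg() are the top-level dispatchers: the type bits in reg->id select the handler, so every register class above is reached through the same two ioctls. One wrapper covers all of them (riscv_one_reg is a hypothetical name):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical wrapper: the type bits embedded in id (CONFIG, CORE,
     * CSR, TIMER, FP_F, FP_D, VECTOR, ISA_EXT, SBI_EXT, SBI) pick which
     * kvm_riscv_vcpu_{get,set}_reg_* handler runs on the kernel side. */
    static int riscv_one_reg(int vcpu_fd, int set, uint64_t id, void *val)
    {
            struct kvm_one_reg reg = {
                    .id   = id,
                    .addr = (unsigned long)val,
            };
            return ioctl(vcpu_fd, set ? KVM_SET_ONE_REG : KVM_GET_ONE_REG,
                         &reg);
    }

Any id returned by the KVM_GET_REG_LIST sketch above can be passed straight through this wrapper.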