Lines Matching defs:op

76 struct instruction_op op;
96 if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
97 int type = op.type & INSTR_TYPE_MASK;
98 int size = GETSIZE(op.type);
104 int instr_byte_swap = op.type & BYTEREV;
106 if (op.type & SIGNEXT)
108 op.reg, size, !instr_byte_swap);
111 op.reg, size, !instr_byte_swap);
113 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
114 kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
123 if (op.type & FPCONV)
126 if (op.type & SIGNEXT)
128 KVM_MMIO_REG_FPR|op.reg, size, 1);
131 KVM_MMIO_REG_FPR|op.reg, size, 1);
133 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
134 kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
168 KVM_MMIO_REG_VMX|op.reg,
173 KVM_MMIO_REG_VMX|op.reg,
182 if (op.vsx_flags & VSX_CHECK_VEC) {
190 if (op.vsx_flags & VSX_FPCONV)
193 if (op.element_size == 8) {
194 if (op.vsx_flags & VSX_SPLAT)
200 } else if (op.element_size == 4) {
201 if (op.vsx_flags & VSX_SPLAT)
210 if (size < op.element_size) {
216 size/op.element_size;
217 io_size_each = op.element_size;
221 KVM_MMIO_REG_VSX|op.reg, io_size_each,
222 1, op.type & SIGNEXT);
227 int instr_byte_swap = op.type & BYTEREV;
229 emulated = kvmppc_handle_store(vcpu, kvmppc_get_gpr(vcpu, op.reg),
232 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
233 kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
250 if (op.type & FPCONV)
254 kvmppc_get_fpr(vcpu, op.reg), size, 1);
256 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
257 kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
294 op.reg, 8, 1);
298 op.reg, size, 1);
307 if (op.vsx_flags & VSX_CHECK_VEC) {
319 if (op.vsx_flags & VSX_FPCONV)
322 if (op.element_size == 8)
325 else if (op.element_size == 4)
331 if (size < op.element_size) {
337 size/op.element_size;
338 io_size_each = op.element_size;
342 op.reg, io_size_each, 1);
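
The matched lines above all belong to one load/store emulation path: analyse_instr() fills a struct instruction_op, and the caller switches on op.type & INSTR_TYPE_MASK, honouring the SIGNEXT, BYTEREV and UPDATE flags and, for the vector forms, op.element_size. The following is a minimal, self-contained C sketch of that dispatch pattern, assuming simplified stand-in definitions: the struct layout, the flag values, the OP_LOAD/OP_STORE constants, the handle_op() helper and the printf placeholders are illustrative only, not the kernel's real declarations.

/* Sketch of the "analyse, then dispatch on masked op.type" pattern.
 * All constants and the struct below are simplified stand-ins. */
#include <stdio.h>

#define INSTR_TYPE_MASK 0x1f   /* stand-in: low bits select the operation */
#define SIGNEXT         0x20   /* sign-extend the loaded value            */
#define BYTEREV         0x40   /* byte-reversed (load/store with swap)    */
#define UPDATE          0x80   /* update form: write EA back to a GPR     */

enum { OP_LOAD = 1, OP_STORE = 2 };

struct instruction_op {        /* simplified stand-in for the kernel struct */
	int type;              /* OP_* value plus the flag bits above        */
	int reg;               /* source/target register                     */
	int update_reg;        /* base register for update-form accesses     */
	int element_size;      /* per-element size for vector accesses       */
};

static void handle_op(const struct instruction_op *op)
{
	switch (op->type & INSTR_TYPE_MASK) {
	case OP_LOAD:
		printf("load into r%d%s%s\n", op->reg,
		       (op->type & SIGNEXT) ? ", sign-extended" : "",
		       (op->type & BYTEREV) ? ", byte-reversed" : "");
		break;
	case OP_STORE:
		printf("store from r%d\n", op->reg);
		break;
	}
	/* Update forms write the effective address back to the base GPR,
	 * mirroring the op.update_reg lines in the listing above. */
	if (op->type & UPDATE)
		printf("write effective address back to r%d\n", op->update_reg);
}

int main(void)
{
	struct instruction_op op = {
		.type = OP_LOAD | SIGNEXT | UPDATE,
		.reg = 5,
		.update_reg = 4,
	};
	handle_op(&op);
	return 0;
}

In the real code the switch has separate cases for integer, FP, VMX and VSX loads and stores; the sketch collapses those to two cases purely to show how the flags in op.type steer each branch.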