Lines Matching defs:vm

2873 unsigned vm = INSTR (20, 16);
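
A minimal sketch of what INSTR (20, 16) is assumed to do: pull the Rm
register field, bits [20:16], out of the current 32-bit opcode.  The
extract_bits helper below is illustrative, not the simulator's own macro.

  #include <stdint.h>

  static inline uint32_t
  extract_bits (uint32_t instr, unsigned hi, unsigned lo)
  {
    /* Mask out bits [hi:lo]; callers keep hi - lo < 31.  */
    return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
  }

  /* e.g.  unsigned vm = extract_bits (opcode, 20, 16);  */
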
2884 unsigned int selector = aarch64_get_vec_u8 (cpu, vm, i);
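
The selector read at 2884 looks like a TBL-style table lookup: each
selector byte indexes into a table of concatenated 16-byte registers,
and out-of-range selectors produce zero.  A hedged sketch; table and
nregs are illustrative names, not the simulator's:

  #include <stdint.h>

  void
  tbl_lookup (const uint8_t *table, unsigned nregs,
              const uint8_t *selectors, uint8_t *result, unsigned len)
  {
    unsigned i;

    for (i = 0; i < len; i++)
      {
        unsigned int selector = selectors[i];

        /* Selectors past the end of the table read as zero.  */
        result[i] = selector < nregs * 16 ? table[selector] : 0;
      }
  }
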
2919 unsigned vm = INSTR (20, 16);
2935 aarch64_get_vec_u8 (cpu, second ? vm : vn, i * 2));
2938 aarch64_get_vec_u8 (cpu, second ? vn : vm, i * 2 + 1));
2947 aarch64_get_vec_u16 (cpu, second ? vm : vn, i * 2));
2950 aarch64_get_vec_u16 (cpu, second ? vn : vm, i * 2 + 1));
2956 (cpu, vd, 0, aarch64_get_vec_u32 (cpu, second ? vm : vn, 0));
2958 (cpu, vd, 1, aarch64_get_vec_u32 (cpu, second ? vn : vm, 1));
2960 (cpu, vd, 2, aarch64_get_vec_u32 (cpu, second ? vm : vn, 2));
2962 (cpu, vd, 3, aarch64_get_vec_u32 (cpu, second ? vn : vm, 3));
2970 aarch64_get_vec_u64 (cpu, second ? vm : vn, 0));
2972 aarch64_get_vec_u64 (cpu, second ? vn : vm, 1));
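
The second ? vm : vn reads at 2935-2972 trace a two-register permute:
even destination lanes come from one source and odd lanes from the
other, with `second` swapping the operand roles (a TRN1/TRN2-style
pairing).  A sketch of the byte case under that reading:

  #include <stdint.h>

  void
  trn_like_u8 (const uint8_t *vn, const uint8_t *vm, uint8_t *vd,
               unsigned pairs, int second)
  {
    unsigned i;

    for (i = 0; i < pairs; i++)
      {
        vd[i * 2]     = (second ? vm : vn)[i * 2];
        vd[i * 2 + 1] = (second ? vn : vm)[i * 2 + 1];
      }
  }
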
3046 unsigned vm = INSTR (20, 16);
3050 uint64_t val_m1 = aarch64_get_vec_u64 (cpu, vm, 0);
3051 uint64_t val_m2 = aarch64_get_vec_u64 (cpu, vm, 1);
3153 unsigned vm = INSTR (20, 16);
3157 uint64_t val_m1 = aarch64_get_vec_u64 (cpu, vm, 0);
3158 uint64_t val_m2 = aarch64_get_vec_u64 (cpu, vm, 1);
3527 unsigned vm = INSTR (9, 5);
3542 val += aarch64_get_vec_u8 (cpu, vm, i);
3551 val += aarch64_get_vec_u16 (cpu, vm, i);
3562 val += aarch64_get_vec_u32 (cpu, vm, i);
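
The val += loops at 3542-3562 are an across-lanes reduction
(ADDV-style).  Note the source register here comes from INSTR (9, 5),
the Rn slot, even though the listing names it vm.  A sketch, assuming
the writeback truncates to the destination's element size:

  #include <stdint.h>

  uint32_t
  addv_u8 (const uint8_t *src, unsigned lanes)
  {
    uint32_t val = 0;
    unsigned i;

    for (i = 0; i < lanes; i++)
      val += src[i];
    return val;   /* caller truncates to the destination lane width */
  }
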
3584 unsigned vm = INSTR (9, 5);
3599 aarch64_get_vec_u32 (cpu, vm, elem));
3609 aarch64_get_vec_u64 (cpu, vm, elem));
3619 aarch64_get_reg_u32 (cpu, vm, NO_SP));
3629 aarch64_get_reg_u64 (cpu, vm, NO_SP));
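
Lines 3599-3629 read either one vector element or a general register
(with NO_SP, so register 31 reads as zero) and appear to broadcast it.
A DUP-style sketch, with the instruction identity a guess:

  #include <stdint.h>

  void
  dup_u32 (uint32_t *vd, uint32_t value, unsigned lanes)
  {
    unsigned i;

    /* Replicate one scalar into every destination lane.  */
    for (i = 0; i < lanes; i++)
      vd[i] = value;
  }
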
3642 b[i] = aarch64_get_vec_##READ_TYPE (cpu, vm, i + bias); \
3665 unsigned vm = INSTR (20, 16);
3724 unsigned vm = INSTR (20, 16);
3745 - aarch64_get_vec_double (cpu, vm, i));
3752 - aarch64_get_vec_float (cpu, vm, i));
3764 aarch64_get_vec_double (cpu, vm, i)
3771 aarch64_get_vec_float (cpu, vm, i)
3790 unsigned vm = INSTR (20, 16);
3806 + aarch64_get_vec_u8 (cpu, vm, i));
3812 + aarch64_get_vec_u16 (cpu, vm, i));
3818 + aarch64_get_vec_u32 (cpu, vm, i));
3825 + aarch64_get_vec_u64 (cpu, vm, 0));
3828 + aarch64_get_vec_u64 (cpu, vm, 1));
3846 unsigned vm = INSTR (20, 16);
3890 unsigned vm = INSTR (20, 16);
3908 * aarch64_get_vec_u8 (cpu, vm, i)));
3916 * aarch64_get_vec_u16 (cpu, vm, i)));
3924 * aarch64_get_vec_u32 (cpu, vm, i)));
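
The multiplies at 3908-3924 sit inside a third level of closing
parentheses, which is consistent with a multiply-accumulate
(MLA-style) update rather than a plain MUL.  A hedged per-lane
sketch; the vn and vd operands are inferred, only the vm factor is
shown in the listing:

  #include <stdint.h>

  void
  mla_u8 (uint8_t *vd, const uint8_t *vn, const uint8_t *vm,
          unsigned lanes)
  {
    unsigned i;

    /* vd[i] += vn[i] * vm[i], wrapping at the lane width.  */
    for (i = 0; i < lanes; i++)
      vd[i] = (uint8_t) (vd[i] + vn[i] * vm[i]);
  }
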
4002 unsigned vm = INSTR (20, 16);
4023 fn (aarch64_get_vec_double (cpu, vm, 0),
4024 aarch64_get_vec_double (cpu, vm, 1)));
4040 fn (aarch64_get_vec_float (cpu, vm, 0),
4041 aarch64_get_vec_float (cpu, vm, 1)));
4044 fn (aarch64_get_vec_float (cpu, vm, 2),
4045 aarch64_get_vec_float (cpu, vm, 3)));
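
The fn (vm[0], vm[1]) calls at 4023-4045 show the pairwise pattern:
pairs from vn fill the low half of the result and pairs from vm the
high half, with fn supplied as a min/max-style callback.  A sketch of
the 4-lane float case, under that reading:

  typedef float (*pairwise_fn) (float, float);

  void
  vec_pairwise_4s (pairwise_fn fn, const float *vn, const float *vm,
                   float *vd)
  {
    vd[0] = fn (vn[0], vn[1]);   /* low half from the vn pairs   */
    vd[1] = fn (vn[2], vn[3]);
    vd[2] = fn (vm[0], vm[1]);   /* high half from the vm pairs  */
    vd[3] = fn (vm[2], vm[3]);
  }
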
4060 unsigned vm = INSTR (20, 16);
4073 & aarch64_get_vec_u32 (cpu, vm, i));
4087 unsigned vm = INSTR (20, 16);
4102 & aarch64_get_vec_u8 (cpu, vm, i)));
4116 unsigned vm = INSTR (20, 16);
4129 ^ aarch64_get_vec_u32 (cpu, vm, i));
4145 unsigned vm = INSTR (20, 16);
4161 uint32_t vm_val = aarch64_get_vec_u32 (cpu, vm, i);
4182 unsigned vm = INSTR (20, 16);
4195 | ~ aarch64_get_vec_u8 (cpu, vm, i));
4209 unsigned vm = INSTR (20, 16);
4222 | aarch64_get_vec_u8 (cpu, vm, i));
4236 unsigned vm = INSTR (20, 16);
4249 & ~ aarch64_get_vec_u8 (cpu, vm, i));
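
Lines 4060-4249 are the three-register bitwise family; per lane they
reduce to the expressions below.  The operand roles in the bit select
(the vm_val read at 4161) are an assumption, since BSL, BIT and BIF
differ only in which register supplies the mask:

  #include <stdint.h>

  uint32_t and_lane (uint32_t n, uint32_t m) { return n & m; }
  uint32_t orr_lane (uint32_t n, uint32_t m) { return n | m; }
  uint32_t eor_lane (uint32_t n, uint32_t m) { return n ^ m; }
  uint32_t orn_lane (uint32_t n, uint32_t m) { return n | ~m; }
  uint32_t bic_lane (uint32_t n, uint32_t m) { return n & ~m; }

  uint32_t
  bsl_lane (uint32_t d, uint32_t n, uint32_t m)
  {
    /* Take n's bits where d is set, m's bits where it is clear.  */
    return (d & n) | (~d & m);
  }
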
4562 unsigned vm = INSTR (20, 16);
4592 aarch64_get_vec_double (cpu, vm, i)));
4608 aarch64_get_vec_float (cpu, vm, i)));
4664 aarch64_get_vec_##SOURCE##8 (cpu, vm, i) \
4672 aarch64_get_vec_##SOURCE##16 (cpu, vm, i) \
4680 aarch64_get_vec_##SOURCE##32 (cpu, vm, i) \
4690 aarch64_get_vec_##SOURCE##64 (cpu, vm, i) \
4735 if (vm != 0) \
4768 aarch64_get_vec_double (cpu, vm, i) \
4777 aarch64_get_vec_float (cpu, vm, i) \
4801 unsigned vm = INSTR (20, 16);
4816 if (vm != 0)
4873 if (vm == 0)
4894 unsigned vm = INSTR (20, 16);
4912 shift = aarch64_get_vec_s8 (cpu, vm, i);
4925 shift = aarch64_get_vec_s8 (cpu, vm, i * 2);
4938 shift = aarch64_get_vec_s8 (cpu, vm, i * 4);
4953 shift = aarch64_get_vec_s8 (cpu, vm, i * 8);
4979 unsigned vm = INSTR (20, 16);
4994 shift = aarch64_get_vec_s8 (cpu, vm, i);
5007 shift = aarch64_get_vec_s8 (cpu, vm, i * 2);
5020 shift = aarch64_get_vec_s8 (cpu, vm, i * 4);
5035 shift = aarch64_get_vec_s8 (cpu, vm, i * 8);
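
The aarch64_get_vec_s8 (..., vm, i * 2/4/8) reads at 4912-5035 fetch
the signed low byte of each vm lane as the shift count, hence the byte
stride matching the lane width.  A negative count is taken to mean a
right shift, SSHL/USHL-style; a sketch for one 16-bit lane:

  #include <stdint.h>

  uint16_t
  ushl_lane16 (uint16_t value, int8_t shift)
  {
    /* Counts at or beyond the lane width shift everything out.  */
    if (shift >= 0)
      return shift >= 16 ? 0 : (uint16_t) (value << shift);
    return -shift >= 16 ? 0 : (uint16_t) (value >> -shift);
  }
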
5060 unsigned vm = INSTR (20, 16);
5078 aarch64_get_vec_double (cpu, vm, i) +
5086 aarch64_get_vec_float (cpu, vm, i) +
5105 unsigned vm = INSTR (20, 16);
5124 > aarch64_get_vec_u8 (cpu, vm, i)
5126 : aarch64_get_vec_u8 (cpu, vm, i));
5133 > aarch64_get_vec_u16 (cpu, vm, i)
5135 : aarch64_get_vec_u16 (cpu, vm, i));
5142 > aarch64_get_vec_u32 (cpu, vm, i)
5144 : aarch64_get_vec_u32 (cpu, vm, i));
5159 > aarch64_get_vec_s8 (cpu, vm, i)
5161 : aarch64_get_vec_s8 (cpu, vm, i));
5168 > aarch64_get_vec_s16 (cpu, vm, i)
5170 : aarch64_get_vec_s16 (cpu, vm, i));
5177 > aarch64_get_vec_s32 (cpu, vm, i)
5179 : aarch64_get_vec_s32 (cpu, vm, i));
5202 unsigned vm = INSTR (20, 16);
5221 < aarch64_get_vec_u8 (cpu, vm, i)
5223 : aarch64_get_vec_u8 (cpu, vm, i));
5230 < aarch64_get_vec_u16 (cpu, vm, i)
5232 : aarch64_get_vec_u16 (cpu, vm, i));
5239 < aarch64_get_vec_u32 (cpu, vm, i)
5241 : aarch64_get_vec_u32 (cpu, vm, i));
5256 < aarch64_get_vec_s8 (cpu, vm, i)
5258 : aarch64_get_vec_s8 (cpu, vm, i));
5265 < aarch64_get_vec_s16 (cpu, vm, i)
5267 : aarch64_get_vec_s16 (cpu, vm, i));
5274 < aarch64_get_vec_s32 (cpu, vm, i)
5276 : aarch64_get_vec_s32 (cpu, vm, i));
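
The two conditional shapes above are the whole story: "a > b ? a : b"
(5124-5179) is the MAX family and "a < b ? a : b" (5221-5276) the MIN
family, instantiated for each signed and unsigned lane width, e.g.:

  #include <stdint.h>

  uint8_t umax8 (uint8_t a, uint8_t b) { return a > b ? a : b; }
  uint8_t umin8 (uint8_t a, uint8_t b) { return a < b ? a : b; }
  int8_t  smax8 (int8_t a, int8_t b)   { return a > b ? a : b; }
  int8_t  smin8 (int8_t a, int8_t b)   { return a < b ? a : b; }
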
5300 unsigned vm = INSTR (20, 16);
5326 - aarch64_get_vec_s8 (cpu, vm, i + bias));
5334 - aarch64_get_vec_s16 (cpu, vm, i + bias));
5341 - aarch64_get_vec_s32 (cpu, vm, i + bias));
5359 - aarch64_get_vec_u8 (cpu, vm, i + bias));
5367 - aarch64_get_vec_u16 (cpu, vm, i + bias));
5374 - aarch64_get_vec_u32 (cpu, vm, i + bias));
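
The i + bias reads at 5326-5374 point at the widening subtract family:
bias is 0 for the low-half form and half the lane count for the
upper-half ("2") form, with each difference computed at twice the
source width.  A signed-byte sketch under that reading:

  #include <stdint.h>

  void
  ssubl_8h (const int8_t *vn, const int8_t *vm, int16_t *vd,
            unsigned lanes, unsigned bias)
  {
    unsigned i;

    /* bias == 0 selects the low half, bias == lanes the high half.  */
    for (i = 0; i < lanes; i++)
      vd[i] = (int16_t) vn[i + bias] - (int16_t) vm[i + bias];
  }
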
5401 unsigned vm = INSTR (20, 16);
5410 /* Make copies of the source registers in case vd == vn/vm. */
5412 copy_vm = cpu->fr[vm];
6092 unsigned vm;
6110 vm = INSTR (19, 16);
6111 element2 = aarch64_get_vec_u16 (cpu, vm, index);
6130 vm = INSTR (20, 16);
6131 element2 = aarch64_get_vec_u32 (cpu, vm, index);
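
Lines 6110-6131 narrow the Vm field to INSTR (19, 16) for 16-bit
elements but keep INSTR (20, 16) for 32-bit ones.  That matches the
usual by-element encoding, where the element index borrows bit 20 (M)
for halfwords, leaving only V0-V15 addressable (compare the vm > 15
check at 6363).  A decode sketch, assuming the standard H (bit 11),
L (bit 21), M (bit 20) index bits:

  #include <stdint.h>

  #define BITS(x, hi, lo) \
    (((x) >> (lo)) & ((1u << ((hi) - (lo) + 1)) - 1))

  void
  decode_by_element (uint32_t instr, int halfword,
                     unsigned *vm, unsigned *index)
  {
    unsigned h = BITS (instr, 11, 11);
    unsigned l = BITS (instr, 21, 21);
    unsigned m = BITS (instr, 20, 20);

    if (halfword)
      {
        *vm = BITS (instr, 19, 16);        /* Rm[3:0]: V0-V15 only.  */
        *index = (h << 2) | (l << 1) | m;  /* H:L:M */
      }
    else
      {
        *vm = BITS (instr, 20, 16);
        *index = (h << 1) | l;             /* H:L */
      }
  }
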
6165 unsigned vm = INSTR (20, 16);
6183 element2 = aarch64_get_vec_double (cpu, vm, H);
6196 float element2 = aarch64_get_vec_float (cpu, vm, (H << 1) | L);
6349 unsigned vm = INSTR (20, 16);
6363 if (vm > 15)
6367 val = aarch64_get_vec_u16 (cpu, vm, elem);
6379 uint64_t val = aarch64_get_vec_u32 (cpu, vm, elem);
6409 unsigned vm = INSTR (20, 16);
6425 - aarch64_get_vec_s8 (cpu, vm, i));
6432 - aarch64_get_vec_s16 (cpu, vm, i));
6439 - aarch64_get_vec_s32 (cpu, vm, i));
6449 - aarch64_get_vec_s64 (cpu, vm, i));
6468 unsigned vm = INSTR (20, 16);
6485 * aarch64_get_vec_u8 (cpu, vm, i)));
6493 * aarch64_get_vec_u16 (cpu, vm, i)));
6501 * aarch64_get_vec_u32 (cpu, vm, i)));
6523 unsigned vm = INSTR (20, 16);
6541 / aarch64_get_vec_double (cpu, vm, i));
6547 / aarch64_get_vec_float (cpu, vm, i));
6564 unsigned vm = INSTR (20, 16);
6582 * aarch64_get_vec_double (cpu, vm, i));
6588 * aarch64_get_vec_float (cpu, vm, i));
6605 unsigned vm = INSTR (20, 16);
6616 /* Extract values before adding them in case vd == vn/vm. */
6619 double tmp3 = aarch64_get_vec_double (cpu, vm, 0);
6620 double tmp4 = aarch64_get_vec_double (cpu, vm, 1);
6630 /* Extract values before adding them in case vd == vn/vm. */
6633 float tmp5 = aarch64_get_vec_float (cpu, vm, 0);
6634 float tmp6 = aarch64_get_vec_float (cpu, vm, 1);
6640 float tmp7 = aarch64_get_vec_float (cpu, vm, 2);
6641 float tmp8 = aarch64_get_vec_float (cpu, vm, 3);
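
The tmp1..tmp8 locals at 6616-6641 (like the explicit copies at
5410-5412) exist because vd may alias vn or vm: a pairwise add that
wrote vd's first element before reading its remaining inputs would
corrupt them.  The shape of the 2-double case:

  void
  faddp_2d (double *vd, const double *vn, const double *vm)
  {
    /* Read everything before the first write, in case vd == vn/vm.  */
    double tmp1 = vn[0], tmp2 = vn[1];
    double tmp3 = vm[0], tmp4 = vm[1];

    vd[0] = tmp1 + tmp2;
    vd[1] = tmp3 + tmp4;
  }
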
6928 unsigned vm = INSTR (20, 16);
6950 val.b[j ++] = aarch64_get_vec_u8 (cpu, vm, i);
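
The val.b[j ++] fill at 6950 reads like an EXT-style byte extract: the
result is the tail of vn from some start position, followed by the
head of vm.  A sketch under that reading:

  #include <stdint.h>

  void
  ext_bytes (const uint8_t *vn, const uint8_t *vm, uint8_t *vd,
             unsigned width, unsigned pos)
  {
    unsigned i, j = 0;

    for (i = pos; i < width; i++)   /* tail of vn ...  */
      vd[j++] = vn[i];
    for (i = 0; i < pos; i++)       /* ... then head of vm.  */
      vd[j++] = vm[i];
  }
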
12035 unsigned vm = INSTR (20, 16);
12037 if (vm == R31)
12117 address + aarch64_get_reg_u64 (cpu, vm, NO_SP));
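
Lines 12035-12117 use vm as a general register holding a load/store
offset; the NO_SP read is assumed to make register 31 the zero
register rather than SP, which is why the vm == R31 case is tested
separately at 12037.  A sketch of that addressing; regs[] is
illustrative:

  #include <stdint.h>

  uint64_t
  reg_offset_address (const uint64_t *regs, uint64_t base, unsigned vm)
  {
    /* In NO_SP operand positions, register 31 reads as zero.  */
    uint64_t offset = vm == 31 ? 0 : regs[vm];

    return base + offset;
  }
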