Lines Matching refs: ib

457 			volatile u32 *ib = p->ib.ptr;
479 ib[track->cb_color_slice_idx[id]] = slice;
1020 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
1025 * if packet is bigger than remaining ib size, or if packet is unknown.
1156 volatile uint32_t *ib;
1158 ib = p->ib.ptr;
1216 ib[h_idx + 2] = PACKET2(0);
1217 ib[h_idx + 3] = PACKET2(0);
1218 ib[h_idx + 4] = PACKET2(0);
1219 ib[h_idx + 5] = PACKET2(0);
1220 ib[h_idx + 6] = PACKET2(0);
1221 ib[h_idx + 7] = PACKET2(0);
1222 ib[h_idx + 8] = PACKET2(0);
1228 ib[h_idx] = header;
1229 ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
1249 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
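
The hits at 1216-1229 come from the vline wait fixup: depending on the CRTC state, the parser either overwrites the user-supplied vline wait payload with type-2 NOP dwords or re-points the poll at the real CRTC's VLINE status register (the byte offset is shifted right by 2, as the code above shows). Below is a minimal userspace sketch that condenses both fixups into one helper for illustration; neutralize_vline_wait() and the register value in main() are hypothetical, not driver code.

#include <stdint.h>

typedef uint32_t u32;

/* CP type-2 filler NOP header, matching the PACKET2() values seen above. */
#define PACKET2(v) ((2u << 30) | ((v) & 0x3fffffffu))

/* Hypothetical helper: not part of evergreen_cs.c. */
static void neutralize_vline_wait(volatile u32 *ib, unsigned h_idx, u32 status_reg)
{
	unsigned i;

	/* Replace the payload dwords with harmless type-2 NOPs. */
	for (i = 2; i <= 8; i++)
		ib[h_idx + i] = PACKET2(0);

	/* Re-point the poll at the driver-chosen register (dword index). */
	ib[h_idx + 4] = status_reg >> 2;
}

int main(void)
{
	u32 ib[16] = { 0 };

	/* 0x6bb8 is a placeholder byte offset, not the real VLINE status register. */
	neutralize_vline_wait(ib, 0, 0x6bb8);
	return 0;
}
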
1295 u32 m, i, tmp, *ib;
1316 ib = p->ib.ptr;
1348 ib[idx] = 0;*/
1364 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1393 ib[idx] &= ~Z_ARRAY_MODE(0xf);
1395 ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1403 ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1404 ib[idx] |= DB_TILE_SPLIT(tile_split) |
1436 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1448 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1460 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1472 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1496 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1516 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1580 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1598 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1666 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1667 ib[idx] |= CB_TILE_SPLIT(tile_split) |
1674 track->cb_color_attrib[tmp] = ib[idx];
1694 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1695 ib[idx] |= CB_TILE_SPLIT(tile_split) |
1702 track->cb_color_attrib[tmp] = ib[idx];
1719 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1736 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1777 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1793 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1805 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1813 ib[idx] |= 3;
1922 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1936 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1950 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
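
Many of the hits between 1364 and 1950 follow the same relocation pattern: the dword already in the IB carries a surface base address, and the checker adds the relocated buffer's GPU offset shifted right by 8 (these base registers evidently hold the address in 256-byte units). A standalone sketch of that arithmetic; patch_base_reg() and the values in main() are hypothetical, not taken from evergreen_cs.c.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Hypothetical helper mirroring "ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);" */
static void patch_base_reg(volatile u32 *ib, unsigned idx, u64 gpu_offset)
{
	ib[idx] += (u32)((gpu_offset >> 8) & 0xffffffff);
}

int main(void)
{
	u32 ib[4] = { 0 };           /* stand-in indirect buffer */
	u64 gpu_offset = 0x12345600; /* 256-byte-aligned BO address */

	patch_base_reg(ib, 0, gpu_offset);
	printf("patched dword: 0x%08X\n", ib[0]); /* prints 0x00123456 */
	return 0;
}
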
1993 volatile u32 *ib;
2001 ib = p->ib.ptr;
2039 ib[idx + 0] = offset;
2040 ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2085 ib[idx+0] = offset;
2086 ib[idx+1] = upper_32_bits(offset) & 0xff;
2112 ib[idx+0] = offset;
2113 ib[idx+1] = upper_32_bits(offset) & 0xff;
2140 ib[idx+1] = offset;
2141 ib[idx+2] = upper_32_bits(offset) & 0xff;
2226 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
2252 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
2253 ib[idx+2] = upper_32_bits(offset) & 0xff;
2310 ib[idx] = offset;
2311 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2348 ib[idx+2] = offset;
2349 ib[idx+3] = upper_32_bits(offset) & 0xff;
2370 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2390 ib[idx+1] = offset & 0xfffffff8;
2391 ib[idx+2] = upper_32_bits(offset) & 0xff;
2412 ib[idx+1] = offset & 0xfffffffc;
2413 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2434 ib[idx+1] = offset & 0xfffffffc;
2435 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2497 ib[idx+1+(i*8)+1] |=
2505 ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
2506 ib[idx+1+(i*8)+7] |=
2517 tex_dim = ib[idx+1+(i*8)+0] & 0x7;
2518 mip_address = ib[idx+1+(i*8)+3];
2540 ib[idx+1+(i*8)+2] += toffset;
2541 ib[idx+1+(i*8)+3] += moffset;
2557 ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
2561 ib[idx+1+(i*8)+0] = offset64;
2562 ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
2642 ib[idx+1] = offset;
2643 ib[idx+2] = upper_32_bits(offset) & 0xff;
2661 ib[idx+3] = offset;
2662 ib[idx+4] = upper_32_bits(offset) & 0xff;
2690 ib[idx+0] = offset;
2691 ib[idx+1] = upper_32_bits(offset) & 0xff;
2715 ib[idx+1] = offset;
2716 ib[idx+2] = upper_32_bits(offset) & 0xff;
2739 ib[idx+3] = offset;
2740 ib[idx+4] = upper_32_bits(offset) & 0xff;
2859 for (r = 0; r < p->ib.length_dw; r++) {
2860 DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]);
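
From 1993 onward the hits repeatedly write a relocated 64-bit GPU address as a dword pair: the low 32 bits first, then only bits 39:32 via upper_32_bits(offset) & 0xff. A standalone sketch of that split; split_addr() is a hypothetical helper and the sample address is arbitrary.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define upper_32_bits(n) ((u32)((n) >> 32))

/* Hypothetical helper: store a 40-bit GPU address as a lo/hi dword pair. */
static void split_addr(u32 *ib, unsigned idx, u64 offset)
{
	ib[idx + 0] = (u32)offset;                  /* low 32 bits */
	ib[idx + 1] = upper_32_bits(offset) & 0xff; /* bits 39:32 only */
}

int main(void)
{
	u32 ib[2];

	split_addr(ib, 0, 0x0000002345678000ULL);
	printf("lo=0x%08X hi=0x%02X\n", ib[0], ib[1]); /* lo=0x45678000 hi=0x23 */
	return 0;
}
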
2893 volatile u32 *ib = p->ib.ptr;
2923 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2929 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2930 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2986 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2987 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
2988 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2989 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3001 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3003 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3004 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3007 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3008 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3010 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3046 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3047 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
3048 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3049 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3059 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3063 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3064 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3069 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3070 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3074 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3094 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3095 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3130 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3131 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
3132 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3133 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3148 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3152 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3153 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3158 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3159 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3163 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3201 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
3202 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
3203 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3204 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3213 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
3214 ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3215 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
3216 ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3248 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3249 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
3250 ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3251 ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3252 ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
3253 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3276 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3277 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3278 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3279 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3297 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3298 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
3310 for (r = 0; r < p->ib.length_dw; r++) {
3311 DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]);
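
The hits from 2923 to 3298 patch relocations in two forms: tiled addresses are folded into a single dword in 256-byte units (gpu_offset >> 8), while linear addresses get a dword-aligned low part (& 0xfffffffc) plus the upper 8 bits in a separate dword. A condensed sketch of both forms; the helper names are hypothetical.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define upper_32_bits(n) ((u32)((n) >> 32))

/* Hypothetical helper: tiled surface base, stored in 256-byte units. */
static void patch_dma_tiled(u32 *ib, unsigned idx, u64 gpu_offset)
{
	ib[idx] += (u32)(gpu_offset >> 8);
}

/* Hypothetical helper: linear address split across a lo/hi dword pair. */
static void patch_dma_linear(u32 *ib, unsigned lo, unsigned hi, u64 gpu_offset)
{
	ib[lo] += (u32)(gpu_offset & 0xfffffffc);   /* dword-aligned low bits */
	ib[hi] += upper_32_bits(gpu_offset) & 0xff; /* bits 39:32 */
}

int main(void)
{
	u32 ib[16] = { 0 };

	patch_dma_tiled(ib, 1, 0x00100000ULL);
	patch_dma_linear(ib, 7, 8, 0x0000000230000100ULL);
	printf("tiled=0x%08X lo=0x%08X hi=0x%02X\n", ib[1], ib[7], ib[8]);
	return 0;
}
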
3442 u32 *ib, struct radeon_cs_packet *pkt)
3445 u32 idx_value = ib[idx];
3495 reg = ib[idx + 5] * 4;
3502 reg = ib[idx + 3] * 4;
3523 command = ib[idx + 4];
3524 info = ib[idx + 1];
3561 start_reg = ib[idx + 2];
3586 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
3594 pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
3595 pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
3606 pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
3607 ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
3617 } while (idx < ib->length_dw);
3625 * @ib: radeon_ib pointer
3631 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
3637 header = ib->ptr[idx];
3728 } while (idx < ib->length_dw);
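
Finally, evergreen_ib_parse() (3586-3617) validates a VM IB by walking it packet by packet: decode type and count from each CP header, check type-3 packets, and advance the index past the packet (evergreen_dma_ib_parse() at 3631 does the analogous walk over DMA packet headers). A standalone sketch of that walk, assuming the usual CP header bit layout; walk_ib() and the sample buffer are illustrative only.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define CP_PACKET_GET_TYPE(h)    (((h) >> 30) & 0x3)
#define CP_PACKET_GET_COUNT(h)   (((h) >> 16) & 0x3fff)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xff)

/* Hypothetical walker: mirrors the do/while loop structure, not the full checks. */
static int walk_ib(const u32 *ptr, u32 length_dw)
{
	u32 idx = 0;

	do {
		u32 header = ptr[idx];
		u32 type = CP_PACKET_GET_TYPE(header);
		u32 count = CP_PACKET_GET_COUNT(header);

		switch (type) {
		case 2: /* type-2 filler NOP: a single dword */
			idx += 1;
			break;
		case 3: /* type-3: header plus count + 1 data dwords */
			printf("PKT3 opcode 0x%02X, %u data dwords\n",
			       CP_PACKET3_GET_OPCODE(header), count + 1);
			idx += count + 2;
			break;
		default: /* type-0/1 packets are rejected by the VM parser */
			return -1;
		}
	} while (idx < length_dw);

	return 0;
}

int main(void)
{
	/* One PACKET3 (opcode 0x10, count 0 => one data dword) followed by a PACKET2. */
	u32 ib[] = { 0xC0001000, 0x00000000, 0x80000000 };

	return walk_ib(ib, sizeof(ib) / sizeof(ib[0]));
}
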