/linux-master/drivers/gpu/drm/radeon/
radeon_ib.c
     51:  * @ib: IB object returned
     58: radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib *ib, struct radeon_vm *vm, unsigned size)  [argument]
     59:                struct radeon_ib *ib, struct radeon_vm *vm,
     64:     r = radeon_sa_bo_new(&rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
     70:     radeon_sync_create(&ib->sync);
     72:     ib->ring = ring;
     73:     ib->fence = NULL;
     74:     ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
     75:     ib->vm = vm;
     77:     /* ib pool ...
     97: radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)  [argument]
    125: radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, struct radeon_ib *const_ib, bool hdp_flush)  [argument]
    ... (more hits not shown)
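Read in line order, these hits trace the whole radeon IB lifecycle: suballocate from the ring_tmp_bo pool, fill the CPU mapping, schedule, free once the fence signals. A minimal sketch of that round trip, assuming only the signatures above plus the call sites in the radeon_vce.c and r600_dma.c entries below; the helper name and the NOP payload are hypothetical:

    #include "radeon.h"	/* in-tree driver header; sketch assumes driver context */

    /* example_ib_roundtrip(): hypothetical helper, not a function in the tree */
    static int example_ib_roundtrip(struct radeon_device *rdev, int ring)
    {
    	struct radeon_ib ib;
    	int r;

    	/* carve 256 bytes out of the ring_tmp_bo suballocator, no VM attached */
    	r = radeon_ib_get(rdev, ring, &ib, NULL, 256);
    	if (r)
    		return r;

    	/* packets are written through the CPU mapping; length counts dwords */
    	ib.ptr[0] = 0;		/* a ring-specific NOP packet would go here */
    	ib.length_dw = 1;

    	/* submit: no constant IB, no HDP flush */
    	r = radeon_ib_schedule(rdev, &ib, NULL, false);
    	if (!r)
    		r = radeon_fence_wait(ib.fence, false);

    	/* returns the suballocation once ib.fence has signaled */
    	radeon_ib_free(rdev, &ib);
    	return r;
    }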
si_dma.c
     61:  * @ib: indirect buffer to fill with commands
     68: si_dma_vm_copy_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
     69:                      struct radeon_ib *ib,
     78:     ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, ...
     80:     ib->ptr[ib->length_dw++] = lower_32_bits(pe);
     81:     ib->ptr[ib->length_dw++] = lower_32_bits(src);
     82:     ib->ptr[ib ...
    104: si_dma_vm_write_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags)  [argument]
    151: si_dma_vm_set_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags)  [argument]
    ... (more hits not shown)
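The hit at line 78 is cut mid-packet. Reconstructed, one COPY packet moves count 8-byte PTEs from the GART table (src) into the page table (pe). The header arguments and the two trailing high-address dwords below are inferred from the SI DMA packet layout, not from the hits, so treat them as assumptions:

    /* One SI DMA COPY packet (sketch; header args and the last two dwords
     * are assumptions): copy 'count' 8-byte PTEs from 'src' to 'pe'. */
    ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
    				      1, 0, 0, count * 8);	/* bytes to copy   */
    ib->ptr[ib->length_dw++] = lower_32_bits(pe);		/* dst, bits 31:0  */
    ib->ptr[ib->length_dw++] = lower_32_bits(src);		/* src, bits 31:0  */
    ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;	/* dst, bits 39:32 */
    ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;	/* src, bits 39:32 */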
ni_dma.c
    117:  * @ib: IB object to schedule
    121: cayman_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)  [argument]
    122:                            struct radeon_ib *ib)
    124:     struct radeon_ring *ring = &rdev->ring[ib->ring];
    125:     unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
    144:     radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
    145:     radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
    307:  * @ib ...
    314: cayman_dma_vm_copy_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
    352: cayman_dma_vm_write_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags)  [argument]
    400: cayman_dma_vm_set_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags)  [argument]
    442: cayman_dma_vm_pad_ib(struct radeon_ib *ib)  [argument]
    ... (more hits not shown)
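Lines 144–145 show how the DMA indirect-buffer packet squeezes a 40-bit IB address and the IB size into two dwords; annotated below, with the header write (not among the hits) added as an assumption:

    /* Cayman DMA INDIRECT_BUFFER packet (header macro name is an assumption).
     * The IB base must be 32-byte aligned, so bits 4:0 are masked off; the
     * second dword packs length_dw into bits 31:12 and address bits 39:32
     * into bits 7:0. */
    radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
    radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
    radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));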
radeon_vce.c
    349:     struct radeon_ib ib;  [local]
    353:     r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
    355:         DRM_ERROR("radeon: failed to get ib (%d).\n", r);
    359:     dummy = ib.gpu_addr + 1024;
    362:     ib.length_dw = 0;
    363:     ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
    364:     ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
    365:     ib ...
    416:     struct radeon_ib ib;  [local]
    718: radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)  [argument]
    ... (more hits not shown)
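The stream at lines 362–365 is the start of a VCE create-session message; the amdgpu_vce.c entry further down shows the same framing with the session handle as the third dword, so the truncated hit plausibly continues as:

    /* VCE create-session message header (sketch; the third dword is taken
     * from the matching amdgpu_vce.c hits, not from this file's hits). */
    ib.length_dw = 0;
    ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c);	/* message length  */
    ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);	/* session command */
    ib.ptr[ib.length_dw++] = cpu_to_le32(handle);	/* session handle  */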
cik_sdma.c
    128:  * @ib: IB object to schedule
    132: cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)  [argument]
    133:                          struct radeon_ib *ib)
    135:     struct radeon_ring *ring = &rdev->ring[ib->ring];
    136:     u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;
    154:     radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
    155:     radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
    156:     radeon_ring_write(ring, ib->length_dw);
    703:     struct radeon_ib ib;  [local]
    802: cik_sdma_vm_copy_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
    840: cik_sdma_vm_write_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags)  [argument]
    889: cik_sdma_vm_set_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags)  [argument]
    932: cik_sdma_vm_pad_ib(struct radeon_ib *ib)  [argument]
    ... (more hits not shown)
r600_dma.c
    338:     struct radeon_ib ib;  [local]
    352:     r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
    354:         DRM_ERROR("radeon: failed to get ib (%d).\n", r);
    358:     ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
    359:     ib.ptr[1] = lower_32_bits(gpu_addr);
    360:     ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
    361:     ib.ptr[3] = 0xDEADBEEF;
    362:     ib.length_dw = 4;
    364:     r = radeon_ib_schedule(rdev, &ib, NULL, false);
    366:     radeon_ib_free(rdev, &ib);
    404: r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)  [argument]
    ... (more hits not shown)
radeon_vm.c
    350:  * @ib: indirect buffer to fill with commands
    360: radeon_vm_set_pages(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags)  [argument]
    361:                     struct radeon_ib *ib,
    370:         radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
    373:         radeon_asic_vm_write_pages(rdev, ib, pe, addr,
    377:         radeon_asic_vm_set_pages(rdev, ib, pe, addr,
    392:     struct radeon_ib ib;  [local]
    408:     r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
    412:     ib.length_dw = 0;
    414:     radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
    415:     radeon_asic_vm_pad_ib(rdev, &ib);
    648:     struct radeon_ib ib;  [local]
    731: radeon_vm_frag_ptes(struct radeon_device *rdev, struct radeon_ib *ib, uint64_t pe_start, uint64_t pe_end, uint64_t addr, uint32_t flags)  [argument]
    814: radeon_vm_update_ptes(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_ib *ib, uint64_t start, uint64_t end, uint64_t dst, uint32_t flags)  [argument]
    916:     struct radeon_ib ib;  [local]
    ... (more hits not shown)
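The three radeon_asic_vm_*_pages() calls at lines 370–377 are the arms of one dispatch inside radeon_vm_set_pages(). A sketch of the heuristic; the guard conditions are assumptions (the hits show only the callees), and gart_backed / system_pages are hypothetical placeholder booleans:

    /* Sketch: pick the cheapest way to fill 'count' page-table entries.
     * GART-backed ranges are copied straight out of the GART table, small
     * or scattered updates are written entry-by-entry, and large linear
     * runs use the bulk "set" packet. Conditions are assumptions. */
    if (gart_backed)
    	radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
    else if (system_pages || count < 3)
    	radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags);
    else
    	radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags);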
/linux-master/arch/s390/include/asm/
idals.h
    135:     struct idal_buffer *ib;  [local]
    140:     ib = kmalloc(struct_size(ib, data, nr_ptrs), GFP_DMA | GFP_KERNEL);
    141:     if (!ib)
    143:     ib->size = size;
    144:     ib->page_order = page_order;
    147:         ib->data[i] = dma64_add(ib->data[i - 1], IDA_BLOCK_SIZE);
    153:         ib->data[i] = virt_to_dma64(vaddr);
    155:     return ib;
    169: idal_buffer_free(struct idal_buffer *ib)  [argument]
    186: __idal_buffer_is_needed(struct idal_buffer *ib)  [argument]
    196: idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)  [argument]
    219: idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)  [argument]
    241: idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)  [argument]
    ... (more hits not shown)
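The definition hits map out the whole idal_buffer API: allocate a buffer split into page-sized chunks, hook it into a CCW, copy to or from user space, free. A hypothetical read path stitched together from those signatures (the channel-program step is elided):

    #include <linux/err.h>
    #include <asm/idals.h>

    /* example_idal_read(): hypothetical helper built from the signatures
     * above; 'ccw' and 'ubuf' are supplied by the caller. */
    static long example_idal_read(struct ccw1 *ccw, void __user *ubuf, size_t count)
    {
    	struct idal_buffer *ib;
    	long rc = count;

    	ib = idal_buffer_alloc(count, 0);	/* page_order 0: 4K chunks */
    	if (IS_ERR(ib))
    		return PTR_ERR(ib);

    	idal_buffer_set_cda(ib, ccw);		/* point the CCW at the IDAL */

    	/* ... start the channel program and wait for completion here ... */

    	if (idal_buffer_to_user(ib, ubuf, count))
    		rc = -EFAULT;			/* nonzero: bytes not copied */
    	idal_buffer_free(ib);
    	return rc;
    }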
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_vce.c
    443:     struct amdgpu_ib *ib;  [local]
    464:     ib = &job->ibs[0];
    469:     ib->length_dw = 0;
    470:     ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
    471:     ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
    472:     ib->ptr[ib->length_dw++] = handle;
    475:     ib ...
    535:     struct amdgpu_ib *ib;  [local]
    599: amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib, int lo, int hi, unsigned int size, int32_t index)  [argument]
    650: amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib, int lo, int hi, unsigned int size, uint32_t index)  [argument]
    736: amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_ib *ib)  [argument]
    974: amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_ib *ib)  [argument]
   1068: amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
    ... (more hits not shown)
amdgpu_vcn.c
    552:     struct amdgpu_ib *ib;  [local]
    561:     ib = &job->ibs[0];
    562:     ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
    563:     ib->ptr[1] = addr;
    564:     ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
    565:     ib->ptr[3] = addr >> 32;
    566:     ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
    567:     ib->ptr[5] = 0;
    569:         ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
    570:     ib ...
    593: amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct amdgpu_ib *ib)  [argument]
    628: amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct amdgpu_ib *ib)  [argument]
    658:     struct amdgpu_ib ib;  [local]
    687: amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib, uint32_t ib_pack_in_dw, bool enc)  [argument]
    726:     struct amdgpu_ib *ib;  [local]
    790:     struct amdgpu_ib ib;  [local]
    856:     struct amdgpu_ib *ib;  [local]
    923:     struct amdgpu_ib *ib;  [local]
    988:     struct amdgpu_ib ib;  [local]
    ... (more hits not shown)
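Lines 562–569 program a VCN decode message entirely through register writes. The same dwords with the pattern spelled out (the 16-dword total is an assumption):

    /* Each PACKET0 targets an internal VCN register; the following dword is
     * its value. DATA0/DATA1 take the message GPU address, CMD kicks the
     * engine, and the remainder is NOP padding (16 dwords total assumed). */
    ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);	/* msg addr, low  */
    ib->ptr[1] = addr;
    ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);	/* msg addr, high */
    ib->ptr[3] = addr >> 32;
    ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);	/* command        */
    ib->ptr[5] = 0;
    for (i = 6; i < 16; ++i)
    	ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);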
sdma_v2_4.c
    239:  * @ib: IB object to schedule
    244: sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
    246:                        struct amdgpu_ib *ib,
    257:     amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
    258:     amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
    259:     amdgpu_ring_write(ring, ib->length_dw);
    584:     struct amdgpu_ib ib;  [local]
    598:     memset(&ib, 0, sizeof(ib));
    600:                       AMDGPU_IB_POOL_DIRECT, &ib);
    604:     ib ...
    650: sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
    677: sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, uint32_t incr)  [argument]
    707: sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags)  [argument]
    731: sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)  [argument]
   1185: sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, bool tmz)  [argument]
   1211: sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count)  [argument]
    ... (more hits not shown)
cik_sdma.c
    215:  * @ib: IB object to schedule
    220: cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
    222:                       struct amdgpu_ib *ib,
    232:     amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
    233:     amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
    234:     amdgpu_ring_write(ring, ib->length_dw);
    653:     struct amdgpu_ib ib;  [local]
    667:     memset(&ib, 0, sizeof(ib));
    669:                       AMDGPU_IB_POOL_DIRECT, &ib);
    673:     ib ...
    715: cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
    742: cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, uint32_t incr)  [argument]
    772: cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags)  [argument]
    796: cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)  [argument]
   1299: cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, bool tmz)  [argument]
   1324: cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count)  [argument]
    ... (more hits not shown)
si_dma.c
     62: si_dma_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
     64:                     struct amdgpu_ib *ib,
     74:     amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
     75:     amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
    246:     struct amdgpu_ib ib;  [local]
    260:     memset(&ib, 0, sizeof(ib));
    262:                       AMDGPU_IB_POOL_DIRECT, &ib);
    266:     ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
    267:     ib ...
    306: si_dma_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
    331: si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, uint32_t incr)  [argument]
    359: si_dma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags)  [argument]
    400: si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)  [argument]
    770: si_dma_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, bool tmz)  [argument]
    794: si_dma_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count)  [argument]
    ... (more hits not shown)
sdma_v3_0.c
    415:  * @ib: IB object to schedule
    420: sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
    422:                        struct amdgpu_ib *ib,
    433:     amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
    434:     amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
    435:     amdgpu_ring_write(ring, ib->length_dw);
    858:     struct amdgpu_ib ib;  [local]
    872:     memset(&ib, 0, sizeof(ib));
    874:                       AMDGPU_IB_POOL_DIRECT, &ib);
    878:     ib ...
    923: sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
    950: sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, uint32_t incr)  [argument]
    980: sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags)  [argument]
   1004: sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)  [argument]
   1625: sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, bool tmz)  [argument]
   1651: sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count)  [argument]
    ... (more hits not shown)
sdma_v5_0.c
    405:  * @ib: IB object to schedule
    410: sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
    412:                        struct amdgpu_ib *ib,
    431:     amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
    432:     amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
    433:     amdgpu_ring_write(ring, ib->length_dw);
   1048:     struct amdgpu_ib ib;  [local]
   1057:     memset(&ib, 0, sizeof(ib));
   1062:     ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
   1063:     ib ...
   1143: sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
   1171: sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, uint32_t incr)  [argument]
   1201: sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags)  [argument]
   1226: sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)  [argument]
   1814: sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, bool tmz)  [argument]
   1841: sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count)  [argument]
    ... (more hits not shown)
sdma_v6_0.c
    197:  * @ib: IB object to schedule
    203: sdma_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
    205:                        struct amdgpu_ib *ib,
    224:     amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
    225:     amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
    226:     amdgpu_ring_write(ring, ib->length_dw);
    903:     struct amdgpu_ib ib;  [local]
    912:     memset(&ib, 0, sizeof(ib));
    917:     ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
    918:     ib ...
    997: sdma_v6_0_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
   1025: sdma_v6_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, uint32_t incr)  [argument]
   1055: sdma_v6_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags)  [argument]
   1080: sdma_v6_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)  [argument]
   1576: sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, bool tmz)  [argument]
   1603: sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count)  [argument]
    ... (more hits not shown)
sdma_v5_2.c
    212:  * @ib: IB object to schedule
    217: sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
    219:                        struct amdgpu_ib *ib,
    238:     amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
    239:     amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
    240:     amdgpu_ring_write(ring, ib->length_dw);
    888:     struct amdgpu_ib ib;  [local]
    897:     memset(&ib, 0, sizeof(ib));
    902:     ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
    903:     ib ...
    982: sdma_v5_2_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
   1010: sdma_v5_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, uint32_t incr)  [argument]
   1040: sdma_v5_2_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags)  [argument]
   1066: sdma_v5_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)  [argument]
   1760: sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, bool tmz)  [argument]
   1787: sdma_v5_2_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count)  [argument]
    ... (more hits not shown)
sdma_v4_4_2.c
    307:  * @ib: IB object to schedule
    312: sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
    314:                          struct amdgpu_ib *ib,
    325:     amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
    326:     amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
    327:     amdgpu_ring_write(ring, ib->length_dw);
   1010:     struct amdgpu_ib ib;  [local]
   1024:     memset(&ib, 0, sizeof(ib));
   1026:                       AMDGPU_IB_POOL_DIRECT, &ib);
   1030:     ib ...
   1077: sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count)  [argument]
   1105: sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t value, unsigned count, uint32_t incr)  [argument]
   1135: sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags)  [argument]
   1159: sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)  [argument]
   1954: sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, bool tmz)  [argument]
   1981: sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count)  [argument]
    ... (more hits not shown)
amdgpu_sdma.h
    128:     void (*emit_copy_buffer)(struct amdgpu_ib *ib,
    144:     void (*emit_fill_buffer)(struct amdgpu_ib *ib,
    153: #define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
    154: #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
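These two wrappers are how the rest of amdgpu emits buffer copies and fills without knowing the SDMA generation: both dispatch through adev->mman.buffer_funcs, which each sdma_v*.c file above installs. Typical use, as a sketch (the address and size variables are placeholders):

    /* Emit a copy and a clear into an already-allocated IB; the callee
     * appends packets and advances ib.length_dw. Final 'false' = no TMZ
     * (protected-memory) copy. */
    amdgpu_emit_copy_buffer(adev, &ib, src_gpu_addr, dst_gpu_addr, num_bytes, false);
    amdgpu_emit_fill_buffer(adev, &ib, 0x00000000, dst_gpu_addr, num_bytes);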
uvd_v6_0.c
    214:     struct amdgpu_ib *ib;  [local]
    224:     ib = &job->ibs[0];
    227:     ib->length_dw = 0;
    228:     ib->ptr[ib->length_dw++] = 0x00000018;
    229:     ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
    230:     ib->ptr[ib->length_dw++] = handle;
    231:     ib ...
    278:     struct amdgpu_ib *ib;  [local]
   1023: uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
   1051: uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags)  [argument]
    ... (more hits not shown)
amdgpu_ib.c
     58:  * @ib: IB object returned
     64: amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int size, enum amdgpu_ib_pool_type pool_type, struct amdgpu_ib *ib)  [argument]
     66:               struct amdgpu_ib *ib)
     72:                          &ib->sa_bo, size);
     78:         ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
     80:         ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
     83:     ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
     93:  * @ib: IB object to free
     94:  * @f: the fence SA bo need wait on for the ib alloatio ...
     98: amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct dma_fence *f)  [argument]
    131:     struct amdgpu_ib *ib = &ibs[0];  [local]
    ... (more hits not shown)
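amdgpu splits what radeon's radeon_ib_get() did into per-pool suballocators, selected by pool_type. A minimal counterpart of the radeon sketch near the top of this listing, using only the two signatures above (the helper is hypothetical):

    #include "amdgpu.h"	/* in-tree driver header; sketch assumes driver context */

    /* example_direct_ib(): hypothetical helper; 'f' is the fence the
     * suballocation must wait on before it can be reused. */
    static int example_direct_ib(struct amdgpu_device *adev, struct dma_fence *f)
    {
    	struct amdgpu_ib ib;
    	int r;

    	memset(&ib, 0, sizeof(ib));
    	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
    	if (r)
    		return r;

    	/* fill ib.ptr[] / ib.length_dw, then submit via amdgpu_ib_schedule() */

    	amdgpu_ib_free(adev, &ib, f);
    	return 0;
    }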
/linux-master/drivers/net/ethernet/amd/
7990.c
    100:            t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
    101:            ib->brx_ring[t].length, \
    102:            ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
    106:            t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
    107:            ib->btx_ring[t].length, \
    108:            ib->btx_ring[t].misc, ib ...
    140:     volatile struct lance_init_block *ib = lp->init_block;  [local]
    275:     volatile struct lance_init_block *ib = lp->init_block;  [local]
    355:     volatile struct lance_init_block *ib = lp->init_block;  [local]
    542:     volatile struct lance_init_block *ib = lp->init_block;  [local]
    601:     volatile struct lance_init_block *ib = lp->init_block;  [local]
    628:     volatile struct lance_init_block *ib = lp->init_block;  [local]
    ... (more hits not shown)
sunlance.c
    319:     struct lance_init_block *ib = lp->init_block_mem;  [local]
    332:     ib->phys_addr [0] = dev->dev_addr [1];
    333:     ib->phys_addr [1] = dev->dev_addr [0];
    334:     ib->phys_addr [2] = dev->dev_addr [3];
    335:     ib->phys_addr [3] = dev->dev_addr [2];
    336:     ib->phys_addr [4] = dev->dev_addr [5];
    337:     ib->phys_addr [5] = dev->dev_addr [4];
    342:         ib->btx_ring [i].tmd0 = leptr;
    343:         ib->btx_ring [i].tmd1_hadr = leptr >> 16;
    344:         ib ...
    376:     struct lance_init_block __iomem *ib = lp->init_block_iomem;  [local]
    506:     struct lance_init_block *ib = lp->init_block_mem;  [local]
    565:     struct lance_init_block *ib = lp->init_block_mem;  [local]
    675:     struct lance_init_block __iomem *ib = lp->init_block_iomem;  [local]
    733:     struct lance_init_block __iomem *ib = lp->init_block_iomem;  [local]
    881:     struct lance_init_block __iomem *ib = lp->init_block_iomem;  [local]
    894:     struct lance_init_block *ib = lp->init_block_mem;  [local]
    936:     struct lance_init_block __iomem *ib = lp->init_block_iomem;  [local]
    941:     struct lance_init_block *ib = lp->init_block_mem;  [local]
   1120:     struct lance_init_block __iomem *ib = lp->init_block_iomem;  [local]
   1128:     struct lance_init_block *ib = lp->init_block_mem;  [local]
   1173:     struct lance_init_block __iomem *ib = lp->init_block_iomem;  [local]
   1177:     struct lance_init_block *ib = lp->init_block_mem;  [local]
   1190:     struct lance_init_block __iomem *ib = lp->init_block_iomem;  [local]
   1196:     struct lance_init_block *ib = lp->init_block_mem;  [local]
    ... (more hits not shown)
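The phys_addr hits at lines 332–337 are a byte swap, not a straight copy: the LANCE fetches its init block as 16-bit words, so each pair of MAC address bytes is stored swapped. The same lines with that pattern annotated:

    /* Store the 6-byte MAC address as three 16-bit words with the bytes of
     * each pair swapped, matching the LANCE init-block word layout. */
    ib->phys_addr[0] = dev->dev_addr[1];	/* word 0: bytes 1,0 */
    ib->phys_addr[1] = dev->dev_addr[0];
    ib->phys_addr[2] = dev->dev_addr[3];	/* word 1: bytes 3,2 */
    ib->phys_addr[3] = dev->dev_addr[2];
    ib->phys_addr[4] = dev->dev_addr[5];	/* word 2: bytes 5,4 */
    ib->phys_addr[5] = dev->dev_addr[4];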
/linux-master/drivers/infiniband/hw/mlx4/
ah.c
     48:     ah->av.ib.port_pd = cpu_to_be32(to_mpd(ib_ah->pd)->pdn |
     50:     ah->av.ib.g_slid = rdma_ah_get_path_bits(ah_attr);
     51:     ah->av.ib.sl_tclass_flowlabel =
     56:         ah->av.ib.g_slid |= 0x80;
     57:         ah->av.ib.gid_index = grh->sgid_index;
     58:         ah->av.ib.hop_limit = grh->hop_limit;
     59:         ah->av.ib.sl_tclass_flowlabel |=
     62:         memcpy(ah->av.ib.dgid, grh->dgid.raw, 16);
     65:     ah->av.ib.dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
     73:     ah->av.ib ...
    ... (more hits not shown)
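The hits at lines 50–65 fill an mlx4 InfiniBand address vector; the indented block (lines 56–62) only runs when the address handle carries a global route header. A sketch of that control flow, with the flag test and the grh lookup inferred from the standard rdma_ah helpers in rdma/ib_verbs.h (the assignments themselves are from the hits):

    /* Sketch: build the IB address vector. The IB_AH_GRH test and the
     * rdma_ah_read_grh() call are inferred, not shown in the hits. */
    ah->av.ib.g_slid = rdma_ah_get_path_bits(ah_attr);
    if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
    	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);

    	ah->av.ib.g_slid |= 0x80;		/* flag: GRH present */
    	ah->av.ib.gid_index = grh->sgid_index;
    	ah->av.ib.hop_limit = grh->hop_limit;
    	memcpy(ah->av.ib.dgid, grh->dgid.raw, 16);
    }
    ah->av.ib.dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));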
/linux-master/include/rdma/
ib_sa.h
    184:     struct sa_path_rec_ib ib;  [member, anonymous union in struct sa_path_rec]
    218: static inline void path_conv_opa_to_ib(struct sa_path_rec *ib, ...  [argument]
    226:     ib->dgid.global.interface_id
    228:     ib->dgid.global.subnet_prefix
    230:     ib->sgid.global.interface_id
    232:     ib->dgid.global.subnet_prefix
    234:     ib->ib.dlid = 0;
    236:     ib->ib ...
    245: path_conv_ib_to_opa(struct sa_path_rec *opa, struct sa_path_rec *ib)  [argument]
    ... (more hits not shown)