Lines Matching defs:slice

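The matches below all touch fields of struct bo_slice. Piecing the assignments at lines 410-416 together with the release path at lines 161-169 gives roughly the following layout; this is a sketch inferred from the listing, with the parent type name qaic_bo and the exact field types assumed rather than quoted.

/* Sketch of struct bo_slice as implied by the matches; field names come from
 * the listing, types and ordering are assumptions. */
struct bo_slice {
	struct sg_table *sgt;	/* DMA mapping of the slice (line 411) */
	int nents;		/* number of DMA segments, one req each (line 412) */
	int dir;		/* DMA_TO_DEVICE or DMA_FROM_DEVICE (line 413) */
	struct dbc_req *reqs;	/* encoded DMA-bridge requests (line 404) */
	struct kref ref_count;	/* released through free_slice() (lines 423, 907) */
	bool no_xfer;		/* true when the slice moves no data (line 410) */
	struct qaic_bo *bo;	/* parent buffer object (line 414) */
	struct list_head slice;	/* node on bo->slices (lines 425, 906) */
	u64 size;		/* bytes covered by the slice (line 415) */
	u64 offset;		/* offset of the slice within the BO (line 416) */
};
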
161 struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);
163 slice->bo->total_slice_nents -= slice->nents;
164 list_del(&slice->slice);
165 drm_gem_object_put(&slice->bo->base);
166 sg_free_table(slice->sgt);
167 kfree(slice->sgt);
168 kfree(slice->reqs);
169 kfree(slice);
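
Lines 161-169 form the body of the slice's kref release callback, named free_slice at line 907. A minimal reconstruction, with the intent of each step spelled out; any locking around it is not visible in the matches.

static void free_slice(struct kref *kref)
{
	struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);

	/* The slice no longer contributes DMA segments to the parent BO. */
	slice->bo->total_slice_nents -= slice->nents;
	/* Unlink from the parent BO's slice list. */
	list_del(&slice->slice);
	/* Drop the reference the slice held on the parent GEM object. */
	drm_gem_object_put(&slice->bo->base);
	/* Free the slice's scatter-gather table and its encoded requests. */
	sg_free_table(slice->sgt);
	kfree(slice->sgt);
	kfree(slice->reqs);
	kfree(slice);
}
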
252 static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
264 if (!slice->no_xfer)
265 cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER);
295 * When we end up splitting up a single request (ie a buf slice) into
304 for_each_sgtable_sg(slice->sgt, sg, i) {
305 slice->reqs[i].cmd = cmd;
306 slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
308 slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
316 slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
319 slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val,
326 slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val,
333 slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val,
340 slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val,
351 slice->reqs[i].cmd |= GEN_COMPLETION;
352 slice->reqs[i].db_addr = db_addr;
353 slice->reqs[i].db_len = db_len;
354 slice->reqs[i].db_data = db_data;
369 req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ?
371 slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index,
374 slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index,
377 slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index,
380 slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index,
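
Lines 252-380 belong to encode_reqs(), which turns each scatter-gather segment of a slice into one DMA-bridge request. A condensed sketch of the main loop follows; the sem_cmd0..sem_cmd3 semaphore encoding at lines 319-340 and 369-380 is left out, and the BULK_XFER seed value for cmd plus the exact dbc_req field types are assumptions, not quotes.

static void encode_reqs_sketch(struct bo_slice *slice, u64 dev_addr,
			       __le64 db_addr, u8 db_len, __le32 db_data)
{
	struct scatterlist *sg;
	u8 cmd = BULK_XFER;	/* assumed initial command value */
	int i;

	/* A zero-sized slice carries no payload, hence no transfer direction. */
	if (!slice->no_xfer)
		cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER);

	for_each_sgtable_sg(slice->sgt, sg, i) {
		slice->reqs[i].cmd = cmd;
		/* Host memory is the source for host-to-device transfers and
		 * the destination otherwise; dev_addr advances per segment. */
		slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
						      sg_dma_address(sg) : dev_addr);
		slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
						       dev_addr : sg_dma_address(sg));
		slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
		dev_addr += sg_dma_len(sg);
	}

	/* Only the last request of the slice generates a completion and
	 * carries the doorbell write (lines 351-354). */
	i--;
	slice->reqs[i].cmd |= GEN_COMPLETION;
	slice->reqs[i].db_addr = db_addr;
	slice->reqs[i].db_len = db_len;
	slice->reqs[i].db_data = db_data;
}
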
391 struct bo_slice *slice;
398 slice = kmalloc(sizeof(*slice), GFP_KERNEL);
399 if (!slice) {
404 slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
405 if (!slice->reqs) {
410 slice->no_xfer = !slice_ent->size;
411 slice->sgt = sgt;
412 slice->nents = sgt->nents;
413 slice->dir = bo->dir;
414 slice->bo = bo;
415 slice->size = slice_ent->size;
416 slice->offset = slice_ent->offset;
418 ret = encode_reqs(qdev, slice, slice_ent);
423 kref_init(&slice->ref_count);
425 list_add_tail(&slice->slice, &bo->slices);
430 kfree(slice->reqs);
432 kfree(slice);
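
Lines 391-432 are the helper that allocates one slice and registers it with its parent BO. A sketch follows, assuming the sg_table covering the slice's range has already been cloned from the BO's table; the qaic_attach_slice_entry type and the bookkeeping marked below are assumptions or inferences from the release path, not quotes.

static int map_one_slice_sketch(struct qaic_device *qdev, struct qaic_bo *bo,
				struct qaic_attach_slice_entry *slice_ent,
				struct sg_table *sgt)
{
	struct bo_slice *slice;
	int ret;

	slice = kmalloc(sizeof(*slice), GFP_KERNEL);
	if (!slice) {
		ret = -ENOMEM;
		goto out;
	}

	/* One dbc_req per DMA segment of the slice's sg_table. */
	slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
	if (!slice->reqs) {
		ret = -ENOMEM;
		goto free_slice;
	}

	slice->no_xfer = !slice_ent->size;	/* zero size: no DMA transfer */
	slice->sgt = sgt;
	slice->nents = sgt->nents;
	slice->dir = bo->dir;
	slice->bo = bo;
	slice->size = slice_ent->size;
	slice->offset = slice_ent->offset;

	ret = encode_reqs(qdev, slice, slice_ent);
	if (ret)
		goto free_reqs;

	/* Inferred from free_slice(): the BO tracks the total segment count
	 * and stays pinned for as long as the slice exists. */
	bo->total_slice_nents += sgt->nents;
	kref_init(&slice->ref_count);
	drm_gem_object_get(&bo->base);
	list_add_tail(&slice->slice, &bo->slices);

	return 0;

free_reqs:
	kfree(slice->reqs);
free_slice:
	kfree(slice);
out:
	return ret;
}
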
904 struct bo_slice *slice, *temp;
906 list_for_each_entry_safe(slice, temp, &bo->slices, slice)
907 kref_put(&slice->ref_count, free_slice);
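
Lines 904-907 tear down every slice attached to a BO: each kref_put() ends in free_slice() (lines 161-169) once the last reference drops. The _safe iterator is required because free_slice() unlinks the node from the very list being walked. A sketch under those assumptions:

static void free_slices_sketch(struct qaic_bo *bo)
{
	struct bo_slice *slice, *temp;

	/* _safe variant: free_slice() removes 'slice' from bo->slices. */
	list_for_each_entry_safe(slice, temp, &bo->slices, slice)
		kref_put(&slice->ref_count, free_slice);
}
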
1070 static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
1074 struct dbc_req *reqs = slice->reqs;
1079 if (avail < slice->nents)
1082 if (tail + slice->nents > dbc->nelem) {
1084 avail = min_t(u32, avail, slice->nents);
1087 avail = slice->nents - avail;
1091 memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
1094 *ptail = (tail + slice->nents) % dbc->nelem;
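
Lines 1070-1094 copy a slice's pre-encoded requests into the DMA-bridge channel's circular request queue. The notable part is the wrap-around split when the slice does not fit contiguously before the end of the ring. A sketch; fifo_at(), req_q_base and nelem come from the fragments, while the channel struct name and the one-slot-empty free-space rule are assumptions:

static int copy_exec_reqs_sketch(struct dma_bridge_chan *dbc, struct bo_slice *slice,
				 u32 head, u32 *ptail)
{
	struct dbc_req *reqs = slice->reqs;
	u32 tail = *ptail;
	u32 avail;

	/* Free elements between tail and head, keeping one slot empty so a
	 * full ring can be told apart from an empty one (assumed convention). */
	avail = head - tail;
	if (head <= tail)
		avail += dbc->nelem;
	--avail;

	if (avail < slice->nents)
		return -EAGAIN;

	if (tail + slice->nents > dbc->nelem) {
		/* The slice wraps: copy up to the end of the ring first... */
		avail = dbc->nelem - tail;
		avail = min_t(u32, avail, slice->nents);
		memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
		reqs += avail;
		/* ...then the remainder at the start of the ring. */
		avail = slice->nents - avail;
		if (avail)
			memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
	} else {
		memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
	}

	*ptail = (tail + slice->nents) % dbc->nelem;
	return 0;
}
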
1099 static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
1103 struct dbc_req *reqs = slice->reqs;
1114 * of the last DMA request of this slice that needs to be
1119 for (first_n = 0; first_n < slice->nents; first_n++)
1147 memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));
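
Lines 1099-1147 handle a resized (partial) execution: only part of the slice is transferred, so the driver walks slice->reqs to find how many requests go out at full length and how much of the next one remains, then clones the slice's last request (line 1147) so the completion and doorbell fields still ride on the final element queued. An illustrative, stand-alone helper for that split arithmetic; this is not the driver's code, just the computation the loop at line 1119 implies:

/* Given per-request DMA lengths and a resized byte count, report how many
 * requests are sent at full length and how many bytes of the following
 * request remain (0 if the resize falls exactly on a request boundary). */
static void split_resized_slice(const u32 *req_len, u32 nents, u64 resize,
				u32 *full_reqs, u64 *last_bytes)
{
	u32 i;

	for (i = 0; i < nents && resize >= req_len[i]; i++)
		resize -= req_len[i];

	*full_reqs = i;
	*last_bytes = i < nents ? resize : 0;
}
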
1173 struct bo_slice *slice;
1215 list_for_each_entry(slice, &bo->slices, slice) {
1216 for (j = 0; j < slice->nents; j++)
1217 slice->reqs[j].req_id = cpu_to_le16(bo->req_id);
1219 if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset))
1220 /* Configure the slice for no DMA transfer */
1221 ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail);
1222 else if (is_partial && pexec[i].resize < slice->offset + slice->size)
1223 /* Configure the slice to be partially DMA transferred */
1224 ret = copy_partial_exec_reqs(qdev, slice,
1225 pexec[i].resize - slice->offset, dbc,
1228 ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
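
Lines 1173-1228 are the execute path: every request of every slice is stamped with the BO's request ID (lines 1215-1217), then the slice is queued one of three ways depending on the partial-execute resize. An illustrative classifier (not driver code) that captures the conditions at lines 1219-1228:

enum slice_xfer { SLICE_NO_XFER, SLICE_PARTIAL_XFER, SLICE_FULL_XFER };

/* is_partial: request arrived through the partial-execute path;
 * resize: number of bytes of the BO actually being transferred. */
static enum slice_xfer classify_slice(bool is_partial, u64 resize,
				      u64 slice_offset, u64 slice_size)
{
	if (is_partial && (!resize || resize <= slice_offset))
		return SLICE_NO_XFER;		/* slice lies past the resized end */
	if (is_partial && resize < slice_offset + slice_size)
		return SLICE_PARTIAL_XFER;	/* resize ends inside the slice */
	return SLICE_FULL_XFER;			/* slice fully covered, or not partial */
}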