Lines Matching defs:size

238 	/* Sanity check RQ size before proceeding */
279 int size = 0;
283 size += sizeof(struct mlx5_wqe_xrc_seg);
286 size += sizeof(struct mlx5_wqe_ctrl_seg) +
297 size += sizeof(struct mlx5_wqe_ctrl_seg) +
305 size += sizeof(struct mlx5_wqe_eth_pad) +
310 size += sizeof(struct mlx5_wqe_ctrl_seg) +
315 size += sizeof(struct mlx5_wqe_ctrl_seg) +
324 return size;
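
The fragments at 279-324 come from sq_overhead(), which totals the fixed, per-opcode byte overhead a send WQE carries before any data segments: every QP type adds its control segment, and transport-specific segments (XRC, remote address, datagram, eth) stack on top. A minimal userspace sketch of that accumulation, covering only two QP types and with segment sizes hardcoded to assumed values (the driver takes sizeof() of the real struct mlx5_wqe_*_seg layouts):

    #include <stdio.h>

    /* Assumed segment sizes; the driver uses sizeof() on the real structs. */
    #define CTRL_SEG   16   /* struct mlx5_wqe_ctrl_seg */
    #define XRC_SEG    16   /* struct mlx5_wqe_xrc_seg */
    #define RADDR_SEG  16   /* struct mlx5_wqe_raddr_seg */

    enum qp_type { QPT_RC, QPT_XRC };

    /* Same shape as sq_overhead(): start at zero and add every segment
     * the QP type always places in front of the data segments. */
    static int sq_overhead(enum qp_type t)
    {
        int size = 0;

        if (t == QPT_XRC)
            size += XRC_SEG;          /* XRC adds its own segment... */
        size += CTRL_SEG + RADDR_SEG; /* ...on top of the common ones */

        return size;
    }

    int main(void)
    {
        printf("RC: %d bytes, XRC: %d bytes\n",
               sq_overhead(QPT_RC), sq_overhead(QPT_XRC));
        return 0;
    }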
330 int size;
332 size = sq_overhead(attr);
333 if (size < 0)
334 return size;
337 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
341 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
343 ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
346 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
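
calc_send_wqe() (330-346) sizes one send WQE as the larger of two layouts - overhead plus scatter/gather entries, or overhead plus an inline header and the inline payload - rounded up to the 64-byte basic block. Just that arithmetic, as a runnable sketch (the overhead argument and the 16/4-byte segment sizes are assumed inputs):

    #include <stdio.h>

    #define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))
    #define MAX(a, b)         ((a) > (b) ? (a) : (b))
    #define MLX5_SEND_WQE_BB  64  /* basic block: WQEs occupy whole 64B units */
    #define DATA_SEG          16  /* assumed sizeof(struct mlx5_wqe_data_seg) */
    #define INLINE_SEG_HDR    4   /* assumed sizeof(struct mlx5_wqe_inline_seg) */

    static int calc_send_wqe(int overhead, int max_send_sge, int max_inline_data)
    {
        /* Layout if the payload is inlined after the overhead segments. */
        int inl_size = overhead + INLINE_SEG_HDR + max_inline_data;
        /* Layout if the payload is described by scatter/gather entries. */
        int size = overhead + max_send_sge * DATA_SEG;

        /* The queue must fit whichever layout is larger, in whole BBs. */
        return ALIGN(MAX(inl_size, size), MLX5_SEND_WQE_BB);
    }

    int main(void)
    {
        /* e.g. 48B overhead, 3 SGEs, 64B max inline:
         * sg layout = 48 + 3*16 = 96; inline = 48 + 4 + 64 = 116;
         * max = 116, aligned up to 128. */
        printf("%d\n", calc_send_wqe(48, 3, 64));
        return 0;
    }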
659 unsigned long addr, size_t size,
666 *umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
680 mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
681 addr, size, *npages, *page_shift, *ncont, *offset);
739 mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
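
The fragments at 659-739 pin the user buffer (ib_umem_get()) and log how it decomposes into pages: npages system pages, grouped into ncont contiguous chunks of 2^page_shift bytes starting at offset. The npages figure follows the usual first-page/last-page arithmetic; a hedged userspace model assuming 4K pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                  /* assumed 4K pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Number of PAGE_SIZE pages spanned by [addr, addr + size): round
     * the start down and the end up to page boundaries, then count. */
    static unsigned long num_pages(unsigned long addr, unsigned long size)
    {
        unsigned long first = addr & PAGE_MASK;
        unsigned long last  = (addr + size + PAGE_SIZE - 1) & PAGE_MASK;

        return (last - first) >> PAGE_SHIFT;
    }

    int main(void)
    {
        /* A 6000-byte buffer starting 100 bytes into a page spans 2 pages. */
        printf("%lu\n", num_pages(0x1064, 6000));
        return 0;
    }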
2952 struct mlx5_ib_qp *qp, int *size)
2963 *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
2987 *size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
2996 *size += ALIGN(left, 16) / 16;
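
A recurring idiom above and below: *size counts 16-byte octowords rather than bytes, because that is the unit of the descriptor-size (DS) field the hardware reads, so byte lengths are converted with ALIGN(x, 16) / 16. Made explicit:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    /* Convert a byte count to 16-byte octowords, rounding up - the
     * unit the descriptor-size (DS) field is expressed in. */
    static int bytes_to_ds(int bytes)
    {
        return ALIGN(bytes, 16) / 16;
    }

    int main(void)
    {
        printf("%d\n", bytes_to_ds(18)); /* 18 bytes -> 2 octowords */
        return 0;
    }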
3072 /* KLMs take twice the size of MTTs */
3217 /* KLMs take twice the size of MTTs */
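
The comment repeated at 3072 and 3217 contrasts the two memory-translation entry formats: an MTT entry is a single 64-bit address, while a KLM also carries a byte count and a memory key, hence double the size. Stand-in layouts (assumed, but consistent with the 2x ratio the comment asserts):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layouts: an MTT entry is one 64-bit DMA address, while a
     * KLM (key/length/memory) adds a byte count and a memory key. */
    struct mtt { uint64_t ptag; };
    struct klm {
        uint32_t bcount;  /* byte count covered by this entry */
        uint32_t key;     /* key of the region */
        uint64_t va;      /* starting virtual address */
    };

    int main(void)
    {
        /* Hence "KLMs take twice the size of MTTs": 16 vs 8 bytes. */
        printf("mtt=%zu klm=%zu\n", sizeof(struct mtt), sizeof(struct klm));
        return 0;
    }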
3282 static u8 calc_sig(void *wqe, int size)
3288 for (i = 0; i < size; i++)
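
calc_sig() (3282-3288) folds size bytes of the WQE into a one-byte XOR checksum; set_sig_seg() at 4135-4137 stamps it into the receive WQE's signature field. A reconstruction consistent with the loop shown (the final bit inversion is an assumption about the unshown return):

    #include <stdint.h>
    #include <stdio.h>

    /* XOR-fold 'size' bytes of the WQE into one byte, then invert
     * (the inversion is assumed from how the value is consumed). */
    static uint8_t calc_sig(const void *wqe, int size)
    {
        const uint8_t *p = wqe;
        uint8_t res = 0;
        int i;

        for (i = 0; i < size; i++)
            res ^= p[i];

        return ~res;
    }

    int main(void)
    {
        uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };

        printf("0x%02x\n", calc_sig(buf, sizeof(buf))); /* prints 0xf7 */
        return 0;
    }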
3446 struct mlx5_ib_qp *qp, void **seg, int *size)
3505 pr_err("Bad block size given: %u\n", block_size);
3529 *size += wqe_size / 16;
3539 *size += sizeof(*bsf) / 16;
3579 void **seg, int *size)
3602 * KLM octoword size - if protection was provided
3610 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
3616 *size += sizeof(struct mlx5_mkey_seg) / 16;
3620 ret = set_sig_data_segment(wr, qp, seg, size);
3629 u32 psv_idx, void **seg, int *size)
3649 *size += sizeof(*psv_seg) / 16;
3656 void **seg, int *size)
3669 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
3675 *size += sizeof(struct mlx5_mkey_seg) / 16;
3681 *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
3686 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
3690 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
3695 *size += sizeof(struct mlx5_mkey_seg) / 16;
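
set_reg_wr() (3656-3681) and set_linv_wr() (3686-3695) share the calling convention visible in all the set_*_wr helpers above: build a segment at *seg, advance *seg past it, add sizeof(segment) / 16 octowords to *size, and wrap the cursor to the start of the queue when it reaches qend. The skeleton of that convention, with a hypothetical 16-byte segment (void-pointer arithmetic as in the kernel, a GCC extension):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical 16-byte segment standing in for the mlx5_wqe_*_seg types. */
    struct fake_seg { uint8_t bytes[16]; };

    /* Emit one segment, then advance the cursor and the octoword count,
     * wrapping to 'qbegin' when the cursor hits the end of the ring. */
    static void set_fake_seg(void **seg, int *size, void *qbegin, void *qend)
    {
        memset(*seg, 0, sizeof(struct fake_seg));  /* build the segment */
        *seg += sizeof(struct fake_seg);           /* advance the cursor */
        *size += sizeof(struct fake_seg) / 16;     /* count in octowords */
        if (*seg == qend)                          /* wrap at ring end */
            *seg = qbegin;
    }

    int main(void)
    {
        struct fake_seg ring[4];
        void *seg = &ring[0];
        int size = 0;

        set_fake_seg(&seg, &size, ring, &ring[4]);
        return size == 1 ? 0 : 1;  /* one 16-byte segment = 1 octoword */
    }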
3759 int *size, int nreq)
3776 *size = sizeof(**ctrl) / 16;
3783 u8 size, unsigned idx, u64 wr_id,
3791 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
3800 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
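
finish_wqe() (3783-3800) seals the descriptor: qpn_ds packs the octoword count into the low 8 bits and the QP number into bits 8-31 (line 3791), and cur_post then advances by the number of 64-byte basic blocks the WQE occupied (line 3800). The packing and the rounding, modeled:

    #include <stdint.h>
    #include <stdio.h>

    #define MLX5_SEND_WQE_BB    64
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        uint32_t qpn = 0x000abc;  /* example QP number */
        int size = 6;             /* WQE size in 16-byte octowords */

        /* Low 8 bits: descriptor size in octowords; bits 8-31: QPN.
         * (The driver additionally byte-swaps this with cpu_to_be32.) */
        uint32_t qpn_ds = size | (qpn << 8);

        /* 6 octowords = 96 bytes, so the WQE occupies 2 basic blocks. */
        int bbs = DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);

        printf("qpn_ds=0x%08x bbs=%d\n", qpn_ds, bbs);
        return 0;
    }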
3816 int uninitialized_var(size);
3862 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
3874 size += sizeof(*xrc) / 16;
3884 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
3899 set_linv_wr(qp, &seg, &size);
3907 err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
3920 err = set_sig_umr_wr(wr, qp, &seg, &size);
3927 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
3937 &idx, &size, nreq);
3947 &size);
3954 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
3958 &idx, &size, nreq);
3969 &size);
3976 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
3994 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
4006 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
4013 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
4025 size += sizeof(struct mlx5_wqe_eth_pad) / 16;
4027 seg = set_eth_seg(seg, wr, qend, qp, &size);
4043 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4048 size += sizeof(struct mlx5_mkey_seg) / 16;
4067 size += sz;
4077 size += sizeof(struct mlx5_wqe_data_seg) / 16;
4083 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
4088 dump_wqe(qp, idx, size);
4112 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
4113 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
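
The "if (0 && ...)" test at 4112 is a deliberately disabled BlueFlame fast path: were the 0 removed, a single small inlined WQE (more than one octoword but no larger than the BlueFlame buffer) would be written straight into the BlueFlame register in whole 64-byte chunks by mlx5_bf_copy() instead of being announced with an ordinary doorbell. The eligibility test and copy length, restated in userspace (the buf_size value is assumed):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        int size = 3;        /* WQE size in octowords (48 bytes) */
        int buf_size = 256;  /* assumed BlueFlame buffer size in bytes */

        /* Eligible when the WQE is more than one octoword yet still
         * fits the BlueFlame buffer (buf_size / 16 octowords). */
        int eligible = (size > 1) && (size <= buf_size / 16);

        /* mlx5_bf_copy() writes the WQE in whole 64-byte chunks. */
        printf("eligible=%d copy=%d bytes\n", eligible, ALIGN(size * 16, 64));
        return 0;
    }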
4135 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
4137 sig->signature = calc_sig(sig, size);
4670 /* Sanity check RQ size before proceeding */