Lines Matching refs:attr_mask

3352 				const struct ib_qp_attr *attr, int attr_mask,
3359 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3364 if (attr_mask & IB_QP_ACCESS_FLAGS)
3518 int attr_mask, u32 path_flags,
3527 if (attr_mask & IB_QP_PKEY_INDEX)
3554 (attr_mask & IB_QP_DEST_QPN))
3587 if (attr_mask & IB_QP_TIMEOUT)
4014 int attr_mask, u8 init,
4032 else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
4103 const struct ib_qp_attr *attr, int attr_mask,
4173 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
4189 tx_affinity = get_tx_affinity(ibqp, attr, attr_mask,
4206 } else if (attr_mask & IB_QP_PATH_MTU) {
4218 if (attr_mask & IB_QP_DEST_QPN)
4224 if (attr_mask & IB_QP_PKEY_INDEX)
4232 if (attr_mask & IB_QP_PORT)
4235 if (attr_mask & IB_QP_AV) {
4237 attr_mask & IB_QP_PORT ? attr->port_num :
4239 attr_mask, 0, attr, false);
4244 if (attr_mask & IB_QP_TIMEOUT)
4247 if (attr_mask & IB_QP_ALT_PATH) {
4250 attr_mask | IB_QP_PKEY_INDEX |
4268 if (attr_mask & IB_QP_RNR_RETRY)
4271 if (attr_mask & IB_QP_RETRY_CNT)
4274 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
4277 if (attr_mask & IB_QP_SQ_PSN)
4280 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
4284 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
4285 err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
4290 if (attr_mask & IB_QP_MIN_RNR_TIMER)
4293 if (attr_mask & IB_QP_RQ_PSN)
4296 if (attr_mask & IB_QP_QKEY)
4303 u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
4333 optpar |= ib_mask_to_mlx5_opt(attr_mask);
4346 if (attr_mask & IB_QP_PORT)
4349 if (attr_mask & IB_QP_RATE_LIMIT) {
4395 if (attr_mask & IB_QP_ACCESS_FLAGS)
4397 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4399 if (attr_mask & IB_QP_PORT)
4401 if (attr_mask & IB_QP_ALT_PATH)
4453 enum ib_qp_attr_mask attr_mask)
4459 return is_valid_mask(attr_mask, req, opt);
4462 return is_valid_mask(attr_mask, req, opt);
4465 return is_valid_mask(attr_mask, req, opt);
4469 return is_valid_mask(attr_mask, req, opt);
4474 return is_valid_mask(attr_mask, req, opt);
4477 return is_valid_mask(attr_mask, req, opt);
4479 return is_valid_mask(attr_mask, req, opt);
4492 int attr_mask, struct mlx5_ib_modify_qp *ucmd,
4502 if (!(attr_mask & IB_QP_STATE))
4522 if (!is_valid_mask(attr_mask, required, 0))
4570 if (!is_valid_mask(attr_mask, required, 0))
4621 int attr_mask, enum ib_qp_type qp_type)
4638 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
4645 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
4655 int attr_mask, struct ib_udata *udata)
4668 if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
4695 return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
4700 return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
4704 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
4705 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
4708 if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
4709 mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
4710 attr_mask);
4716 attr_mask)) {
4717 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4718 cur_state, new_state, qp->type, attr_mask);
4721 !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
4722 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4723 cur_state, new_state, qp_type, attr_mask);
4727 if ((attr_mask & IB_QP_PORT) &&
4735 if ((attr_mask & IB_QP_PKEY_INDEX) &&
4741 if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
4749 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
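
Nearly every hit above follows the same pattern: the caller sets IB_QP_* bits in attr_mask to declare which fields of the ib_qp_attr structure are valid, the driver validates the mask for the requested state transition, and then tests each bit before reading the matching attribute. The fragment below is a minimal, self-contained sketch of that pattern, not the mlx5 driver code; the flag values, the simplified struct, and the is_valid_mask() helper here are illustrative stand-ins.

	/*
	 * Illustrative sketch only -- mirrors the attr_mask bit-test pattern
	 * in the listing above. Flag values and helpers are simplified.
	 */
	#include <stdio.h>

	enum {
		IB_QP_STATE      = 1 << 0,
		IB_QP_PKEY_INDEX = 1 << 1,
		IB_QP_PORT       = 1 << 2,
		IB_QP_TIMEOUT    = 1 << 3,
	};

	struct qp_attr {
		int qp_state;
		unsigned int pkey_index;
		unsigned int port_num;
		unsigned int timeout;
	};

	/* Accept attr_mask only if every required bit is set and no bit falls
	 * outside the required|optional set -- the same shape as the
	 * is_valid_mask() calls around lines 4459-4479 above. */
	static int is_valid_mask(int attr_mask, int req, int opt)
	{
		if ((attr_mask & req) != req)
			return 0;
		return !(attr_mask & ~(req | opt));
	}

	static int modify_qp(const struct qp_attr *attr, int attr_mask)
	{
		if (!is_valid_mask(attr_mask, IB_QP_STATE,
				   IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_TIMEOUT))
			return -1;

		/* Only touch the fields the caller marked as valid. */
		if (attr_mask & IB_QP_PORT)
			printf("set port %u\n", attr->port_num);
		if (attr_mask & IB_QP_TIMEOUT)
			printf("set timeout %u\n", attr->timeout);
		return 0;
	}

	int main(void)
	{
		struct qp_attr attr = { .qp_state = 3, .port_num = 1, .timeout = 14 };

		return modify_qp(&attr, IB_QP_STATE | IB_QP_PORT | IB_QP_TIMEOUT);
	}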