Lines matching refs: device

302 		event.device     = ibqp->device;
477 * resulting number of wqes does not exceed device
840 if (alloc_proxy_bufs(pd->device, qp)) {
889 /* Maintain device to QPs access, needed for further handling
914 free_proxy_bufs(pd->device, qp);
1179 if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1194 err = create_qp_common(to_mdev(pd->device), pd, init_attr,
1215 int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev, 1, 1, &sqpn, 0);
1220 sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
1223 err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
1245 struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
1247 struct mlx4_ib_dev *dev = to_mdev(device);
1278 struct mlx4_ib_dev *dev = to_mdev(qp->device);
1623 struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
1766 u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
1779 status = ib_get_cached_gid(ibqp->device, port_num,
2162 struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
2301 struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
2430 struct ib_device *ib_dev = sqp->qp.ibqp.device;
2453 is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
2950 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
2960 if (!ib_get_cached_gid(ibqp->device,
3142 set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
3247 to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
3277 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
3308 ib_dma_sync_single_for_device(ibqp->device,
3431 struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
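
Nearly every match above reaches the driver-private mlx4 device through to_mdev(), starting from the generic struct ib_device that is reachable as pd->device, ibqp->device, or init_attr->xrcd->device. A minimal sketch of that container_of() pattern, assuming (as in the mlx4 driver) that struct mlx4_ib_dev embeds its struct ib_device as the ib_dev member:

	#include <linux/kernel.h>	/* container_of() */
	#include <rdma/ib_verbs.h>	/* struct ib_device, ib_pd, ib_qp */

	struct mlx4_ib_dev {
		struct ib_device ib_dev;	/* embedded generic device */
		/* driver-private state follows */
	};

	/* Recover the driver-private wrapper from the embedded ib_device. */
	static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
	{
		return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
	}

	/* Typical use from a verbs entry point, mirroring the matches above: */
	/*	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);		*/

The same object is reached from several different handles because the verbs objects (PD, QP, XRCD) each carry a pointer back to the ib_device they were created on; to_mdev() then converts that shared pointer into the mlx4-specific structure.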
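One of the matched comment fragments ("resulting number of wqes does not exceed device") belongs to the explanation of how the kernel send-queue WQE stride is chosen: the smallest wqe_shift whose resulting number of WQE units still fits the device capabilities. A standalone, hedged sketch of that idea only; buf_size and max_wqes are illustrative parameters, not the driver's exact fields:

	#include <linux/errno.h>
	#include <linux/types.h>

	/* Smallest unit shift (>= 64 bytes) whose unit count fits the cap. */
	static int pick_wqe_shift(u32 buf_size, u32 max_wqes)
	{
		int shift = 6;				/* 64-byte minimum unit */

		while (shift < 16 && (buf_size >> shift) > max_wqes)
			shift++;			/* larger units -> fewer WQEs */

		return (buf_size >> shift) > max_wqes ? -EINVAL : shift;
	}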