Searched refs:qpn (Results 1 - 25 of 45) sorted by relevance

/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/core/
agent.h
   49: int port_num, int qpn);

agent.c
   87: struct ib_wc *wc, struct ib_device *device, int port_num, int qpn) {
  104: agent = port_priv->agent[qpn];
   86: agent_send_response(struct ib_mad *mad, struct ib_grh *grh, struct ib_wc *wc, struct ib_device *device, int port_num, int qpn)  [argument]

cm_msgs.h
  127: static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)  [argument]
  129: req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
  524: static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)  [argument]
  526: rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
  627: static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)  [argument]
  629: dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
  676: static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)  [argument]
  678: lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
  812: __be32 qpn)
  814: sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) <<
  811: cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg, __be32 qpn)  [argument]
  … (more matches in this file)
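A pattern worth noting across these cm_msgs.h setters: the 24-bit QPN is stored in the upper 24 bits of a big-endian 32-bit field, with the low byte left for an adjacent field. A minimal round-trip sketch, with hypothetical helper names and htonl/ntohl standing in for the kernel's cpu_to_be32/be32_to_cpu:

    #include <stdint.h>
    #include <arpa/inet.h>

    /* Pack a host-order 24-bit QPN into the top 24 bits of a
     * big-endian field, preserving the neighboring low byte. */
    static inline uint32_t pack_qpn(uint32_t field_be, uint32_t qpn)
    {
        return htonl(((qpn & 0xffffff) << 8) | (ntohl(field_be) & 0xff));
    }

    /* Recover the 24-bit QPN from the same field. */
    static inline uint32_t unpack_qpn(uint32_t field_be)
    {
        return ntohl(field_be) >> 8;
    }
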
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/net/mlx4/
qp.c
   64: void mlx4_qp_event(struct mlx4_priv *priv, u32 qpn, int event_type) {
   70: qp = __mlx4_qp_lookup(priv, qpn);
   77: MLX4_DEBUG("Async event for none existent QP %08x\n", qpn);
   95: *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;
   97: *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn
   98: && qp->qpn <= dev->phys_caps.base_sqpn + 1;
  148: ret = mlx4_cmd(dev, 0, qp->qpn, 2, MLX4_CMD_2RST_QP,
  153: port = (qp->qpn & 1) + 1;
  184: qp->qpn);
  294: __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)  [argument]
  332: mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)  [argument]
  369: mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)  [argument]
  … (more matches in this file)
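Two idioms recur in these qp.c matches: special QPs (QP0/QP1) are identified purely by their position in the number space relative to base_sqpn, and the owning port falls out of the QPN's low bit. A hedged sketch, with a minimal stand-in for the driver struct:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for the driver type; only the field used here. */
    struct mlx4_dev { struct { uint32_t base_sqpn; } phys_caps; };

    /* QP0 occupies the first two numbers of the special-QP range,
     * one per port (cf. qp.c:97-98). */
    static bool is_real_qp0(const struct mlx4_dev *dev, uint32_t qpn)
    {
        return qpn >= dev->phys_caps.base_sqpn
            && qpn <= dev->phys_caps.base_sqpn + 1;
    }

    /* The low bit of a special QPN selects the port (cf. qp.c:153). */
    static int special_qpn_to_port(uint32_t qpn)
    {
        return (int)(qpn & 1) + 1;
    }
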
mcg.c
  123: enum mlx4_steer_type steer, u32 qpn) {
  134: if (pqp->qpn == qpn)
  144: enum mlx4_steer_type steer, unsigned int index, u32 qpn) {
  167: /*If the given qpn is also a promisc qp,
  170: pqp = get_promisc_qp(dev, port, steer, qpn);
  177: dqp->qpn = qpn;
  203: /*don't add already existing qpn*/
  204: if (pqp->qpn
  122: get_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn)  [argument]
  143: new_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn)  [argument]
  233: existing_steering_entry(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, unsigned int index, u32 qpn)  [argument]
 1211: int qpn;  [local]
  … (more matches in this file)
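get_promisc_qp (mcg.c:122) resolves a QPN against the per-port, per-steer-type list of promiscuous QPs, matching on the qpn field seen in struct mlx4_promisc_qp (mlx4.h:458 below). A minimal sketch of that lookup shape, using a plain singly linked list where the driver uses its own list machinery:

    #include <stddef.h>
    #include <stdint.h>

    struct promisc_qp {
        uint32_t qpn;              /* cf. mlx4.h:458 */
        struct promisc_qp *next;   /* assumed simple list for the sketch */
    };

    /* Return the promiscuous-QP entry matching qpn, or NULL. */
    static struct promisc_qp *
    find_promisc_qp(struct promisc_qp *head, uint32_t qpn)
    {
        for (struct promisc_qp *pqp = head; pqp != NULL; pqp = pqp->next)
            if (pqp->qpn == qpn)
                return pqp;
        return NULL;
    }
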
en_resources.c
   47: int is_tx, int rss, int qpn, int cqn, int user_prio,
   63: context->local_qpn = cpu_to_be32(qpn);
   46: mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context)  [argument]
en_netdev.c
  199: rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
  478: int *qpn, u64 *reg_id) {
  488: qp.qpn = *qpn;
  509: // rule.qpn = *qpn;
  530: unsigned char *mac, int qpn, u64 reg_id)
  540: qp.qpn = qpn;
  477: mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, uint64_t mac, int *qpn, u64 *reg_id)  [argument]
  563: int *qpn = &priv->base_qpn;  [local]
  … (more matches in this file)
en_rx.c
  583: // printf("cqe->owner_sr_opcode %d; cons_index %d:%d; size %d; cq->buf %p; index %d; qpn %x\n",
  585: // debug_printf("%s: qpn %x\n", __func__, cqe->vlan_my_qpn);
  833: static inline int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,  [argument]
  846: err = mlx4_qp_alloc(mdev->dev, qpn, qp);
  848: MLX4_ERR("Failed to allocate qp #%x\n", qpn);
  854: mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, qpn,
  881: int qpn;  [local]
  883: err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, 0);
  885: MLX4_ERR("Failed reserving drop qpn\n");
  888: err = mlx4_qp_alloc(priv->mdev->dev, qpn,
  … (more matches in this file)
en_tx.c
  174: err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
  181: err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
  183: MLX4_ERR("Failed allocating qp %d\n", ring->qpn);
  203: err_reserve: /*mlx4_qp_release_range(mdev->dev, ring->qpn, 1);*/
  221: MLX4_DEBUG("Destroying tx ring, qpn: %d\n", ring->qpn);
  228: mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
  255: ring->doorbell_qpn = ring->qp.qpn << 8;
  257: mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
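The rx and tx paths share the same allocation flow: reserve a QPN range, then allocate the QP against that number, releasing the range again on failure or teardown (en_tx.c:174/181/228). A hedged outline of that flow, assuming the driver's declarations from mlx4.h:

    /* Sketch only; uses the mlx4_qp_reserve_range / mlx4_qp_alloc /
     * mlx4_qp_release_range calls visible in the matches above. */
    int setup_one_qp(struct mlx4_dev *dev, struct mlx4_qp *qp)
    {
        int qpn, err;

        err = mlx4_qp_reserve_range(dev, 1, 1, &qpn, 0); /* one QPN, align 1 */
        if (err)
            return err;

        err = mlx4_qp_alloc(dev, qpn, qp);
        if (err)
            mlx4_qp_release_range(dev, qpn, 1);          /* undo reservation */
        return err;
    }

The doorbell value at en_tx.c:255 is simply the QPN shifted left by 8, echoing the same high-24-bits placement seen in the cm_msgs.h setters earlier.
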
mlx4_en.h
  280: int qpn;  [member in struct mlx4_en_tx_ring]
  331: int qpn;  [member in struct mlx4_en_rx_ring]
  815: int is_tx, int rss, int qpn, int cqn, int user_prio,
mlx4.h
  307: __be32 qpn;  [member in struct mlx4_mpt_entry]
  458: uint32_t qpn;  [member in struct mlx4_promisc_qp]
  926: int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
  928: void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
 1183: void mlx4_qp_event(struct mlx4_priv *priv, uint32_t qpn, int event_type);
/barrelfish-master/lib/devif/backends/net/mlx4/include/rdma/
ib_user_mad.h
   60: * @qpn - Remote QP number received from/to be sent to
   78: __be32 qpn;  [member in struct ib_user_mad_hdr_old]
  103: * @qpn - Remote QP number received from/to be sent to
  122: __be32 qpn;  [member in struct ib_user_mad_hdr]
  171: * @qpn - Queue pair number; must be 0 or 1.
  187: __u8 qpn;  [member in struct ib_user_mad_reg_req]
iw_cm.h
  110: u32 qpn;  [member in struct iw_cm_conn_param]
  119: int qpn);
  174: * @qpn: The queue pair number
  176: struct ib_qp *iw_cm_get_qp(struct ib_device *device, int qpn);
ib_user_cm.h
  133: __u32 qpn;  [member in struct ib_ucm_req]
  158: __u32 qpn;  [member in struct ib_ucm_rep]
  211: __u32 qpn;  [member in struct ib_ucm_sidr_rep]
  295: __u32 qpn;  [member in struct ib_ucm_sidr_rep_event_resp]
ib_user_verbs_exp.h
  120: __u32 qpn;  [member in struct ib_uverbs_exp_create_qp_resp]
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mthca/
mthca_qp.c
  197: return qp->qpn >= dev->qp_table.sqp_start &&
  198: qp->qpn <= dev->qp_table.sqp_start + 3;
  203: return qp->qpn >= dev->qp_table.sqp_start &&
  204: qp->qpn <= dev->qp_table.sqp_start + 1;
  237: void mthca_qp_event(struct mthca_dev *dev, u32 qpn,  [argument]
  244: qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
  251: (int) event_type, qpn);
  452: err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
  621: qp_context->local_qpn = cpu_to_be32(qp->qpn);
  762: ((qp->qpn
 1354: mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct ib_qp_cap *cap, int qpn, int port, struct mthca_sqp *sqp)  [argument]
  … (more matches in this file)
mthca_mad.c
  165: int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;  [local]
  167: struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
  172: send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
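The boolean at mthca_mad.c:165 is doing real work: LID-routed subnet-management MADs must travel on QP0, while every other management class goes out on QP1, so the comparison yields the right index into the send_agent array directly. A self-contained restatement, with a stand-in header type:

    #include <stdint.h>

    #define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01  /* standard IB MAD class */

    struct mad_hdr { uint8_t mgmt_class; };    /* stand-in for the sketch */

    /* 0 selects QP0 (subnet management), 1 selects QP1 (everything else). */
    static int mad_qpn(const struct mad_hdr *hdr)
    {
        return hdr->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
    }
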
mthca_eq.c
  142: __be32 qpn;  [member in struct mthca_eqe, anonymous nested struct]
  281: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
  286: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
  291: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
  296: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
  306: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
  311: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
  316: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
  321: mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
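Every one of these dispatch sites masks the EQE's QPN field with 0xffffff: the hardware delivers the number in a big-endian 32-bit word, but only the low 24 bits are the QPN. A small sketch of the extraction step, with be32toh as the userspace spelling of the kernel's be32_to_cpu:

    #include <stdint.h>
    #include <endian.h>

    #define QPN_MASK 0xffffff  /* QPNs are 24 bits wide */

    /* Extract the 24-bit QPN from a big-endian event-queue entry word. */
    static uint32_t eqe_qpn(uint32_t qpn_be)
    {
        return be32toh(qpn_be) & QPN_MASK;
    }
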
mthca_provider.h
  263: u32 qpn;  [member in struct mthca_qp]
/barrelfish-master/lib/devif/backends/net/mlx4/include/linux/mlx4/
device.h
  661: int qpn;  [member in struct mlx4_qp]
  877: __be32 qpn;  [member in struct mlx4_eqe, anonymous nested struct]
 1005: static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) {  [argument]
 1006: return (qpn
 1011: static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) {  [argument]
 1014: if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8)
 1103: int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
 1223: u32 qpn;  [member in struct mlx4_net_trans_rule]
 1234: __be32 qpn;  [member in struct mlx4_net_trans_rule_hw_ctrl]
  … (more matches in this file)
qp.h
  440: static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) {  [argument]
  441: return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
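__mlx4_qp_lookup relies on caps.num_qps being a power of two, so qpn & (num_qps - 1) reduces any 24-bit QPN to its table index without a modulo. A hedged usage sketch against the Linux radix-tree API the snippet uses; only the lookup side appears in these results, and the surrounding types are assumed from the driver headers:

    /* Assumes <linux/radix-tree.h> and the driver's struct mlx4_qp;
     * mirrors qp.h:440-441. */
    struct mlx4_qp *qp_lookup(struct radix_tree_root *tree,
                              u32 qpn, u32 num_qps)
    {
        /* num_qps must be a power of two so that the mask is
         * equivalent to qpn % num_qps. */
        return radix_tree_lookup(tree, qpn & (num_qps - 1));
    }
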
/barrelfish-master/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mlx4/
mad.c
  360: int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;  [local]
  362: struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
  367: send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 1295: static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
 1299: return (qpn >= proxy_start && qpn <= proxy_start + 1);
qp.c
  147: return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn
  148: && qp->mqp.qpn
  158: && qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn
  159: && qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
  165: if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]
  166: || qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
  182: && qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn
  183: && qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
  189: if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
  332: "on QP %06x\n", type, qp->qpn);
  773: alloc_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct ib_qp_init_attr *attr, int *qpn)  [argument]
  850: int qpn;  [local]
  … (more matches in this file)
main.c
 1015: if (flow_spec->l2_id.ib_uc.qpn) {
 1016: spec_l2->ib.l3_qpn = cpu_to_be32(flow_spec->l2_id.ib_uc.qpn);
 1092: rule.qpn = mqp->mqp.qpn;
 2151: int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) {  [argument]
 2161: *qpn = dev->steer_qpn_base + offset;
 2165: void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
 2167: if (!qpn ||
 2171: BUG_ON(qpn < dev->steer_qpn_base);
 2174: qpn
  … (more matches in this file)
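mlx4_ib_steer_qp_alloc hands out steering QPNs as steer_qpn_base plus an offset into a per-device allocation pool, and the free path sanity-checks that the number lies above the base (main.c:2161/2171). A hedged sketch of that base-plus-offset scheme, using a simple bitmap in place of whatever allocator the driver actually keeps:

    #include <stdint.h>
    #include <errno.h>

    #define STEER_QPN_COUNT 64            /* assumed pool size */

    struct steer_pool {
        uint32_t base;                    /* steer_qpn_base */
        uint64_t used;                    /* one bit per QPN in the pool */
    };

    /* Allocate one steering QPN; fills *qpn and returns 0, or -ENOMEM. */
    static int steer_qpn_alloc(struct steer_pool *p, uint32_t *qpn)
    {
        for (int off = 0; off < STEER_QPN_COUNT; off++) {
            if (!(p->used & (1ULL << off))) {
                p->used |= 1ULL << off;
                *qpn = p->base + off;     /* cf. main.c:2161 */
                return 0;
            }
        }
        return -ENOMEM;
    }

    static void steer_qpn_free(struct steer_pool *p, uint32_t qpn)
    {
        if (qpn < p->base || qpn >= p->base + STEER_QPN_COUNT)
            return;                       /* cf. the BUG_ON at main.c:2171 */
        p->used &= ~(1ULL << (qpn - p->base));
    }
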
/barrelfish-master/lib/devif/backends/net/mlx4/include/infiniband/
kern-abi.h
  465: __u32 qpn;  [member in struct ibv_create_qp_resp]
  598: __u32 qpn;  [member in struct ibv_create_xrc_rcv_qp_resp]
 1007: __u32 qpn;  [member in struct ibv_create_qp_resp_v3]
 1012: __u32 qpn;

Completed in 212 milliseconds
