Searched refs:qp (Results 26 - 50 of 380) sorted by relevance


/linux-master/net/ipv4/
ip_fragment.c
78 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
84 struct ipq *qp = container_of(q, struct ipq, q); local
90 qp->ecn = 0;
91 qp->peer = q->fqdir->max_dist ?
98 struct ipq *qp; local
100 qp = container_of(q, struct ipq, q);
101 if (qp->peer)
102 inet_putpeer(qp->peer);
139 struct ipq *qp; local
142 qp
226 ip_frag_too_far(struct ipq *qp) argument
249 ip_frag_reinit(struct ipq *qp) argument
275 ip_frag_queue(struct ipq *qp, struct sk_buff *skb) argument
406 ip_frag_coalesce_ok(const struct ipq *qp) argument
412 ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, struct sk_buff *prev_tail, struct net_device *dev) argument
488 struct ipq *qp; local
[all...]
/linux-master/drivers/infiniband/sw/rdmavt/
qp.c
15 #include "qp.h"
22 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
352 * rvt_driver_qp_init - Init driver qp resources
416 * rvt_free_qp_cb - callback function to reset a qp
417 * @qp: the qp to reset
420 * This function resets the qp and removes it from the
421 * qp hash table.
423 static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v) argument
426 struct rvt_dev_info *rdi = ib_to_rvt(qp
579 rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) argument
639 rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey) argument
663 rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey) argument
688 rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey) argument
729 rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) argument
823 rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, enum ib_qp_type type) argument
889 rvt_stop_rc_timers(qp); variable
897 rvt_del_timers_sync(qp); variable
931 rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, enum ib_qp_type type) argument
977 free_ud_wq_attr(struct rvt_qp *qp) argument
997 alloc_ud_wq_attr(struct rvt_qp *qp, int node) argument
1032 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); local
1286 rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err) argument
1372 rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) argument
1406 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); local
1679 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); local
1721 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); local
1784 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); local
1862 rvt_qp_valid_operation( struct rvt_qp *qp, const struct rvt_operation_params *post_parms, const struct ib_send_wr *wr) argument
1909 rvt_qp_is_avail( struct rvt_qp *qp, struct rvt_dev_info *rdi, bool reserved_op) argument
1960 rvt_post_one_wr(struct rvt_qp *qp, const struct ib_send_wr *wr, bool *call_send) argument
2146 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); local
2267 init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe) argument
2343 rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) argument
2450 rvt_comm_est(struct rvt_qp *qp) argument
2464 rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err) argument
2507 rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift) argument
2526 rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth) argument
2544 rvt_stop_rc_timers(struct rvt_qp *qp) argument
2563 rvt_stop_rnr_timer(struct rvt_qp *qp) argument
2577 rvt_del_timers_sync(struct rvt_qp *qp) argument
2589 struct rvt_qp *qp = from_timer(qp, t, s_timer); local
2617 struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer); local
2649 rvt_qp_iter_init(struct rvt_dev_info *rdi, u64 v, void (*cb)(struct rvt_qp *qp, u64 v)) argument
2687 struct rvt_qp *qp; local
2748 rvt_qp_iter(struct rvt_dev_info *rdi, u64 v, void (*cb)(struct rvt_qp *qp, u64 v)) argument
2778 rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_status status) argument
2812 rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss, void *data, u32 length, bool release, bool copy_last) argument
2906 struct rvt_qp *qp; local
[all...]
trace_rc.h
18 TP_PROTO(struct rvt_qp *qp, u32 psn),
19 TP_ARGS(qp, psn),
21 RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
32 RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
33 __entry->qpn = qp->ibqp.qp_num;
34 __entry->s_flags = qp->s_flags;
36 __entry->s_psn = qp->s_psn;
37 __entry->s_next_psn = qp->s_next_psn;
38 __entry->s_sending_psn = qp->s_sending_psn;
39 __entry->s_sending_hpsn = qp
[all...]
/linux-master/drivers/infiniband/hw/hfi1/
tid_rdma.c
8 #include "qp.h"
114 static void hfi1_init_trdma_req(struct rvt_qp *qp,
116 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
118 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
119 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
120 static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
121 static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
123 static int make_tid_rdma_ack(struct rvt_qp *qp,
126 static void hfi1_do_tid_send(struct rvt_qp *qp);
130 struct rvt_qp *qp, u3
143 tid_rdma_schedule_ack(struct rvt_qp *qp) argument
151 tid_rdma_trigger_ack(struct rvt_qp *qp) argument
193 tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p) argument
206 tid_rdma_conn_req(struct rvt_qp *qp, u64 *data) argument
214 tid_rdma_conn_reply(struct rvt_qp *qp, u64 data) argument
269 tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data) argument
285 tid_rdma_conn_error(struct rvt_qp *qp) argument
320 qp_to_rcd(struct rvt_dev_info *rdi, struct rvt_qp *qp) argument
338 hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp, struct ib_qp_init_attr *init_attr) argument
413 hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp) argument
550 rvt_put_qp(qp); variable
578 rvt_get_qp(qp); variable
596 hfi1_schedule_send(qp); variable
611 tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp) argument
647 struct rvt_qp *qp; local
684 rvt_put_qp(qp); variable
780 hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) argument
820 hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) argument
1621 __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe) argument
1654 hfi1_init_trdma_req(struct rvt_qp *qp, struct tid_rdma_request *req) argument
1709 struct rvt_qp *qp = req->qp; local
1889 tid_rdma_rcv_read_request(struct rvt_qp *qp, struct rvt_ack_entry *e, struct hfi1_packet *packet, struct ib_other_headers *ohdr, u32 bth0, u32 psn, u64 vaddr, u32 len) argument
1985 tid_rdma_rcv_error(struct hfi1_packet *packet, struct ib_other_headers *ohdr, struct rvt_qp *qp, u32 psn, int diff) argument
2231 struct rvt_qp *qp = packet->qp; local
2344 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e, struct ib_other_headers *ohdr, u32 *bth0, u32 *bth1, u32 *bth2, u32 *len, bool *last) argument
2452 struct rvt_qp *qp = packet->qp; local
2604 struct rvt_qp *qp = packet->qp; local
2629 restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, struct rvt_swqe *wqe) argument
2667 struct rvt_qp *qp = packet->qp; local
2860 struct rvt_qp *qp; local
3046 hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 *bth2) argument
3163 hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp) argument
3210 hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe) argument
3268 hfi1_check_sge_align(struct rvt_qp *qp, struct rvt_sge *sge, int num_sge) argument
3282 setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe) argument
3366 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u32 *len) argument
3402 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp) argument
3426 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg) argument
3464 hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx) argument
3660 struct rvt_qp *qp = packet->qp; local
3835 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e, struct ib_other_headers *ohdr, u32 *bth1, u32 bth2, u32 *len, struct rvt_sge_state **ss) argument
3938 hfi1_add_tid_reap_timer(struct rvt_qp *qp) argument
3951 hfi1_mod_tid_reap_timer(struct rvt_qp *qp) argument
3961 hfi1_stop_tid_reap_timer(struct rvt_qp *qp) argument
3974 hfi1_del_tid_reap_timer(struct rvt_qp *qp) argument
3985 struct rvt_qp *qp = qpriv->owner; local
4041 struct rvt_qp *qp = packet->qp; local
4212 struct rvt_qp *qp = req->qp; local
4271 struct rvt_qp *qp = packet->qp; local
4426 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e, struct ib_other_headers *ohdr, u16 iflow, u32 *bth1, u32 *bth2) argument
4500 struct rvt_qp *qp = packet->qp; local
4750 hfi1_add_tid_retry_timer(struct rvt_qp *qp) argument
4765 hfi1_mod_tid_retry_timer(struct rvt_qp *qp) argument
4777 hfi1_stop_tid_retry_timer(struct rvt_qp *qp) argument
4790 hfi1_del_tid_retry_timer(struct rvt_qp *qp) argument
4801 struct rvt_qp *qp = priv->owner; local
4843 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u16 fidx) argument
4872 struct rvt_qp *qp = packet->qp; local
5126 update_tid_tail(qp); variable
5186 make_tid_rdma_ack(struct rvt_qp *qp, struct ib_other_headers *ohdr, struct hfi1_pkt_state *ps) argument
5330 hfi1_send_tid_ok(struct rvt_qp *qp) argument
5344 struct rvt_qp *qp = iowait_to_qp(w->iow); local
5349 hfi1_do_tid_send(struct rvt_qp *qp) argument
5416 _hfi1_schedule_tid_send(struct rvt_qp *qp) argument
5447 hfi1_schedule_tid_send(struct rvt_qp *qp) argument
5466 hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e) argument
5502 tid_rdma_rcv_err(struct hfi1_packet *packet, struct ib_other_headers *ohdr, struct rvt_qp *qp, u32 psn, int diff, bool fecn) argument
[all...]
opfn.c
8 #include "qp.h"
17 bool (*request)(struct rvt_qp *qp, u64 *data);
18 bool (*response)(struct rvt_qp *qp, u64 *data);
19 bool (*reply)(struct rvt_qp *qp, u64 data);
20 void (*error)(struct rvt_qp *qp);
34 static void opfn_schedule_conn_request(struct rvt_qp *qp);
41 static void opfn_conn_request(struct rvt_qp *qp) argument
43 struct hfi1_qp_priv *priv = qp->priv;
51 trace_hfi1_opfn_state_conn_request(qp);
70 if (!extd || !extd->request || !extd->request(qp,
127 opfn_schedule_conn_request(struct rvt_qp *qp) argument
135 opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e, struct ib_atomic_eth *ateth) argument
175 opfn_conn_reply(struct rvt_qp *qp, u64 data) argument
214 opfn_conn_error(struct rvt_qp *qp) argument
242 opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask) argument
293 opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1) argument
[all...]
ud.c
13 #include "qp.h"
36 struct rvt_qp *qp; local
47 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
49 if (!qp) {
57 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
58 IB_QPT_UD : qp->ibqp.qp_type;
61 !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
69 if (qp->ibqp.qp_num > 1) {
78 qp->s_pkey_index,
82 sqp->ibqp.qp_num, qp
224 hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u16 *pkey, u32 extra_bytes, bool bypass) argument
261 hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct rvt_swqe *wqe) argument
332 hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct rvt_swqe *wqe) argument
435 hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) argument
591 return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) argument
653 return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) argument
726 opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5, struct rvt_qp *qp, u16 slid, struct opa_smp *smp) argument
819 struct rvt_qp *qp = packet->qp; local
[all...]
verbs_txreq.c
8 #include "qp.h"
16 struct rvt_qp *qp; local
21 qp = tx->qp;
22 dev = to_idev(qp->ibqp.device);
40 qp = iowait_to_qp(wait);
41 priv = qp->priv;
45 hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
52 struct rvt_qp *qp)
53 __must_hold(&qp
71 rvt_get_qp(qp); variable
[all...]
trace_tid.h
28 #define OPFN_PARAM_PRN "[%s] qpn 0x%x %s OPFN: qp 0x%x, max read %u, " \
195 TP_PROTO(struct rvt_qp *qp),
196 TP_ARGS(qp),
198 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
205 struct hfi1_qp_priv *priv = qp->priv;
207 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
208 __entry->qpn = qp->ibqp.qp_num;
225 TP_PROTO(struct rvt_qp *qp),
226 TP_ARGS(qp)
231 TP_PROTO(struct rvt_qp *qp),
[all...]
verbs_txreq.h
19 struct rvt_qp *qp; member in struct:verbs_txreq
31 struct rvt_qp *qp);
35 struct rvt_qp *qp)
36 __must_hold(&qp->slock)
39 struct hfi1_qp_priv *priv = qp->priv;
44 tx = __get_txreq(dev, qp);
48 tx->qp = qp;
opfn.h
78 void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e,
80 void opfn_conn_reply(struct rvt_qp *qp, u64 data);
81 void opfn_conn_error(struct rvt_qp *qp);
82 void opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask);
83 void opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1);
tid_rdma.h
60 u32 qp; member in struct:tid_rdma_params
98 struct rvt_qp *qp; member in struct:tid_rdma_request
203 bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data);
204 bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data);
205 bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data);
206 void tid_rdma_conn_error(struct rvt_qp *qp);
207 void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p);
214 void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
218 * @qp: the qp
221 trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe) argument
264 hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe) argument
[all...]
/linux-master/drivers/infiniband/sw/rxe/
rxe_loc.h
12 int rxe_av_chk_attr(struct rxe_qp *qp, struct rdma_ah_attr *attr);
79 int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
80 int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
86 int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
87 int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
88 struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
96 int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
102 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
106 int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
107 int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
116 qp_num(struct rxe_qp *qp) argument
121 qp_type(struct rxe_qp *qp) argument
126 qp_state(struct rxe_qp *qp) argument
131 qp_mtu(struct rxe_qp *qp) argument
141 rxe_advance_resp_resource(struct rxe_qp *qp) argument
179 wr_opcode_mask(int opcode, struct rxe_qp *qp) argument
[all...]
rxe_recv.c
14 struct rxe_qp *qp)
19 if (unlikely(!qp->valid))
24 switch (qp_type(qp)) {
42 spin_lock_irqsave(&qp->state_lock, flags);
44 if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
45 spin_unlock_irqrestore(&qp->state_lock, flags);
49 if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
50 spin_unlock_irqrestore(&qp->state_lock, flags);
54 spin_unlock_irqrestore(&qp->state_lock, flags);
76 u32 qpn, struct rxe_qp *qp)
13 check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct rxe_qp *qp) argument
75 check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, u32 qpn, struct rxe_qp *qp) argument
100 check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct rxe_qp *qp) argument
139 struct rxe_qp *qp = NULL; local
195 struct rxe_qp *qp; local
[all...]
rxe_task.h
28 struct rxe_qp *qp; member in struct:rxe_task
29 int (*func)(struct rxe_qp *qp);
41 * qp => parameter to pass to func
44 int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
/linux-master/drivers/infiniband/hw/mlx4/
qp.c
47 #include <linux/mlx4/qp.h>
107 struct mlx4_qp *qp; member in struct:mlx4_ib_qp_event_work
113 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
118 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
119 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
123 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
130 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
131 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
137 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||
138 qp
151 is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
174 get_wqe(struct mlx4_ib_qp *qp, int offset) argument
179 get_recv_wqe(struct mlx4_ib_qp *qp, int n) argument
184 get_send_wqe(struct mlx4_ib_qp *qp, int n) argument
194 stamp_send_wqe(struct mlx4_ib_qp *qp, int n) argument
259 mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) argument
284 mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type) argument
336 set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, bool is_user, bool has_rq, struct mlx4_ib_qp *qp, u32 inl_recv_sz) argument
381 set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) argument
446 set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) argument
470 alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) argument
509 free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) argument
540 mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
654 create_qp_rss(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *init_attr, struct mlx4_ib_create_qp_rss *ucmd, struct mlx4_ib_qp *qp) argument
710 _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) argument
783 mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context, struct mlx4_ib_qp *qp, int range_size, int *wqn) argument
833 mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context, struct mlx4_ib_qp *qp, bool dirty_release) argument
861 create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, struct mlx4_ib_qp *qp) argument
991 create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) argument
1332 del_gid_entries(struct mlx4_ib_qp *qp) argument
1342 get_pd(struct mlx4_ib_qp *qp) argument
1350 get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq) argument
1371 destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
1400 destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, struct ib_udata *udata) argument
1467 qp, 1); local
1516 _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) argument
1627 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
1660 _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) argument
1688 mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) argument
1726 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask) argument
1890 mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port, u16 vlan_id, u8 *smac) argument
1902 mlx4_set_alt_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port) argument
1914 update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
1926 handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_qp_context *context) argument
1950 create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
2087 fill_qp_rss_context(struct mlx4_qp_context *context, struct mlx4_ib_qp *qp) argument
2119 struct mlx4_ib_qp *qp; local
2699 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
2860 build_sriov_qp0_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) argument
3013 build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) argument
3480 build_lso_seg(struct mlx4_wqe_lso_seg *wqe, const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) argument
3525 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
3846 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
4014 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
4125 struct mlx4_ib_qp *qp; local
4207 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); local
4263 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); local
4319 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); local
4446 mlx4_ib_drain_sq(struct ib_qp *qp) argument
4481 mlx4_ib_drain_rq(struct ib_qp *qp) argument
[all...]
/linux-master/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
56 struct pvrdma_qp *qp);
58 static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq, argument
61 *send_cq = to_vcq(qp->ibqp.send_cq);
62 *recv_cq = to_vcq(qp->ibqp.recv_cq);
101 static void pvrdma_reset_qp(struct pvrdma_qp *qp) argument
107 get_cqs(qp, &scq, &rcq);
110 _pvrdma_flush_cqe(qp, scq);
112 _pvrdma_flush_cqe(qp, rcq);
120 if (qp->rq.ring) {
121 atomic_set(&qp
130 pvrdma_set_rq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap, struct pvrdma_qp *qp) argument
156 pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap, struct pvrdma_qp *qp) argument
194 struct pvrdma_qp *qp = to_vqp(ibqp); local
430 _pvrdma_free_qp(struct pvrdma_qp *qp) argument
451 pvrdma_free_qp(struct pvrdma_qp *qp) argument
499 pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) argument
509 __pvrdma_destroy_qp(struct pvrdma_dev *dev, struct pvrdma_qp *qp) argument
529 struct pvrdma_qp *qp = to_vqp(ibqp); local
634 get_sq_wqe(struct pvrdma_qp *qp, unsigned int n) argument
640 get_rq_wqe(struct pvrdma_qp *qp, unsigned int n) argument
674 struct pvrdma_qp *qp = to_vqp(ibqp); local
878 struct pvrdma_qp *qp = to_vqp(ibqp); local
970 struct pvrdma_qp *qp = to_vqp(ibqp); local
[all...]
/linux-master/drivers/interconnect/qcom/
icc-rpm.c
56 struct qcom_icc_provider *qp = to_qcom_provider(provider); local
61 rc = regmap_update_bits(qp->regmap,
62 qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
68 return regmap_update_bits(qp->regmap,
69 qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
74 static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp, argument
93 return regmap_update_bits(qp->regmap,
94 qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
100 struct qcom_icc_provider *qp; local
109 qp
134 qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp, struct qcom_icc_qos *qos) argument
155 struct qcom_icc_provider *qp; local
192 struct qcom_icc_provider *qp = to_qcom_provider(node->provider); local
294 qcom_icc_calc_rate(struct qcom_icc_provider *qp, struct qcom_icc_node *qn, int ctx) argument
327 struct qcom_icc_provider *qp = to_qcom_provider(provider); local
350 struct qcom_icc_provider *qp; local
454 struct qcom_icc_provider *qp; local
632 struct qcom_icc_provider *qp = platform_get_drvdata(pdev); local
[all...]
/linux-master/include/rdma/
rdmavt_qp.h
124 /* Number of bits to pay attention to in the opcode for checking qp type */
165 * in qp->s_max_sge.
486 spinlock_t lock; /* protect changes to the qp table */
511 struct rvt_qp *qp; member in struct:rvt_mcast_qp
532 static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp, argument
535 return (struct rvt_swqe *)((char *)qp->s_wq +
537 qp->s_max_sge *
555 * @qp - the target QP
557 static inline bool rvt_is_user_qp(struct rvt_qp *qp) argument
559 return !!qp
566 rvt_get_qp(struct rvt_qp *qp) argument
575 rvt_put_qp(struct rvt_qp *qp) argument
606 rvt_qp_wqe_reserve( struct rvt_qp *qp, struct rvt_swqe *wqe) argument
628 rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags) argument
661 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len) argument
672 rvt_div_mtu(struct rvt_qp *qp, u32 len) argument
703 struct rvt_qp *qp = NULL; local
724 rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift) argument
736 rvt_mod_retry_timer(struct rvt_qp *qp) argument
748 rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe) argument
763 rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val) argument
783 rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc, bool solicited) argument
803 rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc, bool solicited) argument
829 rvt_qp_complete_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_opcode opcode, enum ib_wc_status status) argument
880 rvt_add_retry_timer(struct rvt_qp *qp) argument
901 struct rvt_qp *qp; member in struct:rvt_qp_iter
967 rvt_to_iport(struct rvt_qp *qp) argument
982 rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe) argument
[all...]
/linux-master/drivers/infiniband/hw/qib/
qib_qp.c
224 if (rcu_dereference(ibp->rvp.qp[0]))
226 if (rcu_dereference(ibp->rvp.qp[1]))
233 void qib_notify_qp_reset(struct rvt_qp *qp) argument
235 struct qib_qp_priv *priv = qp->priv;
240 void qib_notify_error_qp(struct rvt_qp *qp) argument
242 struct qib_qp_priv *priv = qp->priv;
243 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
246 if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
247 qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
252 if (!(qp
291 qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp, struct ib_qp_attr *attr) argument
314 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu) argument
319 qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp) argument
340 qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp) argument
348 qib_stop_send_queue(struct rvt_qp *qp) argument
355 qib_quiesce_qp(struct rvt_qp *qp) argument
366 qib_flush_qp_waiters(struct rvt_qp *qp) argument
385 qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send) argument
427 struct rvt_qp *qp = iter->qp; local
[all...]
/linux-master/drivers/infiniband/hw/mlx5/
mem.c
106 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
108 struct mlx5_bf *bf = &qp->bf;
116 spin_lock_irqsave(&qp->sq.lock, flags);
118 idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
119 ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
124 cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
126 (qp->trans_qp.base.mqp.qpn << 8));
128 qp->sq.wrid[idx] = wr_id;
129 qp
184 test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp) argument
209 struct ib_qp *qp; local
[all...]
/linux-master/drivers/infiniband/hw/irdma/
uk.c
56 * @qp: hw qp ptr
58 static int irdma_nop_1(struct irdma_qp_uk *qp) argument
65 if (!qp->sq_ring.head)
68 wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
69 wqe = qp->sq_base[wqe_idx].elem;
71 qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;
79 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
91 * @qp: hw qp pt
94 irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx) argument
115 irdma_uk_qp_post_wr(struct irdma_qp_uk *qp) argument
154 irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, u16 quanta, u32 total_size, struct irdma_post_sq_info *info) argument
206 irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx) argument
232 irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
327 irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool inv_stag, bool post_sq) argument
407 irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
654 irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
722 irdma_uk_inline_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
794 irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
841 irdma_uk_post_receive(struct irdma_qp_uk *qp, struct irdma_post_rq_info *info) argument
975 struct irdma_qp_uk *qp; local
1323 irdma_setup_connection_wqes(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) argument
1420 irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) argument
1531 irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq) argument
[all...]
/linux-master/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
58 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
60 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp) argument
62 qp->sq.condition = false;
63 qp->sq.send_phantom = false;
64 qp->sq.single = false;
68 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) argument
72 scq = qp->scq;
73 rcq = qp->rcq;
75 if (!qp->sq.flushed) {
77 "FP: Adding to SQ Flush list = %p\n", qp);
114 bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) argument
123 __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) argument
137 bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) argument
174 bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) argument
196 bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) argument
826 bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) argument
950 bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size) argument
968 bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) argument
1180 __modify_flags_from_init_state(struct bnxt_qplib_qp *qp) argument
1213 __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp) argument
1245 __filter_modify_flags(struct bnxt_qplib_qp *qp) argument
1269 bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) argument
1375 bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) argument
1463 __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) argument
1509 bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) argument
1540 bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) argument
1557 bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, struct bnxt_qplib_sge *sge) argument
1576 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp) argument
1583 bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index) argument
1588 bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, struct bnxt_qplib_sge *sge) argument
1608 bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe, struct bnxt_qplib_swq *swq) argument
1628 bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe, struct bnxt_qplib_swq *swq) argument
1665 bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe, u16 *idx) argument
1728 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe, u16 *wqe_sz, u16 *qdf, u8 mode) argument
1753 bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq, struct bnxt_qplib_swq *swq, bool hw_retx) argument
1777 bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp) argument
1784 bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe) argument
2037 bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp) argument
2044 bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe) argument
2273 __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp, struct bnxt_qplib_cqe **pcqe, int *budget) argument
2314 __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp, struct bnxt_qplib_cqe **pcqe, int *budget) argument
2364 struct bnxt_qplib_qp *qp = qp_handle; local
2377 do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, u32 cq_cons, u32 swq_last, u32 cqe_sq_cons) argument
2481 struct bnxt_qplib_qp *qp; local
2590 struct bnxt_qplib_qp *qp; local
2670 struct bnxt_qplib_qp *qp; local
2766 struct bnxt_qplib_qp *qp; local
2859 struct bnxt_qplib_qp *qp; local
2976 struct bnxt_qplib_qp *qp = NULL; local
3090 bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) argument
[all...]
/linux-master/drivers/infiniband/sw/siw/
siw.h
473 #define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
483 int (*rx_data)(struct siw_qp *qp);
506 int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attr,
508 int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl);
509 void siw_qp_llp_close(struct siw_qp *qp);
510 void siw_qp_cm_drop(struct siw_qp *qp, int schedule);
511 void siw_send_terminate(struct siw_qp *qp);
513 void siw_qp_get_ref(struct ib_qp *qp);
514 void siw_qp_put_ref(struct ib_qp *qp);
551 set_rx_fpdu_context(struct siw_qp *qp, u8 opcode) argument
599 struct siw_qp *qp; local
611 qp_id(struct siw_qp *qp) argument
616 siw_qp_get(struct siw_qp *qp) argument
621 siw_qp_put(struct siw_qp *qp) argument
626 siw_sq_empty(struct siw_qp *qp) argument
633 sq_get_next(struct siw_qp *qp) argument
643 orq_get_current(struct siw_qp *qp) argument
648 orq_get_free(struct siw_qp *qp) argument
658 siw_orq_empty(struct siw_qp *qp) argument
663 irq_alloc_free(struct siw_qp *qp) argument
[all...]
/linux-master/drivers/infiniband/hw/qedr/
verbs.c
1202 "create qp: unsupported qp type=0x%x requested\n",
1209 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1216 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1223 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1230 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1247 "create qp: consumer QP cannot use GSI CQs.\n");
1272 struct qedr_qp *qp)
1284 uresp->rq_icid = qp->icid;
1285 if (qp
1270 qedr_copy_rq_uresp(struct qedr_dev *dev, struct qedr_create_qp_uresp *uresp, struct qedr_qp *qp) argument
1290 qedr_copy_sq_uresp(struct qedr_dev *dev, struct qedr_create_qp_uresp *uresp, struct qedr_qp *qp) argument
1307 qedr_copy_qp_uresp(struct qedr_dev *dev, struct qedr_qp *qp, struct ib_udata *udata, struct qedr_create_qp_uresp *uresp) argument
1342 qedr_set_common_qp_params(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_pd *pd, struct ib_qp_init_attr *attrs) argument
1393 qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp) argument
1728 qedr_init_common_qp_in_params(struct qedr_dev *dev, struct qedr_pd *pd, struct qedr_qp *qp, struct ib_qp_init_attr *attrs, bool fmr_and_reserved_lkey, struct qed_rdma_create_qp_in_params *params) argument
1765 qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp) argument
1782 qedr_iwarp_populate_user_qp(struct qedr_dev *dev, struct qedr_qp *qp, struct qed_rdma_create_qp_out_params *out_params) argument
1800 qedr_cleanup_user(struct qedr_dev *dev, struct qedr_ucontext *ctx, struct qedr_qp *qp) argument
1839 qedr_create_user_qp(struct qedr_dev *dev, struct qedr_qp *qp, struct ib_pd *ibpd, struct ib_udata *udata, struct ib_qp_init_attr *attrs) argument
1993 qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp) argument
2031 qedr_roce_create_kernel_qp(struct qedr_dev *dev, struct qedr_qp *qp, struct qed_rdma_create_qp_in_params *in_params, u32 n_sq_elems, u32 n_rq_elems) argument
2078 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev, struct qedr_qp *qp, struct qed_rdma_create_qp_in_params *in_params, u32 n_sq_elems, u32 n_rq_elems) argument
2138 qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp) argument
2161 qedr_create_kernel_qp(struct qedr_dev *dev, struct qedr_qp *qp, struct ib_pd *ibpd, struct ib_qp_init_attr *attrs) argument
2237 qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp, struct ib_udata *udata) argument
2266 struct qedr_qp *qp = get_qedr_qp(ibqp); local
2362 qedr_update_qp_state(struct qedr_dev *dev, struct qedr_qp *qp, enum qed_roce_qp_state cur_state, enum qed_roce_qp_state new_state) argument
2468 struct qedr_qp *qp = get_qedr_qp(ibqp); local
2746 struct qedr_qp *qp = get_qedr_qp(ibqp); local
2813 struct qedr_qp *qp = get_qedr_qp(ibqp); local
3289 qedr_prepare_sq_inline_data(struct qedr_dev *dev, struct qedr_qp *qp, u8 *wqe_size, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr, u8 *bits, u8 bit) argument
3375 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size, const struct ib_send_wr *wr) argument
3396 qedr_prepare_sq_rdma_data(struct qedr_dev *dev, struct qedr_qp *qp, struct rdma_sq_rdma_wqe_1st *rwqe, struct rdma_sq_rdma_wqe_2nd *rwqe2, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) argument
3419 qedr_prepare_sq_send_data(struct qedr_dev *dev, struct qedr_qp *qp, struct rdma_sq_send_wqe_1st *swqe, struct rdma_sq_send_wqe_2st *swqe2, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) argument
3438 qedr_prepare_reg(struct qedr_qp *qp, struct rdma_sq_fmr_wqe_1st *fwqe1, const struct ib_reg_wr *wr) argument
3502 qedr_can_post_send(struct qedr_qp *qp, const struct ib_send_wr *wr) argument
3544 struct qedr_qp *qp = get_qedr_qp(ibqp); local
3759 struct qedr_qp *qp = get_qedr_qp(ibqp); local
3915 struct qedr_qp *qp = get_qedr_qp(ibqp); local
4018 struct qedr_qp *qp; local
4046 qedr_chk_if_fmr(struct qedr_qp *qp) argument
4052 process_req(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status, int force) argument
4107 qedr_poll_cq_req(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, struct rdma_cqe_requester *req) argument
4264 __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, struct ib_wc *wc, struct rdma_cqe_responder *resp, u64 wr_id) argument
4293 process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, struct ib_wc *wc, struct rdma_cqe_responder *resp) argument
4318 process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, struct ib_wc *wc, struct rdma_cqe_responder *resp) argument
4333 process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, u16 hw_cons) argument
4358 try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp, struct rdma_cqe_responder *resp, int *update) argument
4367 qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, struct rdma_cqe_responder *resp) argument
4380 qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, struct rdma_cqe_responder *resp, int *update) argument
4400 try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp, struct rdma_cqe_requester *req, int *update) argument
4433 struct qedr_qp *qp; local
[all...]
qedr_roce_cm.c
58 void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp, argument
64 dev->gsi_qp = qp;
76 struct qedr_qp *qp = dev->gsi_qp; local
81 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
88 spin_lock_irqsave(&qp->q_lock, flags);
89 qedr_inc_sw_gsi_cons(&qp->sq);
90 spin_unlock_irqrestore(&qp->q_lock, flags);
101 struct qedr_qp *qp = dev->gsi_qp; local
104 spin_lock_irqsave(&qp->q_lock, flags);
106 qp
262 qedr_ll2_start(struct qedr_dev *dev, struct ib_qp_init_attr *attrs, struct qedr_qp *qp) argument
322 qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs, struct qedr_qp *qp) argument
379 qedr_gsi_build_header(struct qedr_dev *dev, struct qedr_qp *qp, const struct ib_send_wr *swr, struct ib_ud_header *udh, int *roce_mode) argument
490 qedr_gsi_build_packet(struct qedr_dev *dev, struct qedr_qp *qp, const struct ib_send_wr *swr, struct qed_roce_ll2_packet **p_packet) argument
545 struct qedr_qp *qp = get_qedr_qp(ibqp); local
615 struct qedr_qp *qp = get_qedr_qp(ibqp); local
674 struct qedr_qp *qp = dev->gsi_qp; local
[all...]

Completed in 303 milliseconds
