Searched refs:qp (Results 1 - 25 of 379) sorted by relevance

/linux-master/drivers/infiniband/hw/hfi1/
qp.h
23 * HFI1_S_WAIT_PIO_DRAIN - qp waiting for PIOs to drain
49 static inline int hfi1_send_ok(struct rvt_qp *qp) argument
51 struct hfi1_qp_priv *priv = qp->priv;
53 return !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
55 (qp->s_flags & RVT_S_RESP_PENDING) ||
56 !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
62 static inline void clear_ahg(struct rvt_qp *qp) argument
64 struct hfi1_qp_priv *priv = qp->priv;
67 qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
68 if (priv->s_sde && qp
[all...]
qp.c
16 #include "qp.h"
24 static void flush_tx_list(struct rvt_qp *qp);
33 static void qp_pio_drain(struct rvt_qp *qp);
122 static void flush_tx_list(struct rvt_qp *qp) argument
124 struct hfi1_qp_priv *priv = qp->priv;
130 static void flush_iowait(struct rvt_qp *qp) argument
132 struct hfi1_qp_priv *priv = qp->priv;
142 rvt_put_qp(qp);
160 int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, argument
163 struct ib_qp *ibqp = &qp
202 qp_set_16b(struct rvt_qp *qp) argument
222 hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
263 hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send) argument
323 _hfi1_schedule_send(struct rvt_qp *qp) argument
340 qp_pio_drain(struct rvt_qp *qp) argument
366 hfi1_schedule_send(struct rvt_qp *qp) argument
379 hfi1_qp_schedule(struct rvt_qp *qp) argument
396 hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag) argument
411 hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait) argument
444 struct rvt_qp *qp; local
495 struct rvt_qp *qp = iowait_to_qp(wait); local
503 struct rvt_qp *qp = iowait_to_qp(wait); local
522 struct rvt_qp *qp = iowait_to_qp(w); local
539 qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5) argument
564 qp_to_send_context(struct rvt_qp *qp, u8 sc5) argument
584 qp_idle(struct rvt_qp *qp) argument
601 struct rvt_qp *qp = iter->qp; local
668 qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp) argument
698 qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp) argument
732 flush_qp_waiters(struct rvt_qp *qp) argument
739 stop_send_queue(struct rvt_qp *qp) argument
748 quiesce_qp(struct rvt_qp *qp) argument
759 notify_qp_reset(struct rvt_qp *qp) argument
774 hfi1_migrate_qp(struct rvt_qp *qp) argument
799 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu) argument
821 get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp, struct ib_qp_attr *attr) argument
841 notify_error_qp(struct rvt_qp *qp) argument
879 hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v) argument
[all...]
uc.c
8 #include "qp.h"
15 * @qp: a pointer to the QP
22 int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) argument
24 struct hfi1_qp_priv *priv = qp->priv;
30 u32 pmtu = qp->pmtu;
33 ps->s_txreq = get_txreq(ps->dev, qp);
37 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
38 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
41 if (qp->s_last == READ_ONCE(qp
264 struct rvt_qp *qp = packet->qp; local
[all...]
rc.c
11 #include "qp.h"
16 struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
18 __must_hold(&qp->s_lock)
24 for (i = qp->r_head_ack_queue; ; i = p) {
25 if (i == qp->s_tail_ack_queue)
30 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
31 if (p == qp->r_head_ack_queue) {
35 e = &qp->s_ack_queue[p];
41 if (p == qp->s_tail_ack_queue &&
59 * @qp
67 make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, struct ib_other_headers *ohdr, struct hfi1_pkt_state *ps) argument
388 hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) argument
1180 hfi1_make_bth_aeth(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1) argument
1198 struct rvt_qp *qp = packet->qp; local
1225 struct rvt_qp *qp = packet->qp; local
1278 struct rvt_qp *qp = packet->qp; local
1347 struct rvt_qp *qp = packet->qp; local
1417 update_num_rd_atomic(struct rvt_qp *qp, u32 psn, struct rvt_swqe *wqe) argument
1458 reset_psn(struct rvt_qp *qp, u32 psn) argument
1564 hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait) argument
1635 reset_sending_psn(struct rvt_qp *qp, u32 psn) argument
1672 hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah) argument
1693 hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah) argument
1814 update_last_psn(struct rvt_qp *qp, u32 psn) argument
1824 do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe, struct hfi1_ibport *ibp) argument
1905 set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd) argument
1930 update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn, u32 lpsn) argument
1964 do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val, struct hfi1_ctxtdata *rcd) argument
2263 rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn, struct hfi1_ctxtdata *rcd) argument
2307 struct rvt_qp *qp = packet->qp; local
2484 rc_cancel_ack(struct rvt_qp *qp) argument
2510 rc_rcv_error(struct ib_other_headers *ohdr, void *data, struct rvt_qp *qp, u32 opcode, u32 psn, int diff, struct hfi1_ctxtdata *rcd) argument
2768 struct rvt_qp *qp = packet->qp; local
3209 hfi1_rc_hdrerr( struct hfi1_ctxtdata *rcd, struct hfi1_packet *packet, struct rvt_qp *qp) argument
[all...]
rc.h
13 static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n) argument
18 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
20 qp->s_tail_ack_queue = next;
21 qp->s_acked_ack_queue = next;
22 qp->s_ack_state = OP(ACKNOWLEDGE);
26 struct rvt_qp *qp)
28 if (list_empty(&qp->rspwait)) {
29 qp->r_flags |= RVT_R_RSP_NAK;
30 rvt_get_qp(qp);
31 list_add_tail(&qp
25 rc_defered_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) argument
[all...]
ruc.c
10 #include "qp.h"
31 struct rvt_qp *qp = packet->qp; local
32 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
39 if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
41 if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
48 if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
51 grh = rdma_ah_read_grh(&qp->alt_ah_attr);
64 hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
69 if (slid != rdma_ah_get_dlid(&qp
163 build_ahg(struct rvt_qp *qp, u32 npsn) argument
207 hfi1_make_ruc_bth(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1, u32 bth2) argument
231 hfi1_make_ruc_header_16B(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1, u32 bth2, int middle, struct hfi1_pkt_state *ps) argument
320 hfi1_make_ruc_header_9B(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1, u32 bth2, int middle, struct hfi1_pkt_state *ps) argument
386 hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth1, u32 bth2, int middle, struct hfi1_pkt_state *ps) argument
429 hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps, bool tid) argument
471 hfi1_do_send_from_rvt(struct rvt_qp *qp) argument
479 struct rvt_qp *qp = iowait_to_qp(w->iow); local
493 hfi1_do_send(struct rvt_qp *qp, bool in_thread) argument
[all...]
trace_rc.h
18 TP_PROTO(struct rvt_qp *qp, u32 psn),
19 TP_ARGS(qp, psn),
21 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
32 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
33 __entry->qpn = qp->ibqp.qp_num;
34 __entry->s_flags = qp->s_flags;
36 __entry->s_psn = qp->s_psn;
37 __entry->s_next_psn = qp->s_next_psn;
38 __entry->s_sending_psn = qp->s_sending_psn;
39 __entry->s_sending_hpsn = qp
[all...]
/linux-master/include/linux/
ntb_transport.h
65 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
67 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
72 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
73 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
77 void ntb_transport_free_queue(struct ntb_transport_qp *qp);
78 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
80 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
82 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
83 void ntb_transport_link_up(struct ntb_transport_qp *qp);
84 void ntb_transport_link_down(struct ntb_transport_qp *qp);
[all...]
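
The ntb_transport.h hits above cover most of the NTB transport client API: the queue handlers plus the enqueue/dequeue and link helpers. Purely as a hypothetical sketch (not part of the search results): the handler bodies, the priv pointer, and client_dev below are placeholders, and ntb_transport_create_queue() is assumed to come from the same header.

/* Hypothetical NTB transport client sketch built on the prototypes above. */
#include <linux/device.h>
#include <linux/ntb_transport.h>

static void my_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
                          void *data, int len)
{
        /* 'data' is the cb pointer given to ntb_transport_rx_enqueue();
         * consume the received bytes, then post a fresh receive buffer. */
}

static void my_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
                          void *data, int len)
{
        /* 'data' is the cb pointer from ntb_transport_tx_enqueue();
         * the transmit completed, so that buffer may be reused or freed. */
}

static void my_event_handler(void *qp_data, int link_is_up)
{
        /* peer link state changed; start or stop I/O accordingly */
}

static const struct ntb_queue_handlers my_handlers = {
        .rx_handler    = my_rx_handler,
        .tx_handler    = my_tx_handler,
        .event_handler = my_event_handler,
};

/* Called from the client's probe path; 'priv' and 'client_dev' are assumed. */
static struct ntb_transport_qp *my_client_open(void *priv,
                                               struct device *client_dev)
{
        struct ntb_transport_qp *qp;

        qp = ntb_transport_create_queue(priv, client_dev, &my_handlers);
        if (!qp)
                return NULL;

        /* post initial receive buffers with ntb_transport_rx_enqueue() here */
        ntb_transport_link_up(qp);
        return qp;
}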
/linux-master/drivers/infiniband/sw/rxe/
rxe_qp.c
103 static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n) argument
105 qp->resp.res_head = 0;
106 qp->resp.res_tail = 0;
107 qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);
109 if (!qp->resp.resources)
115 static void free_rd_atomic_resources(struct rxe_qp *qp) argument
117 if (qp->resp.resources) {
120 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
121 struct resp_res *res = &qp->resp.resources[i];
125 kfree(qp
135 cleanup_rd_atomic_resources(struct rxe_qp *qp) argument
148 rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init) argument
186 rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init, struct ib_udata *udata, struct rxe_create_qp_resp __user *uresp) argument
235 rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init, struct ib_udata *udata, struct rxe_create_qp_resp __user *uresp) argument
279 rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init, struct ib_udata *udata, struct rxe_create_qp_resp __user *uresp) argument
324 rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init, struct ib_udata *udata, struct rxe_create_qp_resp __user *uresp) argument
349 rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, struct ib_qp_init_attr *init, struct rxe_create_qp_resp __user *uresp, struct ib_pd *ibpd, struct ib_udata *udata) argument
414 rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init) argument
439 rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) argument
514 rxe_qp_reset(struct rxe_qp *qp) argument
557 rxe_qp_error(struct rxe_qp *qp) argument
571 rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) argument
584 __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) argument
617 rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, struct ib_udata *udata) argument
759 rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) argument
794 rxe_qp_chk_destroy(struct rxe_qp *qp) argument
811 struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); local
877 struct rxe_qp *qp = container_of(elem, typeof(*qp), elem); local
[all...]
rxe_comp.c
117 struct rxe_qp *qp = from_timer(qp, t, retrans_timer); local
120 rxe_dbg_qp(qp, "retransmit timer fired\n");
122 spin_lock_irqsave(&qp->state_lock, flags);
123 if (qp->valid) {
124 qp->comp.timeout = 1;
125 rxe_sched_task(&qp->comp.task);
127 spin_unlock_irqrestore(&qp->state_lock, flags);
130 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) argument
134 skb_queue_tail(&qp
146 get_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe **wqe_p) argument
174 reset_retry_counters(struct rxe_qp *qp) argument
181 check_psn(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) argument
224 check_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) argument
363 do_read(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) argument
383 do_atomic(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) argument
402 make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_cqe *cqe) argument
449 do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) argument
483 comp_check_sq_drain_done(struct rxe_qp *qp) argument
508 complete_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) argument
532 complete_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) argument
554 drain_resp_pkts(struct rxe_qp *qp) argument
566 flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe) argument
594 flush_send_queue(struct rxe_qp *qp, bool notify) argument
617 struct rxe_qp *qp = pkt->qp; local
633 reset_retry_timer(struct rxe_qp *qp) argument
647 rxe_completer(struct rxe_qp *qp) argument
[all...]
rxe_req.c
14 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
17 static inline void retry_first_write_send(struct rxe_qp *qp, argument
23 int to_send = (wqe->dma.resid > qp->mtu) ?
24 qp->mtu : wqe->dma.resid;
26 qp->req.opcode = next_opcode(qp, wqe,
38 static void req_retry(struct rxe_qp *qp) argument
45 struct rxe_queue *q = qp->sq.queue;
52 qp->req.wqe_index = cons;
53 qp
101 struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer); local
116 req_check_sq_drain_done(struct rxe_qp *qp) argument
162 __req_next_wqe(struct rxe_qp *qp) argument
175 req_next_wqe(struct rxe_qp *qp) argument
206 rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe) argument
224 next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits) argument
303 next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits) argument
354 next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode) argument
384 check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe) argument
404 get_mtu(struct rxe_qp *qp) argument
414 init_req_packet(struct rxe_qp *qp, struct rxe_av *av, struct rxe_send_wqe *wqe, int opcode, u32 payload, struct rxe_pkt_info *pkt) argument
498 finish_packet(struct rxe_qp *qp, struct rxe_av *av, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 payload) argument
541 update_wqe_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt) argument
553 update_wqe_psn(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, u32 payload) argument
576 save_state(struct rxe_send_wqe *wqe, struct rxe_qp *qp, struct rxe_send_wqe *rollback_wqe, u32 *rollback_psn) argument
588 rollback_state(struct rxe_send_wqe *wqe, struct rxe_qp *qp, struct rxe_send_wqe *rollback_wqe, u32 rollback_psn) argument
600 update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
615 rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe) argument
667 rxe_requester(struct rxe_qp *qp) argument
[all...]
rxe_resp.c
50 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) argument
55 skb_queue_tail(&qp->req_pkts, skb);
58 (skb_queue_len(&qp->req_pkts) > 1);
61 rxe_sched_task(&qp->resp.task);
63 rxe_run_task(&qp->resp.task);
66 static inline enum resp_states get_req(struct rxe_qp *qp, argument
71 skb = skb_peek(&qp->req_pkts);
77 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
80 static enum resp_states check_psn(struct rxe_qp *qp, argument
83 int diff = psn_compare(pkt->psn, qp
124 check_op_seq(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
214 check_qp_attr_access(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
238 check_op_valid(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
269 get_srq_wqe(struct rxe_qp *qp) argument
320 check_resource(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
348 rxe_resp_check_length(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
401 qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
415 qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
427 check_rkey(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
562 send_data_in(struct rxe_qp *qp, void *data_addr, int data_len) argument
576 write_data_in(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
597 rxe_prepare_res(struct rxe_qp *qp, struct rxe_pkt_info *pkt, int type) argument
642 process_flush(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
687 atomic_reply(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
722 atomic_write_reply(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
760 prepare_ack_packet(struct rxe_qp *qp, struct rxe_pkt_info *ack, int opcode, int payload, u32 psn, u8 syndrome) argument
826 rxe_recheck_mr(struct rxe_qp *qp, u32 rkey) argument
865 read_reply(struct rxe_qp *qp, struct rxe_pkt_info *req_pkt) argument
969 invalidate_rkey(struct rxe_qp *qp, u32 rkey) argument
980 execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
1051 do_complete(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
1166 send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn, int opcode, const char *msg) argument
1184 send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) argument
1190 send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) argument
1202 send_read_response_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) argument
1215 acknowledge(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
1233 cleanup(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
1253 find_resource(struct rxe_qp *qp, u32 psn) argument
1272 duplicate_request(struct rxe_qp *qp, struct rxe_pkt_info *pkt) argument
1369 do_class_ac_error(struct rxe_qp *qp, u8 syndrome, enum ib_wc_status status) argument
1379 do_class_d1e_error(struct rxe_qp *qp) argument
1414 drain_req_pkts(struct rxe_qp *qp) argument
1426 flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe) argument
1454 flush_recv_queue(struct rxe_qp *qp, bool notify) argument
1488 rxe_responder(struct rxe_qp *qp) argument
[all...]
/linux-master/drivers/infiniband/hw/qib/
qib_rc.c
54 * @qp: a pointer to the QP
62 static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, argument
72 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
78 switch (qp->s_ack_state) {
81 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
93 if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
94 qp->s_tail_ack_queue = 0;
99 if (qp->r_head_ack_queue == qp
216 qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) argument
598 qib_send_rc_ack(struct rvt_qp *qp) argument
736 reset_psn(struct rvt_qp *qp, u32 psn) argument
821 qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait) argument
857 reset_sending_psn(struct rvt_qp *qp, u32 psn) argument
882 qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr) argument
941 update_last_psn(struct rvt_qp *qp, u32 psn) argument
951 do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe, struct qib_ibport *ibp) argument
1007 do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val, struct qib_ctxtdata *rcd) argument
1224 rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn, struct qib_ctxtdata *rcd) argument
1269 qib_rc_rcv_resp(struct qib_ibport *ibp, struct ib_other_headers *ohdr, void *data, u32 tlen, struct rvt_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct qib_ctxtdata *rcd) argument
1493 qib_rc_rcv_error(struct ib_other_headers *ohdr, void *data, struct rvt_qp *qp, u32 opcode, u32 psn, int diff, struct qib_ctxtdata *rcd) argument
1690 qib_update_ack_queue(struct rvt_qp *qp, unsigned n) argument
1714 qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, int has_grh, void *data, u32 tlen, struct rvt_qp *qp) argument
[all...]
qib_ruc.c
44 void qib_migrate_qp(struct rvt_qp *qp) argument
48 qp->s_mig_state = IB_MIG_MIGRATED;
49 qp->remote_ah_attr = qp->alt_ah_attr;
50 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
51 qp->s_pkey_index = qp->s_alt_pkey_index;
53 ev.device = qp->ibqp.device;
54 ev.element.qp
82 qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr, int has_grh, struct rvt_qp *qp, u32 bth0) argument
206 qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth2) argument
250 struct rvt_qp *qp = priv->owner; local
263 qib_do_send(struct rvt_qp *qp) argument
[all...]
qib_uc.c
42 * @qp: a pointer to the QP
49 int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) argument
51 struct qib_qp_priv *priv = qp->priv;
57 u32 pmtu = qp->pmtu;
60 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
61 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
64 if (qp->s_last == READ_ONCE(qp->s_head))
68 qp->s_flags |= RVT_S_WAIT_DMA;
71 wqe = rvt_get_swqe_ptr(qp, q
238 qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr, int has_grh, void *data, u32 tlen, struct rvt_qp *qp) argument
[all...]
qib_ud.c
57 struct rvt_qp *qp; local
67 qp = rvt_lookup_qpn(rdi, &ibp->rvp, rvt_get_swqe_remote_qpn(swqe));
68 if (!qp) {
75 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
76 IB_QPT_UD : qp->ibqp.qp_type;
79 !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
87 if (qp->ibqp.qp_num > 1) {
93 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
99 sqp->ibqp.qp_num, qp->ibqp.qp_num,
111 if (qp
231 qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) argument
427 qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr, int has_grh, void *data, u32 tlen, struct rvt_qp *qp) argument
[all...]
/linux-master/drivers/infiniband/sw/rdmavt/
rc.c
48 * @qp: the queue pair to compute the AETH for
52 __be32 rvt_compute_aeth(struct rvt_qp *qp) argument
54 u32 aeth = qp->r_msn & IB_MSN_MASK;
56 if (qp->ibqp.srq) {
68 credits = READ_ONCE(qp->r_rq.kwq->count);
71 if (qp->ip) {
72 head = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->head);
73 tail = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->tail);
75 head = READ_ONCE(qp->r_rq.kwq->head);
76 tail = READ_ONCE(qp
121 rvt_get_credit(struct rvt_qp *qp, u32 aeth) argument
[all...]
trace_qp.h
18 TP_PROTO(struct rvt_qp *qp, u32 bucket),
19 TP_ARGS(qp, bucket),
21 RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
26 RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
27 __entry->qpn = qp->ibqp.qp_num;
39 TP_PROTO(struct rvt_qp *qp, u32 bucket),
40 TP_ARGS(qp, bucket));
43 TP_PROTO(struct rvt_qp *qp, u32 bucket),
44 TP_ARGS(qp, bucket));
48 TP_PROTO(struct rvt_qp *qp, u3
[all...]
/linux-master/drivers/infiniband/core/
mr_pool.c
8 struct ib_mr *ib_mr_pool_get(struct ib_qp *qp, struct list_head *list) argument
13 spin_lock_irqsave(&qp->mr_lock, flags);
17 qp->mrs_used++;
19 spin_unlock_irqrestore(&qp->mr_lock, flags);
25 void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr) argument
29 spin_lock_irqsave(&qp->mr_lock, flags);
31 qp->mrs_used--;
32 spin_unlock_irqrestore(&qp->mr_lock, flags);
36 int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr, argument
45 mr = ib_alloc_mr_integrity(qp
66 ib_mr_pool_destroy(struct ib_qp *qp, struct list_head *list) argument
[all...]
/linux-master/include/rdma/
mr_pool.h
10 struct ib_mr *ib_mr_pool_get(struct ib_qp *qp, struct list_head *list);
11 void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr);
13 int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr,
15 void ib_mr_pool_destroy(struct ib_qp *qp, struct list_head *list);
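
The four mr_pool.h prototypes above are the complete MR pool helper API. As an illustrative sketch only (the function name, nr_mrs, and max_sge below are invented; qp->rdma_mrs is the list the core RDMA read/write code keeps its pool on):

/* Hypothetical use of the ib_mr_pool_* helpers listed above. */
#include <rdma/ib_verbs.h>
#include <rdma/mr_pool.h>

static int setup_reg_mr_pool(struct ib_qp *qp, int nr_mrs, u32 max_sge)
{
        struct ib_mr *mr;
        int ret;

        /* pre-allocate nr_mrs fast-registration MRs on the QP's rdma_mrs list */
        ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
                              IB_MR_TYPE_MEM_REG, max_sge);
        if (ret)
                return ret;

        /* take one MR off the pool for a registration work request ... */
        mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
        if (mr) {
                /* ... post an IB_WR_REG_MR using it, then hand it back */
                ib_mr_pool_put(qp, &qp->rdma_mrs, mr);
        }

        return 0;
}

/* teardown: ib_mr_pool_destroy(qp, &qp->rdma_mrs); */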
/linux-master/drivers/infiniband/sw/siw/
siw_qp.c
96 struct siw_qp *qp; local
105 qp = sk_to_qp(sk);
107 if (likely(!qp->rx_stream.rx_suspend &&
108 down_read_trylock(&qp->state_lock))) {
109 read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 };
111 if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
120 up_read(&qp->state_lock);
122 siw_dbg_qp(qp, "unable to process RX, suspend: %d\n",
123 qp->rx_stream.rx_suspend);
129 void siw_qp_llp_close(struct siw_qp *qp) argument
203 siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size) argument
229 siw_qp_enable_crc(struct siw_qp *qp) argument
263 siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl) argument
367 siw_init_terminate(struct siw_qp *qp, enum term_elayer layer, u8 etype, u8 ecode, int in_tx) argument
390 siw_send_terminate(struct siw_qp *qp) argument
615 siw_qp_modify_nonstate(struct siw_qp *qp, struct siw_qp_attrs *attrs, enum siw_qp_attr_mask mask) argument
637 siw_qp_nextstate_from_idle(struct siw_qp *qp, struct siw_qp_attrs *attrs, enum siw_qp_attr_mask mask) argument
706 siw_qp_nextstate_from_rts(struct siw_qp *qp, struct siw_qp_attrs *attrs) argument
767 siw_qp_nextstate_from_term(struct siw_qp *qp, struct siw_qp_attrs *attrs) argument
784 siw_qp_nextstate_from_close(struct siw_qp *qp, struct siw_qp_attrs *attrs) argument
828 siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attrs, enum siw_qp_attr_mask mask) argument
884 siw_activate_tx_from_sq(struct siw_qp *qp) argument
980 siw_activate_tx(struct siw_qp *qp) argument
1063 siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes, enum siw_wc_status status) argument
1121 siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes, u32 inval_stag, enum siw_wc_status status) argument
1191 siw_sq_flush(struct siw_qp *qp) argument
1267 siw_rq_flush(struct siw_qp *qp) argument
1314 siw_qp_add(struct siw_device *sdev, struct siw_qp *qp) argument
1329 struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref); local
[all...]
/linux-master/drivers/ntb/
ntb_transport.c
120 struct ntb_transport_qp *qp; member in struct:ntb_queue_entry
148 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
160 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
272 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
279 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
466 struct ntb_transport_qp *qp; local
470 qp = filp->private_data;
472 if (!qp || !qp
616 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; local
685 struct ntb_transport_qp *qp = dev; local
695 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; local
722 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; local
912 ntb_qp_link_context_reset(struct ntb_transport_qp *qp) argument
935 ntb_qp_link_down_reset(struct ntb_transport_qp *qp) argument
942 ntb_qp_link_cleanup(struct ntb_transport_qp *qp) argument
958 struct ntb_transport_qp *qp = container_of(work, local
970 ntb_qp_link_down(struct ntb_transport_qp *qp) argument
977 struct ntb_transport_qp *qp; local
1104 struct ntb_transport_qp *qp = &nt->qp_vec[i]; local
1131 struct ntb_transport_qp *qp = container_of(work, local
1166 struct ntb_transport_qp *qp; local
1411 struct ntb_transport_qp *qp; local
1444 ntb_complete_rxc(struct ntb_transport_qp *qp) argument
1494 struct ntb_transport_qp *qp = entry->qp; local
1530 struct ntb_transport_qp *qp = entry->qp; local
1597 struct ntb_transport_qp *qp = entry->qp; local
1621 ntb_process_rxc(struct ntb_transport_qp *qp) argument
1695 struct ntb_transport_qp *qp = (void *)data; local
1736 struct ntb_transport_qp *qp = entry->qp; local
1806 ntb_async_tx_submit(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) argument
1865 ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) argument
1901 ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) argument
1928 ntb_send_link_down(struct ntb_transport_qp *qp) argument
1989 struct ntb_transport_qp *qp; local
2113 ntb_transport_free_queue(struct ntb_transport_qp *qp) argument
2205 ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) argument
2238 ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len) argument
2280 ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len) argument
2322 ntb_transport_link_up(struct ntb_transport_qp *qp) argument
2342 ntb_transport_link_down(struct ntb_transport_qp *qp) argument
2370 ntb_transport_link_query(struct ntb_transport_qp *qp) argument
2387 ntb_transport_qp_num(struct ntb_transport_qp *qp) argument
2404 ntb_transport_max_size(struct ntb_transport_qp *qp) argument
2427 ntb_transport_tx_free_entry(struct ntb_transport_qp *qp) argument
2439 struct ntb_transport_qp *qp; local
[all...]
/linux-master/drivers/infiniband/hw/erdma/
erdma_qp.c
12 void erdma_qp_llp_close(struct erdma_qp *qp) argument
16 down_write(&qp->state_lock);
18 switch (qp->attrs.state) {
24 erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
27 qp->attrs.state = ERDMA_QP_STATE_IDLE;
33 if (qp->cep) {
34 erdma_cep_put(qp->cep);
35 qp->cep = NULL;
38 up_write(&qp->state_lock);
43 struct erdma_qp *qp local
51 erdma_modify_qp_state_to_rts(struct erdma_qp *qp, struct erdma_qp_attrs *attrs, enum erdma_qp_attr_mask mask) argument
102 erdma_modify_qp_state_to_stop(struct erdma_qp *qp, struct erdma_qp_attrs *attrs, enum erdma_qp_attr_mask mask) argument
120 erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs, enum erdma_qp_attr_mask mask) argument
191 struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref); local
196 erdma_qp_put(struct erdma_qp *qp) argument
202 erdma_qp_get(struct erdma_qp *qp) argument
207 fill_inline_data(struct erdma_qp *qp, const struct ib_send_wr *send_wr, u16 wqe_idx, u32 sgl_offset, __le32 *length_field) argument
254 fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr, u16 wqe_idx, u32 sgl_offset, __le32 *length_field) argument
285 erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi, const struct ib_send_wr *send_wr) argument
490 kick_sq_db(struct erdma_qp *qp, u16 pi) argument
502 struct erdma_qp *qp = to_eqp(ibqp); local
540 erdma_post_recv_one(struct erdma_qp *qp, const struct ib_recv_wr *recv_wr) argument
574 struct erdma_qp *qp = to_eqp(ibqp); local
[all...]
/linux-master/drivers/infiniband/hw/mthca/
mthca_qp.c
196 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) argument
198 return qp->qpn >= dev->qp_table.sqp_start &&
199 qp->qpn <= dev->qp_table.sqp_start + 3;
202 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) argument
204 return qp->qpn >= dev->qp_table.sqp_start &&
205 qp->qpn <= dev->qp_table.sqp_start + 1;
208 static void *get_recv_wqe(struct mthca_qp *qp, int n) argument
210 if (qp->is_direct)
211 return qp->queue.direct.buf + (n << qp
217 get_send_wqe(struct mthca_qp *qp, int n) argument
241 struct mthca_qp *qp; local
328 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, int attr_mask) argument
434 struct mthca_qp *qp = to_mqp(ibqp); local
563 struct mthca_qp *qp = to_mqp(ibqp); local
862 struct mthca_qp *qp = to_mqp(ibqp); local
931 mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) argument
965 mthca_adjust_qp_caps(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) argument
990 mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp, struct ib_udata *udata) argument
1083 mthca_free_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) argument
1092 mthca_map_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1124 mthca_unmap_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1133 mthca_alloc_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1153 mthca_free_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1162 mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp, struct ib_udata *udata) argument
1252 mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, struct mthca_pd *pd, struct mthca_qp *qp) argument
1291 mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp, struct ib_udata *udata) argument
1366 mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct ib_qp_cap *cap, int qpn, u32 port, struct mthca_qp *qp, struct ib_udata *udata) argument
1434 get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) argument
1445 mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp) argument
1500 build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind, const struct ib_ud_wr *wr, struct mthca_mlx_seg *mlx, struct mthca_data_seg *data) argument
1629 struct mthca_qp *qp = to_mqp(ibqp); local
1827 struct mthca_qp *qp = to_mqp(ibqp); local
1932 struct mthca_qp *qp = to_mqp(ibqp); local
2166 struct mthca_qp *qp = to_mqp(ibqp); local
2233 mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, int index, int *dbd, __be32 *new_wqe) argument
[all...]
/linux-master/drivers/net/ethernet/qlogic/qed/
qed_roce.c
96 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, argument
101 if (qp->roce_mode == ROCE_V2_IPV4) {
107 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
108 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
111 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
112 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
113 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
164 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n");
206 static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) argument
210 if (qp
222 qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) argument
360 qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) argument
490 qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, bool move_to_err, u32 modify_flags) argument
577 qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, bool move_to_sqd, bool move_to_err, u32 modify_flags) argument
664 qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, u32 *cq_prod) argument
750 qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) argument
811 qed_roce_query_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, struct qed_rdma_query_qp_out_params *out_params) argument
951 qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) argument
980 qed_roce_modify_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, enum qed_roce_qp_state prev_state, struct qed_rdma_modify_qp_in_params *params) argument
[all...]

Completed in 239 milliseconds
