Searched refs:wqe (Results 1 - 25 of 82) sorted by relevance

/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/scsi/bfa/
bfa_sgpg.c
148 struct bfa_sgpg_wqe_s *wqe; local
164 wqe = bfa_q_first(&mod->sgpg_wait_q);
165 if (mod->free_sgpgs < wqe->nsgpg)
168 nsgpg = wqe->nsgpg;
169 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
170 wqe->nsgpg -= nsgpg;
171 if (wqe->nsgpg == 0) {
172 list_del(&wqe->qe);
173 wqe->cbfn(wqe
179 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) argument
205 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) argument
218 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), void *cbarg) argument
[all...]
bfa_sgpg_priv.h
73 void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
75 void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe,
77 void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
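
The bfa_sgpg.c hits above are the scatter-gather page (SGPG) wait queue: a waiter asks for a number of pages, and when enough become free they are moved onto the waiter's queue, its outstanding count drops, and a fully satisfied waiter is dequeued and its callback runs (source lines 164-173). A minimal hedged sketch of that pattern; the types and helper below are simplified stand-ins, not the driver's definitions.

struct list_head { struct list_head *next, *prev; };

struct sgpg_wqe {                   /* stand-in for bfa_sgpg_wqe_s      */
    struct list_head qe;            /* link on the module's wait queue  */
    int nsgpg;                      /* pages this waiter still needs    */
    struct list_head sgpg_q;        /* pages already handed to it       */
    void (*cbfn)(void *cbarg);      /* called once the request is met   */
    void *cbarg;
};

struct sgpg_mod {                   /* stand-in for the SGPG module     */
    int free_sgpgs;
    struct list_head sgpg_wait_q;
};

/* Illustrative stand-in for bfa_sgpg_malloc(): move n free pages to q. */
static void sgpg_grant(struct sgpg_mod *mod, struct list_head *q, int n)
{
    (void)q;
    mod->free_sgpgs -= n;
}

static void sgpg_resume_first_waiter(struct sgpg_mod *mod, struct sgpg_wqe *wqe)
{
    int nsgpg;

    if (mod->free_sgpgs < wqe->nsgpg)
        return;                     /* the hit elides the shortfall path */

    nsgpg = wqe->nsgpg;
    sgpg_grant(mod, &wqe->sgpg_q, nsgpg);
    wqe->nsgpg -= nsgpg;
    if (wqe->nsgpg == 0) {
        /* unlink the satisfied waiter (list_del() in the driver) */
        wqe->qe.prev->next = wqe->qe.next;
        wqe->qe.next->prev = wqe->qe.prev;
        wqe->cbfn(wqe->cbarg);
    }
}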
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/scsi/bfa/
bfa_sgpg.c
148 struct bfa_sgpg_wqe_s *wqe; local
164 wqe = bfa_q_first(&mod->sgpg_wait_q);
165 if (mod->free_sgpgs < wqe->nsgpg)
168 nsgpg = wqe->nsgpg;
169 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
170 wqe->nsgpg -= nsgpg;
171 if (wqe->nsgpg == 0) {
172 list_del(&wqe->qe);
173 wqe->cbfn(wqe
179 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) argument
205 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) argument
218 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), void *cbarg) argument
[all...]
bfa_sgpg_priv.h
73 void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
75 void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe,
77 void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/infiniband/hw/cxgb3/
iwch_qp.c
42 static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr, argument
51 wqe->send.rdmaop = T3_SEND_WITH_SE;
53 wqe->send.rdmaop = T3_SEND;
54 wqe->send.rem_stag = 0;
58 wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
60 wqe->send.rdmaop = T3_SEND_WITH_INV;
61 wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
68 wqe->send.reserved[0] = 0;
69 wqe->send.reserved[1] = 0;
70 wqe
87 build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt) argument
128 build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt) argument
149 build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) argument
191 build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt) argument
247 build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe, struct ib_recv_wr *wr) argument
287 build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe, struct ib_recv_wr *wr) argument
359 union t3_wr *wqe; local
471 union t3_wr *wqe; local
535 union t3_wr *wqe; local
743 union t3_wr *wqe; local
775 union t3_wr *wqe; local
862 union t3_wr *wqe = qhp->wq.queue; local
[all...]
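
The iwch_qp.c hits are the WQE builders for the Chelsio T3 iWARP driver; source lines 51-61 pick the send opcode from the solicited-event flag and the send-with-invalidate variant, and fill in the remote STag to invalidate. A hedged sketch of just that branch structure; the enum values, flag bit, and byte-swap helper below are simplified placeholders for the driver's T3_* constants, IB_SEND_SOLICITED, and cpu_to_be32().

#include <stdint.h>

enum t3_rdma_op {                   /* names from the hit, placeholder values */
    T3_SEND, T3_SEND_WITH_SE, T3_SEND_WITH_INV, T3_SEND_WITH_SE_INV
};

#define SEND_SOLICITED_FLAG 0x1u    /* stands in for IB_SEND_SOLICITED */

struct t3_send_seg {                /* stand-in for wqe->send */
    uint8_t  rdmaop;
    uint32_t rem_stag;              /* big-endian STag to invalidate, or 0 */
};

struct send_req {                   /* stand-in for struct ib_send_wr */
    unsigned send_flags;
    int      with_invalidate;       /* stands in for the opcode test */
    uint32_t invalidate_rkey;
};

static uint32_t be32_stub(uint32_t x)   /* illustrative cpu_to_be32() */
{
    return ((x & 0xffu) << 24) | ((x & 0xff00u) << 8) |
           ((x >> 8) & 0xff00u) | (x >> 24);
}

/* Mirrors the branch structure visible in the build_rdma_send() hit. */
static void pick_send_opcode(struct t3_send_seg *seg, const struct send_req *wr)
{
    if (!wr->with_invalidate) {
        seg->rdmaop = (wr->send_flags & SEND_SOLICITED_FLAG) ?
                      T3_SEND_WITH_SE : T3_SEND;
        seg->rem_stag = 0;
    } else {
        seg->rdmaop = (wr->send_flags & SEND_SOLICITED_FLAG) ?
                      T3_SEND_WITH_SE_INV : T3_SEND_WITH_INV;
        seg->rem_stag = be32_stub(wr->invalidate_rkey);
    }
}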
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/infiniband/hw/cxgb3/
iwch_qp.c
42 static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr, argument
51 wqe->send.rdmaop = T3_SEND_WITH_SE;
53 wqe->send.rdmaop = T3_SEND;
54 wqe->send.rem_stag = 0;
58 wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
60 wqe->send.rdmaop = T3_SEND_WITH_INV;
61 wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
68 wqe->send.reserved[0] = 0;
69 wqe->send.reserved[1] = 0;
70 wqe
87 build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt) argument
128 build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt) argument
149 build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) argument
191 build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt) argument
247 build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe, struct ib_recv_wr *wr) argument
287 build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe, struct ib_recv_wr *wr) argument
359 union t3_wr *wqe; local
471 union t3_wr *wqe; local
535 union t3_wr *wqe; local
743 union t3_wr *wqe; local
775 union t3_wr *wqe; local
862 union t3_wr *wqe = qhp->wq.queue; local
[all...]
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/infiniband/hw/ipath/
ipath_ruc.c
114 int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, argument
121 for (i = j = 0; i < wqe->num_sge; i++) {
122 if (wqe->sg_list[i].length == 0)
126 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
128 *lengthp += wqe->sg_list[i].length;
137 wc.wr_id = wqe->wr_id;
163 struct ipath_rwqe *wqe; local
196 wqe = get_rwqe_ptr(rq, tail);
202 } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
203 qp->r_wr_id = wqe
255 struct ipath_swqe *wqe; local
690 ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe, enum ib_wc_status status) argument
[all...]
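
ipath_init_sge() (source lines 121-128 above) walks a receive WQE's scatter list: zero-length entries are skipped, every remaining entry must pass the driver's local-key check (IB_ACCESS_LOCAL_WRITE), and the lengths are summed into the total receive length. A simplified sketch under assumed types, with the key check reduced to a caller-supplied callback.

#include <stdint.h>

struct rx_sge {                     /* stand-in for struct ib_sge      */
    uint64_t addr;
    uint32_t length;
    uint32_t lkey;
};

struct rx_wqe {                     /* stand-in for struct ipath_rwqe  */
    uint64_t wr_id;
    int num_sge;
    struct rx_sge sg_list[4];
};

/* Returns 1 on success and writes the summed length, or 0 if any
 * non-empty SGE fails validation; the driver additionally packs the
 * valid SGEs into the QP's receive SGE array and, on failure, reports
 * an error completion carrying wqe->wr_id (both elided here).
 */
static int init_rx_sge(const struct rx_wqe *wqe, uint32_t *lengthp,
                       int (*sge_ok)(const struct rx_sge *))
{
    int i;

    *lengthp = 0;
    for (i = 0; i < wqe->num_sge; i++) {
        if (wqe->sg_list[i].length == 0)
            continue;               /* empty entries are simply skipped */
        if (!sge_ok(&wqe->sg_list[i]))
            return 0;               /* bad key: caller generates the error WC */
        *lengthp += wqe->sg_list[i].length;
    }
    return 1;
}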
ipath_rc.c
42 static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe, argument
47 len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
48 ss->sge = wqe->sg_list[0];
49 ss->sg_list = wqe->sg_list + 1;
50 ss->num_sge = wqe->wr.num_sge;
52 return wqe->length - len;
58 * @wqe: the work queue to initialize the QP's SGE from
62 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) argument
66 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
218 struct ipath_swqe *wqe; local
727 struct ipath_swqe *wqe = get_swqe_ptr(qp, n); local
807 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); local
863 struct ipath_swqe *wqe; local
1125 struct ipath_swqe *wqe; local
[all...]
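
restart_sge() (source lines 47-52 above) rewinds the send state to a given PSN: the packets already covered are (psn - wqe->psn) masked into the PSN space, each worth one path-MTU of payload, and the function returns the bytes that remain. A small sketch with a worked example; the 24-bit mask and the types are assumptions, not taken from the hit.

#include <stdint.h>
#include <stdio.h>

#define PSN_MASK 0xFFFFFFu          /* assumed 24-bit PSN space */

/* Bytes of the WQE still to (re)send when restarting at `psn`.
 * wqe_psn is the WQE's first PSN, wqe_len its total length in bytes,
 * pmtu the path MTU in bytes.
 */
static uint32_t bytes_remaining(uint32_t psn, uint32_t wqe_psn,
                                uint32_t wqe_len, uint32_t pmtu)
{
    uint32_t already_sent = ((psn - wqe_psn) & PSN_MASK) * pmtu;

    return wqe_len - already_sent;
}

int main(void)
{
    /* A 10000-byte WQE starting at PSN 100 with a 2048-byte MTU:
     * restarting at PSN 103 means 3 packets (6144 bytes) are behind
     * us, so 3856 bytes remain to be resent.
     */
    printf("%u\n", (unsigned)bytes_remaining(103, 100, 10000, 2048));
    return 0;
}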
ipath_uc.c
49 struct ipath_swqe *wqe; local
70 wqe = get_swqe_ptr(qp, qp->s_last);
71 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
84 wqe = get_swqe_ptr(qp, qp->s_cur);
97 qp->s_psn = wqe->psn = qp->s_next_psn;
98 qp->s_sge.sge = wqe->sg_list[0];
99 qp->s_sge.sg_list = wqe->sg_list + 1;
100 qp->s_sge.num_sge = wqe->wr.num_sge;
101 qp->s_len = len = wqe->length;
102 switch (wqe
[all...]
ipath_ud.c
61 struct ipath_rwqe *wqe; local
132 wqe = get_rwqe_ptr(rq, tail);
134 if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
148 wc.wr_id = wqe->wr_id;
244 struct ipath_swqe *wqe; local
267 wqe = get_swqe_ptr(qp, qp->s_last);
268 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
275 wqe = get_swqe_ptr(qp, qp->s_cur);
281 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
297 ipath_ud_loopback(qp, wqe);
[all...]
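
ipath_uc.c source lines 98-101 above (and the matching qib_uc.c hits further down) show how a send WQE's scatter list is loaded into the QP's active send state: the first SGE is copied by value, the rest are referenced in place, and the SGE count and total length are carried over. A hedged sketch with assumed types.

#include <stdint.h>

struct sge {                        /* stand-in for struct ipath_sge    */
    uint64_t vaddr;
    uint32_t length;
    uint32_t lkey;
};

struct tx_wqe {                     /* stand-in for struct ipath_swqe   */
    uint32_t psn;
    uint32_t length;                /* total bytes described by sg_list */
    int num_sge;
    struct sge sg_list[4];
};

struct sge_state {                  /* stand-in for ipath_sge_state     */
    struct sge sge;                 /* current SGE, copied by value     */
    const struct sge *sg_list;      /* remaining SGEs, referenced       */
    int num_sge;
};

/* Load a WQE's scatter list into the active send state and return the
 * byte count still to send (what the driver keeps in qp->s_len).
 */
static uint32_t load_send_state(struct sge_state *ss, const struct tx_wqe *wqe)
{
    ss->sge = wqe->sg_list[0];
    ss->sg_list = wqe->sg_list + 1;
    ss->num_sge = wqe->num_sge;
    return wqe->length;
}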
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/infiniband/hw/ipath/
ipath_ruc.c
114 int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, argument
121 for (i = j = 0; i < wqe->num_sge; i++) {
122 if (wqe->sg_list[i].length == 0)
126 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
128 *lengthp += wqe->sg_list[i].length;
137 wc.wr_id = wqe->wr_id;
163 struct ipath_rwqe *wqe; local
196 wqe = get_rwqe_ptr(rq, tail);
202 } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
203 qp->r_wr_id = wqe
255 struct ipath_swqe *wqe; local
690 ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe, enum ib_wc_status status) argument
[all...]
ipath_rc.c
42 static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe, argument
47 len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
48 ss->sge = wqe->sg_list[0];
49 ss->sg_list = wqe->sg_list + 1;
50 ss->num_sge = wqe->wr.num_sge;
52 return wqe->length - len;
58 * @wqe: the work queue to initialize the QP's SGE from
62 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) argument
66 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
218 struct ipath_swqe *wqe; local
727 struct ipath_swqe *wqe = get_swqe_ptr(qp, n); local
807 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); local
863 struct ipath_swqe *wqe; local
1125 struct ipath_swqe *wqe; local
[all...]
ipath_uc.c
49 struct ipath_swqe *wqe; local
70 wqe = get_swqe_ptr(qp, qp->s_last);
71 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
84 wqe = get_swqe_ptr(qp, qp->s_cur);
97 qp->s_psn = wqe->psn = qp->s_next_psn;
98 qp->s_sge.sge = wqe->sg_list[0];
99 qp->s_sge.sg_list = wqe->sg_list + 1;
100 qp->s_sge.num_sge = wqe->wr.num_sge;
101 qp->s_len = len = wqe->length;
102 switch (wqe
[all...]
ipath_ud.c
61 struct ipath_rwqe *wqe; local
132 wqe = get_rwqe_ptr(rq, tail);
134 if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
148 wc.wr_id = wqe->wr_id;
244 struct ipath_swqe *wqe; local
267 wqe = get_swqe_ptr(qp, qp->s_last);
268 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
275 wqe = get_swqe_ptr(qp, qp->s_cur);
281 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
297 ipath_ud_loopback(qp, wqe);
[all...]
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/infiniband/hw/cxgb4/
qp.c
304 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, argument
316 wqe->send.sendop_pkd = cpu_to_be32(
319 wqe->send.sendop_pkd = cpu_to_be32(
321 wqe->send.stag_inv = 0;
325 wqe->send.sendop_pkd = cpu_to_be32(
328 wqe->send.sendop_pkd = cpu_to_be32(
330 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
340 ret = build_immd(sq, wqe->send.u.immd_src, wr,
344 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
349 wqe
369 build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
412 build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
441 build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, struct ib_recv_wr *wr, u8 *len16) argument
456 build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
506 build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
536 union t4_wr *wqe; local
647 union t4_recv_wr *wqe; local
847 union t4_wr *wqe; local
878 struct fw_ri_wr *wqe; local
967 struct fw_ri_wr *wqe; local
1041 struct fw_ri_wr *wqe; local
[all...]
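
Every cxgb4 builder above reports its WQE size through a u8 *len16 out-parameter, which suggests the work-request length is carried in 16-byte units. A hedged sketch of that byte-to-len16 rounding; the immediate-data header and the sizes are illustrative, not the real fw_ri layouts.

#include <stddef.h>
#include <stdint.h>

struct immd_hdr {                   /* illustrative immediate-data header */
    uint32_t op_pkd;
    uint32_t len;
};

/* Round a WQE byte count up to 16-byte units, as the len16 fields imply. */
static uint8_t wqe_len16(size_t bytes)
{
    return (uint8_t)((bytes + 15) / 16);
}

/* Example: a send header plus an immediate-data header plus some inline
 * payload, echoing the `size = sizeof wqe->send + sizeof(struct fw_ri_immd)
 * + ...` computation in the hit at source line 344.
 */
static uint8_t example_send_len16(size_t send_hdr_bytes, size_t immd_payload)
{
    size_t size = send_hdr_bytes + sizeof(struct immd_hdr) + immd_payload;

    return wqe_len16(size);
}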
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/infiniband/hw/cxgb4/
qp.c
304 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, argument
316 wqe->send.sendop_pkd = cpu_to_be32(
319 wqe->send.sendop_pkd = cpu_to_be32(
321 wqe->send.stag_inv = 0;
325 wqe->send.sendop_pkd = cpu_to_be32(
328 wqe->send.sendop_pkd = cpu_to_be32(
330 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
340 ret = build_immd(sq, wqe->send.u.immd_src, wr,
344 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
349 wqe
369 build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
412 build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
441 build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, struct ib_recv_wr *wr, u8 *len16) argument
456 build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
506 build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
536 union t4_wr *wqe; local
647 union t4_recv_wr *wqe; local
847 union t4_wr *wqe; local
878 struct fw_ri_wr *wqe; local
967 struct fw_ri_wr *wqe; local
1041 struct fw_ri_wr *wqe; local
[all...]
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/infiniband/hw/qib/
qib_rc.c
43 static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, argument
48 len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
49 ss->sge = wqe->sg_list[0];
50 ss->sg_list = wqe->sg_list + 1;
51 ss->num_sge = wqe->wr.num_sge;
52 ss->total_len = wqe->length;
54 return wqe->length - len;
237 struct qib_swqe *wqe; local
274 wqe = get_swqe_ptr(qp, qp->s_last);
276 qib_send_complete(qp, wqe, IB_WC_SUCCES
791 struct qib_swqe *wqe = get_swqe_ptr(qp, n); local
875 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); local
951 struct qib_swqe *wqe; local
977 struct qib_swqe *wqe; local
1058 do_rc_completion(struct qib_qp *qp, struct qib_swqe *wqe, struct qib_ibport *ibp) argument
1136 struct qib_swqe *wqe; local
1357 struct qib_swqe *wqe; local
1409 struct qib_swqe *wqe; local
[all...]
qib_ruc.c
81 static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) argument
94 for (i = j = 0; i < wqe->num_sge; i++) {
95 if (wqe->sg_list[i].length == 0)
99 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
101 qp->r_len += wqe->sg_list[i].length;
117 wc.wr_id = wqe->wr_id;
144 struct qib_rwqe *wqe; local
176 wqe = get_rwqe_ptr(rq, tail);
185 if (!wr_id_only && !qib_init_sge(qp, wqe)) {
189 qp->r_wr_id = wqe
354 struct qib_swqe *wqe; local
768 qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, enum ib_wc_status status) argument
[all...]
qib_uc.c
49 struct qib_swqe *wqe; local
70 wqe = get_swqe_ptr(qp, qp->s_last);
71 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
84 wqe = get_swqe_ptr(qp, qp->s_cur);
97 wqe->psn = qp->s_next_psn;
99 qp->s_sge.sge = wqe->sg_list[0];
100 qp->s_sge.sg_list = wqe->sg_list + 1;
101 qp->s_sge.num_sge = wqe->wr.num_sge;
102 qp->s_sge.total_len = wqe->length;
103 len = wqe
[all...]
qib_ud.c
238 struct qib_swqe *wqe; local
261 wqe = get_swqe_ptr(qp, qp->s_last);
262 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
269 wqe = get_swqe_ptr(qp, qp->s_cur);
277 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
293 qib_ud_loopback(qp, wqe);
295 qib_send_complete(qp, wqe, IB_WC_SUCCESS);
301 extra_bytes = -wqe->length & 3;
302 nwords = (wqe->length + extra_bytes) >> 2;
306 qp->s_cur_size = wqe
[all...]
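
qib_ud.c source lines 301-302 above pad the UD payload to a 4-byte boundary and convert it to 32-bit words: -len & 3 yields the pad bytes (0 to 3), and the padded length shifted right by 2 is the word count. A tiny sketch with a worked example.

#include <stdint.h>
#include <stdio.h>

/* Pad bytes needed to reach the next 4-byte boundary: 0..3. */
static uint32_t pad_to_dword(uint32_t len)
{
    return (0u - len) & 3u;         /* same trick as `-wqe->length & 3` */
}

int main(void)
{
    uint32_t len = 1450;                     /* example payload length  */
    uint32_t extra = pad_to_dword(len);      /* 2 pad bytes             */
    uint32_t nwords = (len + extra) >> 2;    /* 363 32-bit words        */

    printf("pad=%u words=%u\n", (unsigned)extra, (unsigned)nwords);
    return 0;
}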
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/infiniband/hw/qib/
qib_rc.c
43 static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, argument
48 len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
49 ss->sge = wqe->sg_list[0];
50 ss->sg_list = wqe->sg_list + 1;
51 ss->num_sge = wqe->wr.num_sge;
52 ss->total_len = wqe->length;
54 return wqe->length - len;
237 struct qib_swqe *wqe; local
274 wqe = get_swqe_ptr(qp, qp->s_last);
276 qib_send_complete(qp, wqe, IB_WC_SUCCES
791 struct qib_swqe *wqe = get_swqe_ptr(qp, n); local
875 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); local
951 struct qib_swqe *wqe; local
977 struct qib_swqe *wqe; local
1058 do_rc_completion(struct qib_qp *qp, struct qib_swqe *wqe, struct qib_ibport *ibp) argument
1136 struct qib_swqe *wqe; local
1357 struct qib_swqe *wqe; local
1409 struct qib_swqe *wqe; local
[all...]
qib_ruc.c
81 static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) argument
94 for (i = j = 0; i < wqe->num_sge; i++) {
95 if (wqe->sg_list[i].length == 0)
99 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
101 qp->r_len += wqe->sg_list[i].length;
117 wc.wr_id = wqe->wr_id;
144 struct qib_rwqe *wqe; local
176 wqe = get_rwqe_ptr(rq, tail);
185 if (!wr_id_only && !qib_init_sge(qp, wqe)) {
189 qp->r_wr_id = wqe
354 struct qib_swqe *wqe; local
768 qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, enum ib_wc_status status) argument
[all...]
qib_uc.c
49 struct qib_swqe *wqe; local
70 wqe = get_swqe_ptr(qp, qp->s_last);
71 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
84 wqe = get_swqe_ptr(qp, qp->s_cur);
97 wqe->psn = qp->s_next_psn;
99 qp->s_sge.sge = wqe->sg_list[0];
100 qp->s_sge.sg_list = wqe->sg_list + 1;
101 qp->s_sge.num_sge = wqe->wr.num_sge;
102 qp->s_sge.total_len = wqe->length;
103 len = wqe
[all...]
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/infiniband/hw/mthca/
mthca_srq.c
90 static inline int *wqe_to_link(void *wqe) argument
92 return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
147 void *wqe; local
174 next = wqe = get_wqe(srq, i);
177 *wqe_to_link(wqe) = i + 1;
180 *wqe_to_link(wqe) = -1;
184 for (scatter = wqe + sizeof (struct mthca_next_seg);
185 (void *) scatter < wqe + (1 << srq->wqe_shift);
502 void *wqe; local
511 wqe
601 void *wqe; local
[all...]
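
The mthca_srq.c hits show how the SRQ free list is kept inside the WQEs themselves: wqe_to_link() points at the offset of the next-segment header's imm field, and initialization (source lines 174-180) chains slot i to slot i+1 with -1 terminating the list. A hedged sketch of that embedded free list; the segment layout and shift-based addressing are assumptions, while wqe_to_link() and get_wqe() are names taken from the hits.

#include <stddef.h>

struct next_seg {                   /* stand-in for struct mthca_next_seg */
    unsigned int nda_op;
    unsigned int ee_nds;
    unsigned int flags;
    int imm;                        /* the free-list link lives here      */
};

/* The next-free index is stored where the immediate data would go. */
static int *wqe_to_link(void *wqe)
{
    return (int *)((char *)wqe + offsetof(struct next_seg, imm));
}

/* WQEs are (1 << wqe_shift) bytes apart in the SRQ buffer. */
static void *get_wqe(void *buf, int wqe_shift, int i)
{
    return (char *)buf + ((size_t)i << wqe_shift);
}

/* Chain every slot to the one after it; the last slot ends the list. */
static void init_free_list(void *buf, int wqe_shift, int nwqe)
{
    int i;

    for (i = 0; i < nwqe; i++) {
        void *wqe = get_wqe(buf, wqe_shift, i);

        *wqe_to_link(wqe) = (i < nwqe - 1) ? i + 1 : -1;
    }
}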
/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/infiniband/hw/mthca/
mthca_srq.c
90 static inline int *wqe_to_link(void *wqe) argument
92 return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
147 void *wqe; local
174 next = wqe = get_wqe(srq, i);
177 *wqe_to_link(wqe) = i + 1;
180 *wqe_to_link(wqe) = -1;
184 for (scatter = wqe + sizeof (struct mthca_next_seg);
185 (void *) scatter < wqe + (1 << srq->wqe_shift);
502 void *wqe; local
511 wqe
601 void *wqe; local
[all...]

Completed in 290 milliseconds
