Searched refs:qp (Results 1 - 25 of 252) sorted by relevance


/freebsd-current/sys/dev/ntb/
ntb_transport.h
35 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
37 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
46 void ntb_transport_free_queue(struct ntb_transport_qp *qp);
47 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
48 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
49 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
51 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
53 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
54 void ntb_transport_link_up(struct ntb_transport_qp *qp);
55 void ntb_transport_link_down(struct ntb_transport_qp *qp);
[all...]
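
The hits above cover the client-facing half of the NTB transport queue-pair API. As a rough, hypothetical sketch of how a consumer might drive it (the queue itself is assumed to come from the transport's create routine, which is outside the lines shown, and a zero return from the enqueue path is assumed to mean success):

/* Hypothetical client fragment, not taken from the tree: post one receive
 * buffer sized to the queue's maximum payload, then mark the local side of
 * the link up so the peer can start sending. */
#include <sys/param.h>
#include <sys/malloc.h>

#include "ntb_transport.h"

static int
example_qp_start(struct ntb_transport_qp *qp, void *ctx)
{
	unsigned int mtu;
	void *buf;
	int error;

	mtu = ntb_transport_max_size(qp);	/* largest single transfer */
	buf = malloc(mtu, M_DEVBUF, M_WAITOK | M_ZERO);

	/* 'ctx' comes back through the rx_handler callback with this buffer. */
	error = ntb_transport_rx_enqueue(qp, ctx, buf, mtu);
	if (error != 0) {
		free(buf, M_DEVBUF);
		return (error);
	}

	ntb_transport_link_up(qp);		/* advertise readiness */
	return (0);
}
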
ntb_transport.c
108 struct ntb_transport_qp *qp; member in struct:ntb_queue_entry
130 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
140 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
265 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
274 static int ntb_process_tx(struct ntb_transport_qp *qp,
277 static int ntb_process_rxc(struct ntb_transport_qp *qp);
278 static void ntb_memcpy_rx(struct ntb_transport_qp *qp,
280 static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp,
282 static void ntb_complete_rxc(struct ntb_transport_qp *qp);
348 int rc, i, db_count, spad_count, qp, qpu, qpo, qpt; local
597 struct ntb_transport_qp *qp; local
657 ntb_transport_free_queue(struct ntb_transport_qp *qp) argument
706 struct ntb_transport_qp *qp; local
745 ntb_transport_link_up(struct ntb_transport_qp *qp) argument
773 ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len) argument
813 struct ntb_transport_qp *qp = entry->qp; local
862 ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) argument
879 ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) argument
920 struct ntb_transport_qp *qp = arg; local
940 ntb_process_rxc(struct ntb_transport_qp *qp) argument
1009 ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset) argument
1029 ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data) argument
1039 ntb_complete_rxc(struct ntb_transport_qp *qp) argument
1090 struct ntb_transport_qp *qp; local
1135 struct ntb_transport_qp *qp; local
1329 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; local
1378 struct ntb_transport_qp *qp = arg; local
1410 struct ntb_transport_qp *qp; local
1441 ntb_qp_link_down(struct ntb_transport_qp *qp) argument
1448 ntb_qp_link_down_reset(struct ntb_transport_qp *qp) argument
1466 ntb_qp_link_cleanup(struct ntb_transport_qp *qp) argument
1486 ntb_transport_link_down(struct ntb_transport_qp *qp) argument
1514 ntb_transport_link_query(struct ntb_transport_qp *qp) argument
1529 ntb_transport_link_speed(struct ntb_transport_qp *qp) argument
1558 ntb_send_link_down(struct ntb_transport_qp *qp) argument
1648 ntb_transport_qp_num(struct ntb_transport_qp *qp) argument
1663 ntb_transport_max_size(struct ntb_transport_qp *qp) argument
1670 ntb_transport_tx_free_entry(struct ntb_transport_qp *qp) argument
[all...]
/freebsd-current/sys/dev/nvmf/controller/
nvmft_qpair.c
26 struct nvmf_qpair *qp; member in struct:nvmft_qpair
35 volatile u_int qp_refs; /* Internal references on 'qp'. */
42 static int _nvmft_send_generic_error(struct nvmft_qpair *qp,
48 struct nvmft_qpair *qp = arg; local
49 struct nvmft_controller *ctrlr = qp->ctrlr;
60 nvmft_printf(ctrlr, "error %d on %s\n", error, qp->name);
61 nvmft_controller_error(ctrlr, qp, error);
67 struct nvmft_qpair *qp = arg; local
68 struct nvmft_controller *ctrlr = qp->ctrlr;
75 qp
105 struct nvmft_qpair *qp; local
132 nvmft_qpair_shutdown(struct nvmft_qpair *qp) argument
145 nvmft_qpair_destroy(struct nvmft_qpair *qp) argument
154 nvmft_qpair_ctrlr(struct nvmft_qpair *qp) argument
160 nvmft_qpair_id(struct nvmft_qpair *qp) argument
166 nvmft_qpair_name(struct nvmft_qpair *qp) argument
172 _nvmft_send_response(struct nvmft_qpair *qp, const void *cqe) argument
206 nvmft_command_completed(struct nvmft_qpair *qp, struct nvmf_capsule *nc) argument
218 nvmft_send_response(struct nvmft_qpair *qp, const void *cqe) argument
242 nvmft_send_error(struct nvmft_qpair *qp, struct nvmf_capsule *nc, uint8_t sc_type, uint8_t sc_status) argument
255 nvmft_send_generic_error(struct nvmft_qpair *qp, struct nvmf_capsule *nc, uint8_t sc_status) argument
266 _nvmft_send_generic_error(struct nvmft_qpair *qp, struct nvmf_capsule *nc, uint8_t sc_status) argument
279 nvmft_send_success(struct nvmft_qpair *qp, struct nvmf_capsule *nc) argument
294 nvmft_send_connect_response(struct nvmft_qpair *qp, const struct nvmf_fabric_connect_rsp *rsp) argument
320 nvmft_connect_error(struct nvmft_qpair *qp, const struct nvmf_fabric_connect_cmd *cmd, uint8_t sc_type, uint8_t sc_status) argument
334 nvmft_connect_invalid_parameters(struct nvmft_qpair *qp, const struct nvmf_fabric_connect_cmd *cmd, bool data, uint16_t offset) argument
348 nvmft_finish_accept(struct nvmft_qpair *qp, const struct nvmf_fabric_connect_cmd *cmd, struct nvmft_controller *ctrlr) argument
[all...]
nvmft_var.h
47 struct nvmft_qpair *qp; member in struct:nvmft_io_qpair
110 void nvmft_dispatch_command(struct nvmft_qpair *qp,
116 struct nvmft_qpair *qp, int error);
121 void nvmft_handle_io_command(struct nvmft_qpair *qp, uint16_t qid,
138 void nvmft_qpair_shutdown(struct nvmft_qpair *qp);
139 void nvmft_qpair_destroy(struct nvmft_qpair *qp);
140 struct nvmft_controller *nvmft_qpair_ctrlr(struct nvmft_qpair *qp);
141 uint16_t nvmft_qpair_id(struct nvmft_qpair *qp);
142 const char *nvmft_qpair_name(struct nvmft_qpair *qp);
143 void nvmft_command_completed(struct nvmft_qpair *qp,
[all...]
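
Taken together with the nvmft_qpair.c hits above, these prototypes form the target-side queue-pair interface. A minimal hypothetical fragment using only the accessors and error helpers listed (the choice of NVME_SC_INVALID_OPCODE as the status code is an assumption made for illustration):

#include <sys/types.h>
#include <sys/systm.h>

#include <dev/nvme/nvme.h>

#include "nvmft_var.h"

/* Hypothetical fragment, not from the tree: reject a command capsule on a
 * target-side queue pair with a generic error completion, naming the queue
 * pair in the log message. */
static void
example_reject(struct nvmft_qpair *qp, struct nvmf_capsule *nc)
{
	printf("nvmft: rejecting capsule on %s\n", nvmft_qpair_name(qp));
	nvmft_send_generic_error(qp, nc, NVME_SC_INVALID_OPCODE);
}
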
/freebsd-current/sys/dev/nvmf/host/
nvmf_qpair.c
26 struct nvmf_qpair *qp; member in struct:nvmf_host_qpair
47 nvmf_allocate_request(struct nvmf_host_qpair *qp, void *sqe, argument
60 mtx_lock(&qp->lock);
61 nq = qp->qp;
63 mtx_unlock(&qp->lock);
67 qp->allocating++;
68 MPASS(qp->allocating != 0);
69 mtx_unlock(&qp->lock);
71 req->qp
110 nvmf_dispatch_command(struct nvmf_host_qpair *qp, struct nvmf_host_command *cmd) argument
141 struct nvmf_host_qpair *qp = arg; local
154 struct nvmf_host_qpair *qp = arg; local
229 struct nvmf_host_qpair *qp; local
275 nvmf_shutdown_qp(struct nvmf_host_qpair *qp) argument
337 nvmf_destroy_qp(struct nvmf_host_qpair *qp) argument
355 struct nvmf_host_qpair *qp; local
[all...]
/freebsd-current/contrib/nvi/common/
seq.c
38 SEQ *lastqp, *qp; local
48 if ((qp =
59 free(qp->output);
60 qp->olen = olen;
61 qp->output = p;
66 CALLOC(sp, qp, 1, sizeof(SEQ));
67 if (qp == NULL) {
74 qp->name = NULL;
75 else if ((qp->name = v_wstrdup(sp, name, nlen)) == NULL) {
79 qp
130 SEQ *qp, *pre_qp = NULL; local
160 seq_free(SEQ *qp) argument
181 SEQ *lqp = NULL, *qp; local
259 SEQ *qp; local
278 SEQ *qp; local
322 SEQ *qp; local
[all...]
/freebsd-current/usr.bin/bintrans/tests/
bintrans_test.sh
4 atf_check -e empty -o file:"$(atf_get_srcdir)/textqpenc" bintrans qp $(atf_get_srcdir)/textqpdec
11 atf_check -e empty -o inline:"=" bintrans qp -u test
13 atf_check -e empty -o inline:"=\ra" bintrans qp -u test
15 atf_check -e empty -o inline:"a" bintrans qp -u test
17 atf_check -e empty -o inline:"This is a line" bintrans qp -u test
19 atf_check -e empty -o inline:"This= is a line" bintrans qp -u test
21 atf_check -e empty -o inline:"This=2 is a line" bintrans qp -u test
23 atf_check -e empty -o inline:"This# is a line" bintrans qp -u test
25 atf_check -e empty -o inline:"This= is a line" bintrans qp -u test
27 atf_check -e empty -o inline:"This_ is a line" bintrans qp
[all...]
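
The tests above exercise bintrans qp, FreeBSD's quoted-printable encoder/decoder, in decode (-u) mode. Purely as an illustration of the "=XX" escape convention those expected outputs imply (this is not the bintrans implementation, and the handling of malformed escapes is inferred from outputs such as "This=2 is a line"):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy quoted-printable decoder: "=XX" becomes the byte 0xXX, "=" at the end
 * of a line is a soft break and is dropped, and any other "=" sequence is
 * passed through unchanged. */
static void
qp_decode(const char *in, FILE *out)
{
	while (*in != '\0') {
		if (in[0] == '=' && isxdigit((unsigned char)in[1]) &&
		    isxdigit((unsigned char)in[2])) {
			char hex[3] = { in[1], in[2], '\0' };
			fputc((int)strtol(hex, NULL, 16), out);
			in += 3;
		} else if (in[0] == '=' && (in[1] == '\n' || in[1] == '\0')) {
			in += (in[1] == '\n') ? 2 : 1;	/* soft line break */
		} else {
			fputc(*in++, out);
		}
	}
}

int
main(void)
{
	qp_decode("This=20is=20a=20line\n", stdout);
	return (0);
}
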
/freebsd-current/contrib/ofed/libirdma/
irdma_uk.c
90 * @qp: hw qp ptr
92 static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp){ argument
95 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
100 * @qp: hw qp ptr
103 irdma_nop_1(struct irdma_qp_uk *qp) argument
108 if (!qp->sq_ring.head)
111 wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
112 wqe = qp
134 irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx) argument
154 irdma_uk_qp_post_wr(struct irdma_qp_uk *qp) argument
194 irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx) argument
204 irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta, u32 wqe_idx, bool post_sq) argument
230 irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, u16 *quanta, u32 total_size, struct irdma_post_sq_info *info) argument
293 irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx) argument
320 irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
418 irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool inv_stag, bool post_sq) argument
508 irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
759 irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
830 irdma_uk_inline_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
904 irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
955 irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
1006 irdma_uk_post_receive(struct irdma_qp_uk *qp, struct irdma_post_rq_info *info) argument
1171 irdma_repost_rq_wqes(struct irdma_qp_uk *qp, u32 start_idx, u32 end_idx) argument
1205 irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx) argument
1262 struct irdma_qp_uk *qp; local
1643 irdma_setup_connection_wqes(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) argument
1718 irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) argument
[all...]
/freebsd-current/sys/dev/qlnx/qlnxe/
ecore_roce.c
218 static void ecore_rdma_copy_gids(struct ecore_rdma_qp *qp, __le32 *src_gid, argument
222 if (qp->roce_mode == ROCE_V2_IPV4) {
228 src_gid[3] = OSAL_CPU_TO_LE32(qp->sgid.ipv4_addr);
229 dst_gid[3] = OSAL_CPU_TO_LE32(qp->dgid.ipv4_addr);
234 for (i = 0; i < OSAL_ARRAY_SIZE(qp->sgid.dwords); i++) {
235 src_gid[i] = OSAL_CPU_TO_LE32(qp->sgid.dwords[i]);
236 dst_gid[i] = OSAL_CPU_TO_LE32(qp->dgid.dwords[i]);
335 DP_NOTICE(p_hwfn, false, "failed to allocate qp\n");
342 /* Verify the cid bits that of this qp index are clear */
366 /* qp inde
389 ecore_roce_sp_create_responder( struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp) argument
558 ecore_roce_sp_create_requester( struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp) argument
709 ecore_roce_sp_modify_responder( struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp, bool move_to_err, u32 modify_flags) argument
814 ecore_roce_sp_modify_requester( struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp, bool move_to_sqd, bool move_to_err, u32 modify_flags) argument
921 ecore_roce_sp_destroy_qp_responder( struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp, u32 *num_invalidated_mw, u32 *cq_prod) argument
1006 ecore_roce_sp_destroy_qp_requester( struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp, u32 *num_bound_mw, u32 *cq_prod) argument
1084 ecore_roce_sp_query_responder( struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp, struct ecore_rdma_query_qp_out_params *out_params) argument
1147 ecore_roce_sp_query_requester( struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp, struct ecore_rdma_query_qp_out_params *out_params, bool *sq_draining) argument
1215 ecore_roce_query_qp( struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp, struct ecore_rdma_query_qp_out_params *out_params) argument
1236 ecore_roce_destroy_qp(struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp, struct ecore_rdma_destroy_qp_out_params *out_params) argument
1384 ecore_roce_modify_qp(struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp, enum ecore_roce_qp_state prev_state, struct ecore_rdma_modify_qp_in_params *params) argument
[all...]
/freebsd-current/sys/dev/nvmf/
nvmf_tcp.c
39 struct nvmf_tcp_qpair *qp; member in struct:nvmf_tcp_command_buffer
64 struct nvmf_qpair qp; member in struct:nvmf_tcp_qpair
133 #define TQP(qp) ((struct nvmf_tcp_qpair *)(qp))
174 tcp_alloc_command_buffer(struct nvmf_tcp_qpair *qp, argument
181 cb->qp = qp;
264 nvmf_tcp_write_pdu(struct nvmf_tcp_qpair *qp, struct mbuf *m) argument
266 struct socket *so = qp->so;
269 mbufq_enqueue(&qp
277 nvmf_tcp_report_error(struct nvmf_tcp_qpair *qp, uint16_t fes, uint32_t fei, struct mbuf *rx_pdu, u_int hlen) argument
305 nvmf_tcp_validate_pdu(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
382 nvmf_tcp_save_command_capsule(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
401 nvmf_tcp_save_response_capsule(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
433 nvmf_tcp_construct_pdu(struct nvmf_tcp_qpair *qp, void *hdr, size_t hlen, struct mbuf *data, uint32_t data_len) argument
505 nvmf_tcp_next_r2t(struct nvmf_tcp_qpair *qp) argument
527 nvmf_tcp_allocate_ttag(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_command_buffer *cb) argument
562 tcp_send_r2t(struct nvmf_tcp_qpair *qp, uint16_t cid, uint16_t ttag, uint32_t data_offset, uint32_t data_len) argument
585 nvmf_tcp_send_next_r2t(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_command_buffer *cb) argument
639 nvmf_tcp_handle_h2c_data(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
749 nvmf_tcp_handle_c2h_data(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
924 tcp_send_h2c_pdu(struct nvmf_tcp_qpair *qp, uint16_t cid, uint16_t ttag, uint32_t data_offset, struct mbuf *m, size_t len, bool last_pdu) argument
944 nvmf_tcp_handle_r2t(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
1053 nvmf_tcp_dispatch_pdu(struct nvmf_tcp_qpair *qp, const struct nvme_tcp_common_pdu_hdr *ch, struct nvmf_tcp_rxpdu *pdu) argument
1083 struct nvmf_tcp_qpair *qp = arg; local
1223 tcp_command_pdu(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_capsule *tc) argument
1280 tcp_response_pdu(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_capsule *tc) argument
1293 capsule_to_pdu(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_capsule *tc) argument
1304 struct nvmf_tcp_qpair *qp = arg; local
1403 struct nvmf_tcp_qpair *qp = arg; local
1413 struct nvmf_tcp_qpair *qp = arg; local
1424 struct nvmf_tcp_qpair *qp; local
1513 tcp_release_qpair(struct nvmf_tcp_qpair *qp) argument
1522 struct nvmf_tcp_qpair *qp = TQP(nq); local
1596 struct nvmf_tcp_qpair *qp = TQP(nq); local
1610 struct nvmf_tcp_qpair *qp = TQP(tc->nc.nc_qpair); local
1634 struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair); local
1693 struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair); local
1761 tcp_send_c2h_pdu(struct nvmf_tcp_qpair *qp, uint16_t cid, uint32_t data_offset, struct mbuf *m, size_t len, bool last_pdu, bool success) argument
1785 struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair); local
[all...]
/freebsd-current/sys/dev/mthca/
mthca_qp.c
196 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) argument
198 return qp->qpn >= dev->qp_table.sqp_start &&
199 qp->qpn <= dev->qp_table.sqp_start + 3;
202 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) argument
204 return qp->qpn >= dev->qp_table.sqp_start &&
205 qp->qpn <= dev->qp_table.sqp_start + 1;
208 static void *get_recv_wqe(struct mthca_qp *qp, int n) argument
210 if (qp->is_direct)
211 return qp->queue.direct.buf + (n << qp
217 get_send_wqe(struct mthca_qp *qp, int n) argument
241 struct mthca_qp *qp; local
328 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, int attr_mask) argument
429 struct mthca_qp *qp = to_mqp(ibqp); local
551 struct mthca_qp *qp = to_mqp(ibqp); local
850 struct mthca_qp *qp = to_mqp(ibqp); local
915 mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) argument
949 mthca_adjust_qp_caps(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) argument
974 mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp, struct ib_udata *udata) argument
1067 mthca_free_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) argument
1076 mthca_map_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1107 mthca_unmap_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1116 mthca_alloc_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1136 mthca_free_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1145 mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp, struct ib_udata *udata) argument
1234 mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, struct mthca_pd *pd, struct mthca_qp *qp) argument
1273 mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp, struct ib_udata *udata) argument
1416 get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) argument
1427 mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp) argument
1612 struct mthca_qp *qp = to_mqp(ibqp); local
1815 struct mthca_qp *qp = to_mqp(ibqp); local
1926 struct mthca_qp *qp = to_mqp(ibqp); local
2166 struct mthca_qp *qp = to_mqp(ibqp); local
2233 mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, int index, int *dbd, __be32 *new_wqe) argument
[all...]
/freebsd-current/sys/dev/irdma/
irdma_uk.c
90 * @qp: hw qp ptr
92 static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp){ argument
95 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
100 * @qp: hw qp ptr
103 irdma_nop_1(struct irdma_qp_uk *qp) argument
108 if (!qp->sq_ring.head)
111 wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
112 wqe = qp
134 irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx) argument
154 irdma_uk_qp_post_wr(struct irdma_qp_uk *qp) argument
194 irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx) argument
204 irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta, u32 wqe_idx, bool post_sq) argument
230 irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, u16 *quanta, u32 total_size, struct irdma_post_sq_info *info) argument
293 irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx) argument
320 irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
418 irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool inv_stag, bool post_sq) argument
508 irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
727 irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
798 irdma_uk_inline_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
872 irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) argument
922 irdma_uk_post_receive(struct irdma_qp_uk *qp, struct irdma_post_rq_info *info) argument
1050 irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx) argument
1098 irdma_detect_unsignaled_cmpls(struct irdma_cq_uk *cq, struct irdma_qp_uk *qp, struct irdma_cq_poll_info *info, u32 wqe_idx) argument
1153 struct irdma_qp_uk *qp; local
1535 irdma_setup_connection_wqes(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) argument
1636 irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) argument
[all...]
/freebsd-current/sys/dev/mlx4/mlx4_ib/
mlx4_ib_qp.c
48 #include <dev/mlx4/qp.h>
87 struct mlx4_ib_qp qp; member in struct:mlx4_ib_sqp
128 return container_of(mqp, struct mlx4_ib_sqp, qp);
131 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
136 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
137 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
141 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
148 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
149 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
155 if (qp
169 is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
192 get_wqe(struct mlx4_ib_qp *qp, int offset) argument
197 get_recv_wqe(struct mlx4_ib_qp *qp, int n) argument
202 get_send_wqe(struct mlx4_ib_qp *qp, int n) argument
216 stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) argument
246 post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) argument
284 pad_wraparound(struct mlx4_ib_qp *qp, int ind) argument
294 mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) argument
386 set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int is_user, int has_rq, struct mlx4_ib_qp *qp) argument
424 set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp, bool shrink_wqe) argument
540 set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) argument
560 alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) argument
599 free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) argument
630 mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
651 struct mlx4_ib_qp *qp; local
988 del_gid_entries(struct mlx4_ib_qp *qp) argument
998 get_pd(struct mlx4_ib_qp *qp) argument
1006 get_cqs(struct mlx4_ib_qp *qp, struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq) argument
1025 destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct ib_udata *udata) argument
1137 struct mlx4_ib_qp *qp = NULL; local
1283 _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) argument
1310 mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) argument
1348 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask) argument
1512 mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port, u16 vlan_id, u8 *smac) argument
1524 mlx4_set_alt_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port) argument
1536 update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
1548 handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_qp_context *context) argument
1572 create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
1631 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
2171 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
2889 build_lso_seg(struct mlx4_wqe_lso_seg *wqe, const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) argument
2934 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
3269 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
3432 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
[all...]
/freebsd-current/lib/libnvmf/
nvmf_transport.c
95 struct nvmf_qpair *qp; local
98 qp = na->na_ops->allocate_qpair(na, params);
99 if (qp == NULL)
103 qp->nq_association = na;
104 qp->nq_admin = params->admin;
105 TAILQ_INIT(&qp->nq_rx_capsules);
106 return (qp);
110 nvmf_free_qpair(struct nvmf_qpair *qp) argument
115 TAILQ_FOREACH_SAFE(nc, &qp->nq_rx_capsules, nc_link, tc) {
116 TAILQ_REMOVE(&qp
125 nvmf_allocate_command(struct nvmf_qpair *qp, const void *sqe) argument
144 nvmf_allocate_response(struct nvmf_qpair *qp, const void *cqe) argument
186 nvmf_receive_capsule(struct nvmf_qpair *qp, struct nvmf_capsule **ncp) argument
239 nvmf_kernel_handoff_params(struct nvmf_qpair *qp, struct nvmf_handoff_qpair_params *qparams) argument
[all...]
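
These hits sketch the userspace libnvmf transport layer: queue pairs come from the association's allocate_qpair op and capsules are received with nvmf_receive_capsule(). A hypothetical controller-side drain loop, assuming the library pairs the allocation routines shown above with an nvmf_free_capsule() counterpart and a <libnvmf.h> header (both assumptions, not visible in these hits):

#include <stdio.h>

#include <libnvmf.h>	/* header name assumed */

/* Hypothetical fragment, not from the tree: pull capsules off an
 * already-established queue pair until an error ends the loop, then
 * release the queue pair. */
static void
example_drain(struct nvmf_qpair *qp)
{
	struct nvmf_capsule *nc;
	int error;

	for (;;) {
		error = nvmf_receive_capsule(qp, &nc);
		if (error != 0) {
			fprintf(stderr, "receive failed: %d\n", error);
			break;
		}
		/* ... dispatch the command capsule here ... */
		nvmf_free_capsule(nc);	/* assumed counterpart, see note above */
	}
	nvmf_free_qpair(qp);
}
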
nvmf_tcp.c
26 struct nvmf_tcp_qpair *qp; member in struct:nvmf_tcp_command_buffer
62 struct nvmf_qpair qp; member in struct:nvmf_tcp_qpair
82 #define TQP(qp) ((struct nvmf_tcp_qpair *)(qp))
93 tcp_alloc_command_buffer(struct nvmf_tcp_qpair *qp, void *data, argument
100 cb->qp = qp;
109 LIST_INSERT_HEAD(&qp->rx_buffers, cb, link);
111 LIST_INSERT_HEAD(&qp->tx_buffers, cb, link);
116 tcp_find_command_buffer(struct nvmf_tcp_qpair *qp, uint16_ argument
131 tcp_purge_command_buffer(struct nvmf_tcp_qpair *qp, uint16_t cid, uint16_t ttag, bool receive) argument
149 nvmf_tcp_write_pdu(struct nvmf_tcp_qpair *qp, const void *pdu, size_t len) argument
166 nvmf_tcp_write_pdu_iov(struct nvmf_tcp_qpair *qp, struct iovec *iov, u_int iovcnt, size_t len) argument
192 nvmf_tcp_report_error(struct nvmf_association *na, struct nvmf_tcp_qpair *qp, uint16_t fes, uint32_t fei, const void *rx_pdu, size_t pdu_len, u_int hlen) argument
223 nvmf_tcp_validate_pdu(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu, size_t pdu_len) argument
307 nvmf_tcp_read_pdu(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
367 nvmf_tcp_save_command_capsule(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
388 nvmf_tcp_save_response_capsule(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
423 nvmf_tcp_construct_pdu(struct nvmf_tcp_qpair *qp, void *hdr, size_t hlen, void *data, uint32_t data_len) argument
494 nvmf_tcp_handle_h2c_data(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
567 nvmf_tcp_handle_c2h_data(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
657 tcp_send_h2c_pdu(struct nvmf_tcp_qpair *qp, uint16_t cid, uint16_t ttag, uint32_t data_offset, void *buf, size_t len, bool last_pdu) argument
676 tcp_send_h2c_pdus(struct nvmf_tcp_qpair *qp, uint16_t cid, uint16_t ttag, uint32_t data_offset, void *buf, size_t len, bool last_pdu) argument
700 nvmf_tcp_handle_r2t(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu) argument
755 nvmf_tcp_receive_pdu(struct nvmf_tcp_qpair *qp) argument
785 nvmf_tcp_validate_ic_pdu(struct nvmf_association *na, struct nvmf_tcp_qpair *qp, const struct nvme_tcp_common_pdu_hdr *ch, size_t pdu_len) argument
895 nvmf_tcp_read_ic_req(struct nvmf_association *na, struct nvmf_tcp_qpair *qp, struct nvme_tcp_ic_req *pdu) argument
911 nvmf_tcp_read_ic_resp(struct nvmf_association *na, struct nvmf_tcp_qpair *qp, struct nvme_tcp_ic_resp *pdu) argument
953 tcp_connect(struct nvmf_tcp_qpair *qp, struct nvmf_association *na, bool admin) argument
1035 tcp_accept(struct nvmf_tcp_qpair *qp, struct nvmf_association *na) argument
1079 struct nvmf_tcp_qpair *qp; local
1107 struct nvmf_tcp_qpair *qp = TQP(nq); local
1128 struct nvmf_tcp_qpair *qp = TQP(nq); local
1165 struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair); local
1214 struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair); local
1236 struct nvmf_tcp_qpair *qp = TQP(nq); local
1295 tcp_send_r2t(struct nvmf_tcp_qpair *qp, uint16_t cid, uint16_t ttag, uint32_t data_offset, uint32_t data_len) argument
1314 struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair); local
1378 tcp_send_c2h_pdu(struct nvmf_tcp_qpair *qp, uint16_t cid, uint32_t data_offset, const void *buf, size_t len, bool last_pdu, bool success) argument
1403 struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair); local
[all...]
nvmf_controller.c
43 nvmf_controller_receive_capsule(struct nvmf_qpair *qp, argument
51 error = nvmf_receive_capsule(qp, &nc);
69 struct nvmf_qpair *qp = nc->nc_qpair; local
72 if (qp->nq_flow_control) {
73 qp->nq_sqhd = (qp->nq_sqhd + 1) % qp->nq_qsize;
74 nc->nc_cqe.sqhd = htole16(qp->nq_sqhd);
143 struct nvmf_qpair *qp; local
150 qp
339 struct nvmf_qpair *qp = cc->nc_qpair; local
360 nvmf_controller_cap(struct nvmf_qpair *qp) argument
369 nvmf_validate_cc(struct nvmf_qpair *qp, uint64_t cap, uint32_t old_cc, uint32_t new_cc) argument
379 nvmf_init_discovery_controller_data(struct nvmf_qpair *qp, struct nvme_controller_data *cdata) argument
419 nvmf_init_io_controller_data(struct nvmf_qpair *qp, const char *serial, const char *subnqn, int nn, uint32_t ioccsz, struct nvme_controller_data *cdata) argument
458 nvmf_handoff_controller_qpair(struct nvmf_qpair *qp, struct nvmf_handoff_controller_qpair *h) argument
[all...]
/freebsd-current/sys/ofed/drivers/infiniband/core/
core_priv.h
154 struct ib_qp *qp; local
159 qp = dev->create_qp(pd, attr, udata);
160 if (IS_ERR(qp))
161 return qp;
163 qp->device = dev;
164 qp->pd = pd;
165 qp->uobject = uobj;
166 qp->real_qp = qp;
168 qp
[all...]
ib_verbs.c
787 struct ib_qp *qp = context; local
790 spin_lock_irqsave(&qp->device->event_handler_lock, flags);
791 list_for_each_entry(event->element.qp, &qp->open_list, open_list)
792 if (event->element.qp->event_handler)
793 event->element.qp->event_handler(event, event->element.qp->qp_context);
794 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
797 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) argument
800 list_add(&qp
808 struct ib_qp *qp; local
833 struct ib_qp *qp, *real_qp; local
852 ib_create_xrc_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr) argument
879 struct ib_qp *qp; local
1321 is_qp_type_connected(const struct ib_qp *qp) argument
1332 _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) argument
1411 ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask) argument
1427 ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) argument
1438 ib_close_qp(struct ib_qp *qp) argument
1458 __ib_destroy_shared_qp(struct ib_qp *qp) argument
1486 ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata) argument
1699 is_valid_mcast_lid(struct ib_qp *qp, u16 lid) argument
1737 ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) argument
1755 ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) argument
1795 struct ib_qp *qp; local
2145 __ib_drain_sq(struct ib_qp *qp) argument
2185 __ib_drain_rq(struct ib_qp *qp) argument
2238 ib_drain_sq(struct ib_qp *qp) argument
2267 ib_drain_rq(struct ib_qp *qp) argument
2292 ib_drain_qp(struct ib_qp *qp) argument
[all...]
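
ib_verbs.c above is the kernel verbs core that the mthca, mlx4_ib and mlx5_ib drivers elsewhere in this list plug into. As a reminder of how its QP entry points are consumed, here is a hypothetical consumer fragment (not from these hits) performing the standard RESET-to-INIT transition with ib_modify_qp(), whose signature appears in the listing above:

#include <rdma/ib_verbs.h>

/* Hypothetical ULP fragment: move a freshly created RC queue pair from
 * RESET to INIT, granting remote read/write access on the given port. */
static int
example_qp_to_init(struct ib_qp *qp, u8 port_num, u16 pkey_index)
{
	struct ib_qp_attr attr = {
		.qp_state        = IB_QPS_INIT,
		.pkey_index      = pkey_index,
		.port_num        = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE,
	};

	return (ib_modify_qp(qp, &attr,
	    IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS));
}
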
/freebsd-current/contrib/ofed/libmlx4/
qp.c
59 static void *get_recv_wqe(struct mlx4_qp *qp, int n) argument
61 return qp->buf.buf + qp->rq.offset + (n << qp->rq.wqe_shift);
64 static void *get_send_wqe(struct mlx4_qp *qp, int n) argument
66 return qp->buf.buf + qp->sq.offset + (n << qp->sq.wqe_shift);
74 static void stamp_send_wqe(struct mlx4_qp *qp, int n) argument
76 uint32_t *wqe = get_send_wqe(qp,
84 mlx4_init_qp_indices(struct mlx4_qp *qp) argument
92 mlx4_qp_init_sq_ownership(struct mlx4_qp *qp) argument
217 struct mlx4_qp *qp = to_mqp(ibqp); local
515 struct mlx4_qp *qp = to_mqp(ibqp); local
599 mlx4_calc_sq_wqe_size(struct ibv_qp_cap *cap, enum ibv_qp_type type, struct mlx4_qp *qp) argument
653 mlx4_alloc_qp_buf(struct ibv_context *context, struct ibv_qp_cap *cap, enum ibv_qp_type type, struct mlx4_qp *qp) argument
704 mlx4_set_sq_sizes(struct mlx4_qp *qp, struct ibv_qp_cap *cap, enum ibv_qp_type type) argument
752 mlx4_store_qp(struct mlx4_context *ctx, uint32_t qpn, struct mlx4_qp *qp) argument
[all...]
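
get_recv_wqe() and get_send_wqe() here, like their counterparts in mthca_qp.c above and in the libmlx5 and mlx5_ib_qp.c hits below, all locate a work queue entry the same way: base buffer plus the queue's offset plus the entry index shifted by a per-queue wqe_shift. A self-contained toy version of that power-of-two stride indexing, with made-up names:

#include <stddef.h>
#include <stdint.h>

/* Toy model of the WQE lookup idiom: each entry occupies (1 << wqe_shift)
 * bytes, so entry n sits at a fixed stride from the start of that work
 * queue's region inside the QP buffer. */
struct toy_wq {
	uint8_t		*buf;		/* whole QP buffer */
	size_t		 offset;	/* start of this work queue in buf */
	unsigned int	 wqe_shift;	/* log2 of the per-entry stride */
};

static inline void *
toy_get_wqe(const struct toy_wq *wq, unsigned int n)
{
	return (wq->buf + wq->offset + ((size_t)n << wq->wqe_shift));
}
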
verbs.c
351 int mlx4_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw, argument
369 ret = mlx4_post_send(qp, &wr, &bad_wr);
758 struct mlx4_qp *qp)
774 ret = ibv_cmd_create_qp_ex2(context, &qp->verbs_qp,
775 sizeof(qp->verbs_qp), attr,
798 struct mlx4_qp *qp; local
821 qp = calloc(1, sizeof *qp);
822 if (!qp)
826 attr->cap.max_send_wr = qp
755 mlx4_cmd_create_qp_ex(struct ibv_context *context, struct ibv_qp_init_attr_ex *attr, struct mlx4_create_qp *cmd, struct mlx4_qp *qp) argument
935 struct ibv_qp *qp; local
950 struct mlx4_qp *qp; local
974 struct mlx4_qp *qp = to_mqp(ibqp); local
990 mlx4_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask) argument
1055 mlx4_lock_cqs(struct ibv_qp *qp) argument
1076 mlx4_unlock_cqs(struct ibv_qp *qp) argument
1100 struct mlx4_qp *qp = to_mqp(ibqp); local
[all...]
/freebsd-current/sys/crypto/ccp/
ccp_hardware.c
123 ccp_queue_write_tail(struct ccp_queue *qp) argument
125 ccp_write_queue_4(qp->cq_softc, qp->cq_qindex, CMD_Q_TAIL_LO_BASE,
126 ((uint32_t)qp->desc_ring_bus_addr) + (Q_DESC_SIZE * qp->cq_tail));
134 ccp_queue_lsb_entry(struct ccp_queue *qp, unsigned lsb_entry) argument
136 return ((qp->private_lsb * LSB_REGION_LENGTH + lsb_entry));
144 ccp_queue_lsb_address(struct ccp_queue *qp, unsigned lsb_entry) argument
146 return (ccp_queue_lsb_entry(qp, lsb_entry) * LSB_ENTRY_SIZE);
213 struct ccp_queue *qp; local
319 struct ccp_queue *qp; local
455 ccp_intr_handle_error(struct ccp_queue *qp, const struct ccp_desc *desc) argument
538 ccp_intr_run_completions(struct ccp_queue *qp, uint32_t ints) argument
845 ccp_passthrough(struct ccp_queue *qp, bus_addr_t dst, enum ccp_memtype dst_type, bus_addr_t src, enum ccp_memtype src_type, bus_size_t len, enum ccp_passthru_byteswap swapmode, enum ccp_passthru_bitwise bitmode, bool interrupt, const struct ccp_completion_ctx *cctx) argument
885 ccp_passthrough_sgl(struct ccp_queue *qp, bus_addr_t lsb_addr, bool tolsb, struct sglist *sgl, bus_size_t len, bool interrupt, const struct ccp_completion_ctx *cctx) argument
1038 ccp_sha_single_desc(struct ccp_queue *qp, const struct SHA_Defn *defn, vm_paddr_t addr, size_t len, bool start, bool end, uint64_t msgbits) argument
1072 ccp_sha(struct ccp_queue *qp, enum sha_version version, struct sglist *sgl_src, struct sglist *sgl_dst, const struct ccp_completion_ctx *cctx) argument
1202 ccp_do_hmac_done(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp, int error) argument
1247 ccp_hmac_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp, int error) argument
1257 ccp_do_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp, const struct ccp_completion_ctx *cctx) argument
1305 ccp_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp) argument
1331 ccp_blkcipher_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp, int error) argument
1379 ccp_do_pst_to_lsb(struct ccp_queue *qp, uint32_t lsbaddr, const void *src, size_t len) argument
1395 ccp_do_xts(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp, enum ccp_cipher_dir dir, const struct ccp_completion_ctx *cctx) argument
1465 ccp_do_blkcipher(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp, const struct ccp_completion_ctx *cctx) argument
1669 ccp_blkcipher(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp) argument
1681 ccp_authenc_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp, int error) argument
1694 ccp_authenc(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp) argument
1720 ccp_do_ghash_aad(struct ccp_queue *qp, struct ccp_session *s) argument
1758 ccp_do_gctr(struct ccp_queue *qp, struct ccp_session *s, enum ccp_cipher_dir dir, struct sglist_seg *seg, bool som, bool eom) argument
1795 ccp_do_ghash_final(struct ccp_queue *qp, struct ccp_session *s) argument
1828 ccp_gcm_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp, int error) argument
1861 ccp_gcm(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp) argument
2051 db_ccp_show_queue_hw(struct ccp_queue *qp) argument
[all...]
ccp.c
140 struct ccp_queue *qp; local
144 qp = &sc->queues[i];
146 qp->cq_softc = sc;
147 qp->cq_qindex = i;
148 mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF);
150 qp->cq_sg_crp = sglist_alloc(32, M_WAITOK);
152 qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK);
153 qp->cq_sg_dst = sglist_alloc(2, M_WAITOK);
160 struct ccp_queue *qp; local
164 qp
531 struct ccp_queue *qp; local
647 ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags) argument
668 ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags) argument
681 ccp_queue_release(struct ccp_queue *qp) argument
693 ccp_queue_abort(struct ccp_queue *qp) argument
735 db_show_ccp_qp(struct ccp_queue *qp) argument
[all...]
/freebsd-current/sys/dev/qlnx/qlnxr/
qlnxr_cm.c
41 struct qlnxr_qp *qp,
49 dev->gsi_qp = qp;
67 struct qlnxr_qp *qp = dev->gsi_qp; local
76 spin_lock_irqsave(&qp->q_lock, flags);
78 qlnxr_inc_sw_gsi_cons(&qp->sq);
80 spin_unlock_irqrestore(&qp->q_lock, flags);
96 // struct qlnxr_qp *qp = dev->gsi_qp;
97 struct qlnxr_qp *qp = NULL; local
113 qp = dev->gsi_qp;
123 spin_lock_irqsave(&qp
40 qlnxr_store_gsi_qp_cq(struct qlnxr_dev *dev, struct qlnxr_qp *qp, struct ib_qp_init_attr *attrs) argument
313 qlnxr_ll2_start(struct qlnxr_dev *dev, struct ib_qp_init_attr *attrs, struct qlnxr_qp *qp) argument
388 qlnxr_create_gsi_qp(struct qlnxr_dev *dev, struct ib_qp_init_attr *attrs, struct qlnxr_qp *qp) argument
481 qlnxr_gsi_build_header(struct qlnxr_dev *dev, struct qlnxr_qp *qp, const struct ib_send_wr *swr, struct ib_ud_header *udh, int *roce_mode) argument
604 qlnxr_gsi_build_packet(struct qlnxr_dev *dev, struct qlnxr_qp *qp, const struct ib_send_wr *swr, struct ecore_roce_ll2_packet **p_packet) argument
674 struct qlnxr_qp *qp = get_qlnxr_qp(ibqp); local
748 struct qlnxr_qp *qp = get_qlnxr_qp(ibqp); local
808 struct qlnxr_qp *qp = dev->gsi_qp; local
[all...]
/freebsd-current/contrib/ofed/libmlx5/
qp.c
61 static void *get_recv_wqe(struct mlx5_qp *qp, int n) argument
63 return qp->buf.buf + qp->rq.offset + (n << qp->rq.wqe_shift);
93 int mlx5_copy_to_recv_wqe(struct mlx5_qp *qp, int idx, void *buf, int size) argument
96 int max = 1 << (qp->rq.wqe_shift - 4);
98 scat = get_recv_wqe(qp, idx);
99 if (unlikely(qp->wq_sig))
105 int mlx5_copy_to_send_wqe(struct mlx5_qp *qp, int idx, void *buf, int size) argument
112 idx &= (qp
153 mlx5_get_send_wqe(struct mlx5_qp *qp, int n) argument
164 mlx5_init_qp_indices(struct mlx5_qp *qp) argument
238 mlx5_bf_copy(unsigned long long *dst, unsigned long long *src, unsigned bytecnt, struct mlx5_qp *qp) argument
269 set_data_inl_seg(struct mlx5_qp *qp, struct ibv_send_wr *wr, void *wqe, int *sz, struct mlx5_sg_copy_ptr *sg_copy_ptr) argument
319 dump_wqe(FILE *fp, int idx, int size_16, struct mlx5_qp *qp) argument
340 mlx5_get_atomic_laddr(struct mlx5_qp *qp, uint16_t idx, int *byte_count) argument
421 set_umr_data_seg(struct mlx5_qp *qp, enum ibv_mw_type type, int32_t rkey, struct ibv_mw_bind_info *bind_info, uint32_t qpn, void **seg, int *size) argument
441 set_umr_mkey_seg(struct mlx5_qp *qp, enum ibv_mw_type type, int32_t rkey, struct ibv_mw_bind_info *bind_info, uint32_t qpn, void **seg, int *size) argument
479 set_umr_control_seg(struct mlx5_qp *qp, enum ibv_mw_type type, int32_t rkey, struct ibv_mw_bind_info *bind_info, uint32_t qpn, void **seg, int *size) argument
516 set_bind_wr(struct mlx5_qp *qp, enum ibv_mw_type type, int32_t rkey, struct ibv_mw_bind_info *bind_info, uint32_t qpn, void **seg, int *size) argument
562 set_tso_eth_seg(void **seg, struct ibv_send_wr *wr, void *qend, struct mlx5_qp *qp, int *size) argument
616 struct mlx5_qp *qp = to_mqp(ibqp); local
990 mlx5_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw, struct ibv_mw_bind *mw_bind) argument
1037 set_sig_seg(struct mlx5_qp *qp, struct mlx5_rwqe_sig *sig, int size, uint16_t idx) argument
1137 struct mlx5_qp *qp = to_mqp(ibqp); local
1238 mlx5_store_qp(struct mlx5_context *ctx, uint32_t qpn, struct mlx5_qp *qp) argument
[all...]
/freebsd-current/sys/dev/mlx5/mlx5_ib/
mlx5_ib_qp.c
100 static void *get_wqe(struct mlx5_ib_qp *qp, int offset) argument
102 return mlx5_buf_offset(&qp->buf, offset);
105 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) argument
107 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
110 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) argument
112 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
118 * @qp
132 mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, void *buffer, u32 length, struct mlx5_ib_qp_base *base) argument
187 mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) argument
234 set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) argument
374 calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, struct mlx5_ib_qp *qp) argument
421 set_user_buf_size(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd, struct mlx5_ib_qp_base *base, struct ib_qp_init_attr *attr) argument
760 create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_qp *qp, struct ib_udata *udata, struct ib_qp_init_attr *attr, u32 **in, struct mlx5_ib_create_qp_resp *resp, int *inlen, struct mlx5_ib_qp_base *base) argument
897 destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base, struct ib_udata *udata) argument
919 create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *init_attr, struct mlx5_ib_qp *qp, u32 **in, int *inlen, struct mlx5_ib_qp_base *base) argument
1031 destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) argument
1042 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) argument
1271 create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, u32 *in, struct ib_pd *pd) argument
1326 destroy_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) argument
1344 raw_packet_qp_copy_info(struct mlx5_ib_qp *qp, struct mlx5_ib_raw_packet_qp *raw_packet_qp) argument
1356 destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) argument
1362 create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) argument
1586 create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, struct mlx5_ib_qp *qp) argument
1946 get_pd(struct mlx5_ib_qp *qp) argument
1990 destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct ib_udata *udata) argument
2099 struct mlx5_ib_qp *qp; local
2190 mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) argument
2205 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask, __be32 *hw_access_flags_be) argument
2324 mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_ah_attr *ah, struct mlx5_qp_path *path, u8 port, int attr_mask, u32 path_flags, const struct ib_qp_attr *attr, bool alt) argument
2615 modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct mlx5_modify_raw_qp_param *raw_qp_param, u8 tx_affinity) argument
2719 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
2961 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
3059 set_eth_seg(struct mlx5_wqe_eth_seg *eseg, const struct ib_send_wr *wr, void *qend, struct mlx5_ib_qp *qp, int *size) argument
3408 set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, void *wqe, int *sz) argument
3554 set_sig_data_segment(const struct ib_sig_handover_wr *wr, struct mlx5_ib_qp *qp, void **seg, int *size) argument
3687 set_sig_umr_wr(const struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, void **seg, int *size) argument
3763 set_reg_wr(struct mlx5_ib_qp *qp, const struct ib_reg_wr *wr, void **seg, int *size) argument
3795 set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size) argument
3809 dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) argument
3847 begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, const struct ib_send_wr *wr, unsigned *idx, int *size, int nreq, int send_flags) argument
3872 finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl, u8 size, unsigned idx, u64 wr_id, int nreq, u8 fence, u8 next_fence, u32 mlx5_opcode) argument
3902 struct mlx5_ib_qp *qp; local
4214 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
4415 sqrq_state_to_qp_state(u8 sq_state, u8 rq_state, struct mlx5_ib_qp *qp, u8 *qp_state) argument
4460 query_raw_packet_qp_state(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, u8 *raw_packet_qp_state) argument
4487 query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct ib_qp_attr *qp_attr) argument
4555 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
[all...]

Completed in 464 milliseconds
