Searched refs:cq (Results 176 - 200 of 350) sorted by relevance

/linux-master/drivers/isdn/mISDN/
socket.c
281 struct mISDN_ctrl_req cq; local
292 if (copy_from_user(&cq, p, sizeof(cq))) {
299 if (bchan->nr == cq.channel) {
301 CONTROL_CHANNEL, &cq);
307 CONTROL_CHANNEL, &cq);
310 if (copy_to_user(p, &cq, sizeof(cq)))
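The socket.c hits trace the standard mISDN control-request round trip: copy the struct in from user space, dispatch CONTROL_CHANNEL to the B-channel whose nr matches cq.channel, then copy any result fields back out. A minimal kernel-style sketch of that shape (channel_ctrl_ioctl() and dispatch_control_channel() are illustrative names, not the kernel's own):

    /* Sketch only: the copy-in / dispatch / copy-out pattern above. */
    static int channel_ctrl_ioctl(void __user *p)
    {
            struct mISDN_ctrl_req cq;
            int err;

            if (copy_from_user(&cq, p, sizeof(cq)))
                    return -EFAULT;

            err = dispatch_control_channel(&cq);    /* route by cq.channel */
            if (err)
                    return err;

            if (copy_to_user(p, &cq, sizeof(cq)))   /* ops may write results */
                    return -EFAULT;
            return 0;
    }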
/linux-master/net/sunrpc/xprtrdma/
svc_rdma_rw.c
17 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
18 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
250 * @cq: controlling Completion Queue
255 static void svc_rdma_reply_done(struct ib_cq *cq, struct ib_wc *wc) argument
260 struct svcxprt_rdma *rdma = cq->cq_context;
278 * @cq: controlling Completion Queue
283 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) argument
285 struct svcxprt_rdma *rdma = cq->cq_context;
313 * @cq: controlling Completion Queue
317 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc) argument
[all...]
/linux-master/drivers/infiniband/hw/mlx4/
mad.c
1291 static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg) argument
1294 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1302 static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg) argument
1305 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1743 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1745 while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1813 qp_init_attr.init_attr.send_cq = ctx->cq;
1814 qp_init_attr.init_attr.recv_cq = ctx->cq;
1907 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1909 while (mlx4_ib_poll_cq(ctx->cq,
[all...]
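Lines 1743-1745 and 1907-1909 above are the canonical polling idiom: rearm with ib_req_notify_cq() before draining with ib_poll_cq(), so a completion that lands between the two calls still generates an event instead of being lost. Condensed (process_one_wc() is a placeholder):

    /* Rearm first, then drain: closes the notify/poll race window. */
    static void comp_handler(struct ib_cq *cq, void *arg)
    {
            struct ib_wc wc;

            ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
            while (ib_poll_cq(cq, 1, &wc) == 1)
                    process_one_wc(&wc);    /* placeholder per-completion work */
    }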
/linux-master/include/net/mana/
gdma.h
91 } cq; member in union:gdma_doorbell_entry
318 } cq; member in union:gdma_queue::__anon3165
343 } cq; member in union:gdma_queue_spec::__anon3168
431 int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);
433 void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
/linux-master/drivers/infiniband/sw/siw/
siw.h
706 #define siw_dbg_cq(cq, fmt, ...) \
707 ibdev_dbg(cq->base_cq.device, "CQ[%u] %s: " fmt, cq->id, __func__, \
722 void siw_cq_flush(struct siw_cq *cq);
725 int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc);
/linux-master/net/xdp/
xsk_buff_pool.c
98 pool->cq = xs->cq_tmp;
249 if (!pool->fq || !pool->cq)
284 if (pool->cq) {
285 xskq_destroy(pool->cq);
286 pool->cq = NULL;
/linux-master/drivers/isdn/hardware/mISDN/
avmfritz.c
830 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) argument
832 return mISDN_ctrl_bchannel(bch, cq);
867 channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq) argument
871 switch (cq->op) {
873 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
876 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
877 if (cq->channel < 0 || cq->channel > 3) {
881 ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel);
884 ret = fc->isac.ctrl(&fc->isac, HW_TIMER3_VALUE, cq
[all...]
netjet.c
783 channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq) argument
785 return mISDN_ctrl_bchannel(&bc->bch, cq);
821 channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq) argument
825 switch (cq->op) {
827 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
830 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
831 if (cq->channel < 0 || cq->channel > 3) {
835 ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
838 ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq
[all...]
hfcpci.c
1520 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) argument
1522 return mISDN_ctrl_bchannel(bch, cq);
1780 channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq) argument
1785 switch (cq->op) {
1787 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1792 if (cq->channel < 0 || cq->channel > 2) {
1796 if (cq->channel & 1) {
1808 if (cq->channel & 2) {
1820 if (cq
[all...]
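avmfritz.c, netjet.c and hfcpci.c implement the same channel_ctrl() contract: MISDN_CTRL_GETOP answers with a bitmask of supported operations written back into cq->op, and a loop request validates cq->channel (0 disable, 1 B1, 2 B2, 3 both) before touching the hardware. The shared shape, stripped down (hw_set_loop() stands in for each driver's hardware call):

    static int channel_ctrl(struct mISDN_ctrl_req *cq)
    {
            int ret = 0;

            switch (cq->op) {
            case MISDN_CTRL_GETOP:
                    /* advertise what this driver supports */
                    cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
                    break;
            case MISDN_CTRL_LOOP:
                    /* cq->channel: 0 disable, 1 B1 loop, 2 B2 loop, 3 both */
                    if (cq->channel < 0 || cq->channel > 3)
                            return -EINVAL;
                    ret = hw_set_loop(cq->channel);  /* per-driver hook */
                    break;
            default:
                    ret = -EOPNOTSUPP;
                    break;
            }
            return ret;
    }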
/linux-master/kernel/locking/
lockdep.c
1464 static inline void __cq_init(struct circular_queue *cq) argument
1466 cq->front = cq->rear = 0;
1470 static inline int __cq_empty(struct circular_queue *cq) argument
1472 return (cq->front == cq->rear);
1475 static inline int __cq_full(struct circular_queue *cq) argument
1477 return ((cq->rear + 1) & CQ_MASK) == cq->front;
1480 static inline int __cq_enqueue(struct circular_queue *cq, struc argument
1494 __cq_dequeue(struct circular_queue *cq) argument
1507 __cq_get_elem_count(struct circular_queue *cq) argument
1724 struct circular_queue *cq = &lock_cq; local
[all...]
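The lockdep snippets outline a textbook power-of-two ring buffer: front == rear is empty, and rear one slot behind front (modulo CQ_MASK + 1) is full, deliberately wasting one slot so the two states stay distinguishable. Reconstructed as a self-contained sketch (element type simplified to void *; lockdep itself queues struct lock_list pointers and its queue size is configurable):

    #define MAX_CIRCULAR_QUEUE_SIZE 4096            /* must be a power of two */
    #define CQ_MASK                 (MAX_CIRCULAR_QUEUE_SIZE - 1)

    struct circular_queue {
            void *element[MAX_CIRCULAR_QUEUE_SIZE];
            unsigned int front, rear;
    };

    static inline void __cq_init(struct circular_queue *cq)
    {
            cq->front = cq->rear = 0;
    }

    static inline int __cq_empty(struct circular_queue *cq)
    {
            return cq->front == cq->rear;
    }

    static inline int __cq_full(struct circular_queue *cq)
    {
            return ((cq->rear + 1) & CQ_MASK) == cq->front;
    }

    static inline int __cq_enqueue(struct circular_queue *cq, void *elem)
    {
            if (__cq_full(cq))
                    return -1;
            cq->element[cq->rear] = elem;
            cq->rear = (cq->rear + 1) & CQ_MASK;
            return 0;
    }

    static inline void *__cq_dequeue(struct circular_queue *cq)
    {
            void *elem;

            if (__cq_empty(cq))
                    return NULL;
            elem = cq->element[cq->front];
            cq->front = (cq->front + 1) & CQ_MASK;
            return elem;
    }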
/linux-master/drivers/net/ethernet/pensando/ionic/
ionic_lif.c
580 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
589 /* q & cq need to be contiguous in NotifyQ, so alloc it all in q
606 /* Base the NotifyQ cq.base off of the ALIGNed q.base */
607 new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
608 new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
609 new->cq.bound_q = &new->q;
650 /* cq DMA descriptors */
655 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
659 new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
660 new->cq
784 struct ionic_cq *cq = &qcq->cq; local
852 struct ionic_cq *cq = &qcq->cq; local
[all...]
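Lines 606-608 show why the NotifyQ allocates its queue and CQ out of one DMA block: the CQ's CPU pointer and bus address must be derived with the same page-rounding arithmetic, or the two views of the same memory drift apart. In miniature (the sizes and the single allocation are assumptions for illustration):

    /* One coherent allocation holds the queue followed by its CQ;
     * round the CPU pointer and the DMA address identically. */
    q_base     = dma_alloc_coherent(dev, total_size, &q_base_pa, GFP_KERNEL);
    cq_base    = PTR_ALIGN(q_base + q_size, PAGE_SIZE);   /* CPU view */
    cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);    /* DMA view */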
/linux-master/drivers/infiniband/hw/cxgb4/
qp.c
985 struct t4_cq *cq; local
989 cq = &schp->cq;
1004 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
1005 cq->sw_queue[cq->sw_pidx] = cqe;
1006 t4_swcq_produce(cq);
1009 if (t4_clear_cq_armed(&schp->cq)) {
1041 struct t4_cq *cq; local
1044 cq
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx4/
resource_tracker.c
194 struct res_cq *cq; member in struct:res_srq
454 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
464 dev->quotas.cq =
1672 enum res_cq_states state, struct res_cq **cq)
1702 if (cq)
1703 *cq = r;
3444 struct res_cq *cq = NULL; local
3447 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3460 cq->mtt = mtt;
3480 struct res_cq *cq local
1671 cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, enum res_cq_states state, struct res_cq **cq) argument
3504 struct res_cq *cq; local
3521 handle_resize(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd, struct res_cq *cq) argument
3576 struct res_cq *cq; local
4804 struct res_cq *cq; local
[all...]
/linux-master/drivers/scsi/be2iscsi/
be_main.c
695 mcc = &phba->ctrl.mcc_obj.cq;
770 mcc = &phba->ctrl.mcc_obj.cq;
1816 mcc_cq = &phba->ctrl.mcc_obj.cq;
1868 struct be_queue_info *cq; local
1879 cq = pbe_eq->cq;
1880 sol = queue_tail_node(cq);
1925 /* replenish cq */
1927 hwi_ring_cq_db(phba, cq->id, 32, 0);
2037 queue_tail_inc(cq);
3080 struct be_queue_info *cq, *eq; local
3147 struct be_queue_info *dq, *cq; local
3199 struct be_queue_info *dataq, *cq; local
3523 struct be_queue_info *q, *cq; local
[all...]
/linux-master/drivers/infiniband/ulp/iser/
iser_verbs.c
251 ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
252 if (IS_ERR(ib_conn->cq)) {
253 ret = PTR_ERR(ib_conn->cq);
262 init_attr.send_cq = ib_conn->cq;
263 init_attr.recv_cq = ib_conn->cq;
285 ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
382 ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
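iser_verbs.c draws its CQ from the shared pool: ib_cq_pool_get() returns a CQ with room for cq_size entries on any completion vector (the -1 hint), and both the error path at 285 and the teardown at 382 balance it with ib_cq_pool_put() using the same size. The lifecycle in outline:

    ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
    if (IS_ERR(ib_conn->cq))
            return PTR_ERR(ib_conn->cq);
    ib_conn->cq_size = cq_size;       /* remembered for the matching put */

    /* ... use ib_conn->cq as the QP's send_cq and recv_cq ... */

    ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);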
/linux-master/include/rdma/
ib_verbs.h
757 struct ib_cq *cq; member in union:ib_event::__anon163
1077 struct ib_cq *cq; member in struct:ib_srq_init_attr::__anon169
1377 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1582 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1634 struct ib_cq *cq; member in struct:ib_srq::__anon177
1685 struct ib_cq *cq; member in struct:ib_wq
1705 struct ib_cq *cq; member in struct:ib_wq_init_attr
2339 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2340 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2341 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
3941 ib_destroy_cq(struct ib_cq *cq) argument
3960 ib_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) argument
3993 ib_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags) argument
[all...]
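The done callback at line 1377 is the hook behind every *_done handler in this result set (svc_rdma, isert, nvmet): a driver embeds a struct ib_cqe in its per-request context, points the work request's wr_cqe at it, and recovers the container when the completion fires. A hedged sketch (struct my_req and my_send_done() are illustrative):

    struct my_req {
            struct ib_cqe cqe;              /* embedded completion cookie */
            /* ... per-request state ... */
    };

    static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
    {
            struct my_req *req = container_of(wc->wr_cqe, struct my_req, cqe);

            if (wc->status != IB_WC_SUCCESS)
                    return;                 /* flush or error handling here */
            /* complete the request ... */
    }

    /* at post time: */
    req->cqe.done = my_send_done;
    send_wr.wr_cqe = &req->cqe;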
/linux-master/drivers/net/ethernet/emulex/benet/
be_main.c
1446 entry = txo->cq.dma_mem.va;
1448 i, txo->cq.head, txo->cq.tail,
1449 atomic_read(&txo->cq.used));
2534 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2573 queue_tail_inc(&rxo->cq);
2712 struct be_queue_info *tx_cq = &txo->cq;
2830 struct be_queue_info *rx_cq = &rxo->cq;
2893 be_cq_notify(adapter, txo->cq.id, false, cmpl);
3001 q = &adapter->mcc_obj.cq;
3010 struct be_queue_info *q, *cq; local
3061 struct be_queue_info *cq; local
3120 struct be_queue_info *eq, *cq; local
[all...]
/linux-master/drivers/infiniband/hw/ocrdma/
ocrdma.h
158 struct ocrdma_queue_info cq; member in struct:ocrdma_mq
326 * to cq polling
328 /* syncronizes cq completion handler invoked from multiple context */
496 static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) argument
500 return (cqe_valid == cq->phase);
ocrdma_hw.h
125 void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq);
/linux-master/drivers/net/ethernet/intel/ice/
ice_common.h
29 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
48 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
70 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
/linux-master/drivers/infiniband/ulp/isert/
ib_isert.c
58 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
59 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
60 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
61 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
112 isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);
113 if (IS_ERR(isert_conn->cq)) {
114 isert_err("Unable to allocate cq\n");
115 ret = PTR_ERR(isert_conn->cq);
123 attr.send_cq = isert_conn->cq;
124 attr.recv_cq = isert_conn->cq;
1319 isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) argument
1378 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) argument
1576 isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) argument
1618 isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) argument
1694 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc) argument
1710 isert_send_done(struct ib_cq *cq, struct ib_wc *wc) argument
[all...]
/linux-master/drivers/nvme/target/
rdma.c
91 struct ib_cq *cq; member in struct:nvmet_rdma_queue
166 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
167 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
168 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
169 static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
701 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) argument
754 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) argument
787 static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc) argument
1007 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) argument
1270 queue->cq
[all...]
/linux-master/drivers/scsi/hisi_sas/
hisi_sas_v3_hw.c
2435 static int complete_v3_hw(struct hisi_sas_cq *cq) argument
2438 struct hisi_hba *hisi_hba = cq->hisi_hba;
2440 int queue = cq->id;
2443 rd_point = cq->rd_point;
2494 cq->rd_point = rd_point;
2503 struct hisi_sas_cq *cq = &hisi_hba->cq[queue]; local
2506 spin_lock(&cq->poll_lock);
2507 completed = complete_v3_hw(cq);
2508 spin_unlock(&cq->poll_lock);
2515 struct hisi_sas_cq *cq = p; local
2524 struct hisi_sas_cq *cq = p; local
2601 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; local
3661 struct hisi_sas_cq *cq = debugfs_cq->cq; local
4583 struct hisi_sas_debugfs_cq *cq = local
4931 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; local
[all...]
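complete_v3_hw() is a consumer-index walk: read the hardware's write pointer, process entries from the cached rd_point up to it, then store the new rd_point back; the callers at 2506-2508 take poll_lock because the IRQ thread and the poll path can drain the same CQ. The shape of it (the register access, slot handling, and QUEUE_SLOTS are placeholders):

    static int complete_cq(struct hisi_sas_cq *cq)
    {
            u32 rd = cq->rd_point;
            u32 wr = read_cq_wr_point(cq);     /* hypothetical MMIO read */
            int completed = 0;

            while (rd != wr) {
                    handle_completion(cq, rd); /* hypothetical per-slot work */
                    rd = (rd + 1) % QUEUE_SLOTS;
                    completed++;
            }
            cq->rd_point = rd;                 /* publish consumer index */
            return completed;
    }

    spin_lock(&cq->poll_lock);
    completed = complete_cq(cq);
    spin_unlock(&cq->poll_lock);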
/linux-master/drivers/infiniband/core/
mad_priv.h
202 struct ib_cq *cq; member in struct:ib_mad_port_private
/linux-master/drivers/infiniband/hw/efa/
efa_com.h
66 struct efa_com_admin_cq cq; member in struct:efa_com_admin_queue
