Searched refs:sq (Results 151 - 175 of 239) sorted by relevance

12345678910

/linux-master/drivers/infiniband/hw/mlx5/
H A Dcq.c347 idx = tail & (qp->sq.wqe_cnt - 1);
351 tail = qp->sq.w_list[idx].next;
353 tail = qp->sq.w_list[idx].next;
354 qp->sq.last_poll = tail;
402 wq = (is_send) ? &qp->sq : &qp->rq;
502 wq = &(*cur_qp)->sq;
534 wq = &(*cur_qp)->sq;
H A Dmlx5_ib.h454 struct mlx5_ib_wq *sq; member in struct:mlx5_ib_sq
463 struct mlx5_ib_sq sq; member in struct:mlx5_ib_raw_packet_qp
510 struct mlx5_ib_wq sq; member in struct:mlx5_ib_qp
/linux-master/drivers/infiniband/hw/irdma/
H A Duk.c90 * irdma_clr_wqes - clear next 128 sq entries
96 struct irdma_qp_quanta *sq; local
101 sq = qp->sq_base + wqe_idx;
103 memset(sq, qp->swqe_polarity ? 0 : 0xFF,
104 128 * sizeof(*sq));
106 memset(sq, qp->swqe_polarity ? 0xFF : 0,
107 128 * sizeof(*sq));
229 * @info: post sq information
230 * @post_sq: flag to post sq
323 * @info: post sq information
[all...]
H A Duser.h364 struct irdma_qp_quanta *sq; member in struct:irdma_qp_uk_init_info
H A Dtype.h231 struct irdma_cqp_quanta *sq; member in struct:irdma_cqp_init_info
954 bool sq:1; member in struct:irdma_aeqe_info
1139 bool sq:1; member in struct:irdma_qp_flush_info
1484 * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
/linux-master/drivers/infiniband/hw/hns/
H A Dhns_roce_hw_v2.c202 if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
629 hr_reg_write(&sq_db, DB_PI, qp->sq.head);
632 hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
683 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);
685 hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
703 spin_lock_irqsave(&qp->sq.lock, flags);
715 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
721 wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
723 if (unlikely(wr->num_sge > qp->sq
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
H A Dvcn_v4_0_3.c136 fw_shared->sq.is_enabled = true;
180 fw_shared->sq.is_enabled = cpu_to_le32(false);
830 fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
840 fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
843 fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
1204 fw_shared->sq.queue_mode &=
1260 fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
H A Dvcn_v4_0_5.c149 fw_shared->sq.is_enabled = 1;
195 fw_shared->sq.is_enabled = 0;
927 fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
938 fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
1111 fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
1122 fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
1171 fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
H A Dvcn_v4_0.c109 fw_shared->sq.is_enabled = 1;
227 fw_shared->sq.is_enabled = 0;
1016 fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
1027 fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
1199 fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
1210 fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
1509 fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
/linux-master/drivers/net/wireless/ath/ath12k/
H A Dqmi.c3100 struct sockaddr_qrtr *sq,
3136 struct sockaddr_qrtr *sq,
3148 struct sockaddr_qrtr *sq,
3188 struct sockaddr_qrtr *sq = &qmi->sq; local
3191 sq->sq_family = AF_QIPCRTR;
3192 sq->sq_node = service->node;
3193 sq->sq_port = service->port;
3195 ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
3196 sizeof(*sq),
3099 ath12k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *data) argument
3135 ath12k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) argument
3147 ath12k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) argument
[all...]
/linux-master/drivers/nvme/target/
H A Dzns.c75 u8 zasl = req->sq->ctrl->subsys->zasl;
76 struct nvmet_ctrl *ctrl = req->sq->ctrl;
/linux-master/drivers/staging/ks7010/
H A Dks_wlan.h205 u8 sq; member in struct:local_ap
/linux-master/drivers/infiniband/sw/rxe/
H A Drxe_verbs.h217 struct rxe_sq sq; member in struct:rxe_qp
/linux-master/drivers/infiniband/hw/mthca/
H A Dmthca_provider.c496 qp->sq.db_index = ucmd.sq_db_index;
543 init_attr->cap.max_send_wr = qp->sq.max;
545 init_attr->cap.max_send_sge = qp->sq.max_gs;
564 to_mqp(qp)->sq.db_index);
/linux-master/drivers/iommu/intel/
H A Dirq_remapping.c281 unsigned int sq, unsigned int sid)
286 irte->sq = sq;
1269 irte->sid, irte->sq, irte->svt);
280 set_irte_sid(struct irte *irte, unsigned int svt, unsigned int sq, unsigned int sid) argument
/linux-master/drivers/net/wireless/ath/ath11k/
H A Dqmi.h130 struct sockaddr_qrtr sq; member in struct:ath11k_qmi
H A Dqmi.c3011 struct sockaddr_qrtr *sq,
3057 struct sockaddr_qrtr *sq,
3069 struct sockaddr_qrtr *sq,
3087 struct sockaddr_qrtr *sq,
3101 struct sockaddr_qrtr *sq,
3161 struct sockaddr_qrtr *sq = &qmi->sq; local
3164 sq->sq_family = AF_QIPCRTR;
3165 sq->sq_node = service->node;
3166 sq
3010 ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *data) argument
3056 ath11k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) argument
3068 ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) argument
3086 ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) argument
3100 ath11k_qmi_msg_fw_init_done_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) argument
[all...]
/linux-master/include/linux/
H A Dio_uring_types.h120 * The kernel controls head of the sq ring and the tail of the cq ring,
121 * and the application controls tail of the sq ring and the head of the
124 struct io_uring sq, cq; member in struct:io_rings
152 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
364 struct io_sq_data *sq_data; /* if using sq thread polling */
/linux-master/net/qrtr/
H A Daf_qrtr.c1150 struct sockaddr_qrtr *sq; local
1177 sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
1178 *sq = ipc->us;
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/ipoib/
H A Dipoib.c131 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
707 struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)]; local
711 mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
/linux-master/drivers/scsi/qedi/
H A Dqedi_fw.c1042 task_params.sqe = &ep->sq[sq_idx];
1116 task_params.sqe = &ep->sq[sq_idx];
1490 task_params.sqe = &ep->sq[sq_idx];
1614 task_params.sqe = &ep->sq[sq_idx];
1731 task_params.sqe = &ep->sq[sq_idx];
2093 task_params.sqe = &ep->sq[sq_idx];
2148 task_params.sqe = &ep->sq[sq_idx];
/linux-master/drivers/target/
H A Dtarget_core_transport.c1882 struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work); local
1888 cmd_list = llist_del_all(&sq->cmd_list);
1915 struct se_cmd_queue *sq; local
1917 sq = &se_dev->queues[cpu].sq;
1918 llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
1919 queue_work_on(cpu, target_submission_wq, &sq->work);
/linux-master/drivers/crypto/hisilicon/sec2/
H A Dsec_crypto.c1052 struct skcipher_request *sq = req->c_req.sk_req; local
1054 return sec_cipher_map(ctx, req, sq->src, sq->dst);
1059 struct skcipher_request *sq = req->c_req.sk_req; local
1061 sec_cipher_unmap(ctx, req, sq->src, sq->dst);
/linux-master/kernel/rcu/
H A Dtree_nocb.h199 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) argument
201 swake_up_all(sq);
1756 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) argument
/linux-master/drivers/net/ethernet/cavium/thunder/
H A Dnic.h589 struct sq_cfg_msg sq; member in union:nic_mbx

Completed in 498 milliseconds

12345678910