Searched refs:sq (Results 26 - 50 of 239) sorted by relevance

/linux-master/drivers/infiniband/hw/mlx5/
mem.c
116 spin_lock_irqsave(&qp->sq.lock, flags);
118 idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
119 ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
124 cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
128 qp->sq.wrid[idx] = wr_id;
129 qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
130 qp->sq.wqe_head[idx] = qp->sq.head + 1;
131 qp->sq
[all...]
wr.c
88 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
91 mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size,
285 handle_post_send_edge(&qp->sq, wqe,
514 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
523 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
591 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
597 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
673 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
678 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
681 mlx5r_memcpy_send_wqe(&qp->sq, cur_edg
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lib/
aso.c
159 void *sqc_data, struct mlx5_aso *sq)
162 struct mlx5_wq_cyc *wq = &sq->wq;
166 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
170 err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
179 void *sqc_data, struct mlx5_aso *sq)
186 sizeof(u64) * sq->wq_ctrl.buf.npages;
195 MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
207 MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
209 MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
211 mlx5_fill_page_frag_array(&sq
158 mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node, void *sqc_data, struct mlx5_aso *sq) argument
178 create_aso_sq(struct mlx5_core_dev *mdev, int pdn, void *sqc_data, struct mlx5_aso *sq) argument
242 mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn, void *sqc_data, struct mlx5_aso *sq) argument
258 mlx5_aso_free_sq(struct mlx5_aso *sq) argument
263 mlx5_aso_destroy_sq(struct mlx5_aso *sq) argument
269 mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node, u32 pdn, struct mlx5_aso *sq) argument
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
qos.c
79 struct mlx5e_txqsq *sq; local
117 sq = kzalloc(sizeof(*sq), GFP_KERNEL);
119 if (!sq)
128 err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
134 err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &param_sq, sq, 0, hw_id,
139 rcu_assign_pointer(qos_sqs[qid], sq);
144 mlx5e_close_cq(&sq->cq);
146 kfree(sq);
160 struct mlx5e_txqsq *sq; local
188 struct mlx5e_txqsq *sq; local
211 struct mlx5e_txqsq *sq; local
242 struct mlx5e_txqsq *sq; local
336 struct mlx5e_txqsq *sq; local
[all...]
ptp.c
187 struct mlx5e_txqsq *sq = &ptpsq->txqsq; local
208 hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
218 !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
318 struct mlx5e_txqsq *sq, int tc,
323 struct mlx5_wq_cyc *wq = &sq->wq;
327 sq->pdev = c->pdev;
328 sq->clock = &mdev->clock;
329 sq->mkey_be = c->mkey_be;
330 sq
315 mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct mlx5e_txqsq *sq, int tc, struct mlx5e_ptpsq *ptpsq) argument
496 struct mlx5e_txqsq *sq = &ptpsq->txqsq; local
[all...]
health.h
19 void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
20 int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_tx.c
525 static void tx_fill_wi(struct mlx5e_txqsq *sq, argument
529 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
549 post_static_params(struct mlx5e_txqsq *sq, argument
557 pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
558 wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
559 mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
563 tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
564 sq->pc += num_wqebbs;
568 post_progress_params(struct mlx5e_txqsq *sq, argument
584 tx_post_fence_nop(struct mlx5e_txqsq *sq) argument
595 mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq, struct mlx5e_ktls_offload_context_tx *priv_tx, bool skip_static_post, bool fence_first_post) argument
677 tx_post_resync_params(struct mlx5e_txqsq *sq, struct mlx5e_ktls_offload_context_tx *priv_tx, u64 rcd_sn) argument
715 tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn) argument
755 mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi, u32 *dma_fifo_cc) argument
772 mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, struct mlx5e_txqsq *sq, int datalen, u32 seq) argument
829 mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state) argument
[all...]
en_accel.h
119 struct mlx5e_txqsq *sq,
129 if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
135 if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
153 static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq, argument
157 if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
187 static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq, argument
197 if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
118 mlx5e_accel_tx_begin(struct net_device *dev, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_state *state) argument
/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c
59 #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))
61 #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
93 struct hinic_sq *sq, u16 global_qid)
100 wq = sq->wq;
214 * alloc_sq_skb_arr - allocate sq array for saved skb
215 * @sq: HW Send Queue
219 static int alloc_sq_skb_arr(struct hinic_sq *sq) argument
221 struct hinic_wq *wq = sq
92 hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt, struct hinic_sq *sq, u16 global_qid) argument
236 free_sq_skb_arr(struct hinic_sq *sq) argument
281 hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr, dma_addr_t ci_dma_addr, void __iomem *db_base) argument
305 hinic_clean_sq(struct hinic_sq *sq) argument
455 hinic_get_sq_free_wqebbs(struct hinic_sq *sq) argument
594 hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, int nr_sges) argument
615 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos) argument
635 hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, unsigned int cos) argument
657 hinic_sq_get_wqe(struct hinic_sq *sq, unsigned int wqe_size, u16 *prod_idx) argument
674 hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size) argument
687 hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, struct hinic_sq_wqe *sq_wqe, struct sk_buff *skb, unsigned int wqe_size) argument
711 hinic_sq_read_wqebb(struct hinic_sq *sq, struct sk_buff **skb, unsigned int *wqe_size, u16 *cons_idx) argument
749 hinic_sq_read_wqe(struct hinic_sq *sq, struct sk_buff **skb, unsigned int wqe_size, u16 *cons_idx) argument
766 hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size) argument
[all...]
hinic_tx.c
47 #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
503 qp = container_of(txq->sq, struct hinic_qp, sq);
512 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
516 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
533 hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
534 hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
539 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
564 qp = container_of(txq->sq, struc
668 struct hinic_sq *sq = txq->sq; local
703 struct hinic_sq *sq = txq->sq; local
804 struct hinic_sq *sq = txq->sq; local
844 struct hinic_sq *sq = txq->sq; local
858 hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, struct net_device *netdev) argument
[all...]
hinic_tx.h
31 struct hinic_sq *sq; member in struct:hinic_txq
49 int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
hinic_debugfs.c
23 static u64 hinic_dbg_get_sq_info(struct hinic_dev *nic_dev, struct hinic_sq *sq, int idx) argument
25 struct hinic_wq *wq = sq->wq;
29 return nic_dev->hwdev->func_to_io.global_qpn + sq->qid;
35 return be16_to_cpu(*(__be16 *)(sq->hw_ci_addr)) & wq->mask;
37 return sq->msix_entry;
214 struct hinic_sq *sq; local
218 sq = dev->txqs[sq_id].sq;
224 return create_dbg_files(dev, HINIC_DBG_SQ_INFO, sq, root, &sq
228 hinic_sq_debug_rem(struct hinic_sq *sq) argument
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en_dim.c
57 struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim); local
61 mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
en_main.c
287 struct mlx5e_icosq *sq,
299 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
1368 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) argument
1370 kvfree(sq->db.xdpi_fifo.xi);
1371 kvfree(sq->db.wqe_info);
1374 static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa) argument
1376 struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
1377 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1390 xdpi_fifo->pc = &sq->xdpi_fifo_pc;
1391 xdpi_fifo->cc = &sq
286 mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *wqe) argument
1397 mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) argument
1417 mlx5e_alloc_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct xsk_buff_pool *xsk_pool, struct mlx5e_sq_param *param, struct mlx5e_xdpsq *sq, bool is_redirect) argument
1464 mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq) argument
1470 mlx5e_free_icosq_db(struct mlx5e_icosq *sq) argument
1475 mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) argument
1490 struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq, local
1498 struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq, local
1506 mlx5e_alloc_icosq(struct mlx5e_channel *c, struct mlx5e_sq_param *param, struct mlx5e_icosq *sq, work_func_t recover_work_func) argument
1540 mlx5e_free_icosq(struct mlx5e_icosq *sq) argument
1546 mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) argument
1553 mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) argument
1581 mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct mlx5e_txqsq *sq, int tc) argument
1637 mlx5e_free_txqsq(struct mlx5e_txqsq *sq) argument
1766 mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, struct mlx5e_sq_stats *sq_stats) argument
1805 mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) argument
1821 mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) argument
1845 mlx5e_close_txqsq(struct mlx5e_txqsq *sq) argument
1863 struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq, local
1869 mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct mlx5e_icosq *sq, work_func_t recover_work_func) argument
1915 mlx5e_close_icosq(struct mlx5e_icosq *sq) argument
1926 mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, struct mlx5e_xdpsq *sq, bool is_redirect) argument
1989 mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) argument
2256 mlx5e_set_sq_maxrate(struct net_device *dev, struct mlx5e_txqsq *sq, u32 rate) argument
2310 struct mlx5e_txqsq *sq = priv->txq2sq[index]; local
3011 struct mlx5e_txqsq *sq = &c->sq[tc]; local
3025 struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq; local
4802 struct mlx5e_txqsq *sq = priv->txq2sq[i]; local
[all...]
/linux-master/drivers/infiniband/hw/cxgb4/
restrack.c
42 if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
46 if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
48 if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
50 if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
52 if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
54 if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
56 if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
58 if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
60 if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
95 static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u1 argument
119 fill_swsqes(struct sk_buff *msg, struct t4_sq *sq, u16 first_idx, struct t4_swsqe *first_sqe, u16 last_idx, struct t4_swsqe *last_sqe) argument
[all...]
qp.c
95 static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) argument
97 c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
100 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) argument
102 dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
103 dma_unmap_addr(sq, mapping));
106 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) argument
108 if (t4_sq_onchip(sq))
109 dealloc_oc_sq(rdev, sq);
114 alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) argument
129 alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) argument
140 alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user) argument
414 build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, const struct ib_send_wr *wr, int max, u32 *plenp) argument
489 build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16) argument
556 build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16) argument
607 build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp, struct ib_send_wr *wr) argument
616 build_rdma_write_cmpl(struct t4_sq *sq, struct fw_ri_rdma_write_cmpl_wr *wcwr, const struct ib_send_wr *wr, u8 *len16) argument
820 build_memreg(struct t4_sq *sq, union t4_wr *wqe, const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16, bool dsgl_supported) argument
[all...]
cq.c
195 CQE_QPID_V(wq->sq.qid));
229 CQE_QPID_V(wq->sq.qid));
247 if (wq->sq.flush_cidx == -1)
248 wq->sq.flush_cidx = wq->sq.cidx;
249 idx = wq->sq.flush_cidx;
250 while (idx != wq->sq.pidx) {
251 swsqe = &wq->sq.sw_sq[idx];
254 if (wq->sq.oldest_read == swsqe) {
258 if (++idx == wq->sq
[all...]
t4.h
383 struct t4_sq sq; member in struct:t4_wq
522 static inline int t4_sq_onchip(struct t4_sq *sq) argument
524 return sq->flags & T4_SQ_ONCHIP;
529 return wq->sq.in_use == 0;
534 return wq->sq.size - 1 - wq->sq.in_use;
539 wq->sq.in_use++;
540 if (++wq->sq.pidx == wq->sq.size)
541 wq->sq
[all...]
/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
cn10k.c
85 aq->sq.cq = pfvf->hw.rx_queues + qidx;
86 aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
87 aq->sq.cq_ena = 1;
88 aq->sq.ena = 1;
89 aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
90 aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
91 aq->sq.default_chan = pfvf->hw.tx_chan_base;
92 aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
93 aq->sq.sqb_aura = sqb_aura;
94 aq->sq
138 cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) argument
[all...]
/linux-master/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
62 qp->sq.condition = false;
63 qp->sq.send_phantom = false;
64 qp->sq.single = false;
75 if (!qp->sq.flushed) {
80 qp->sq.flushed = true;
125 if (qp->sq.flushed) {
126 qp->sq.flushed = false;
143 qp->sq.hwq.prod = 0;
144 qp->sq.hwq.cons = 0;
178 struct bnxt_qplib_q *sq local
200 struct bnxt_qplib_q *sq = &qp->sq; local
832 struct bnxt_qplib_q *sq = &qp->sq; local
953 struct bnxt_qplib_q *sq; local
975 struct bnxt_qplib_q *sq = &qp->sq; local
1559 struct bnxt_qplib_q *sq = &qp->sq; local
1752 bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq, struct bnxt_qplib_swq *swq, bool hw_retx) argument
1778 struct bnxt_qplib_q *sq = &qp->sq; local
1788 struct bnxt_qplib_q *sq = &qp->sq; local
2272 __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp, struct bnxt_qplib_cqe **pcqe, int *budget) argument
2380 struct bnxt_qplib_q *sq = &qp->sq; local
2481 struct bnxt_qplib_q *sq; local
2859 struct bnxt_qplib_q *sq, *rq; local
[all...]
/linux-master/include/linux/soc/qcom/
qmi.h
150 void (*msg_handler)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
195 void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
203 * @sq: sockaddr of @sock
220 struct sockaddr_qrtr sq; member in struct:qmi_handle
250 ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
253 ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
256 ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
/linux-master/drivers/net/
virtio_net.c
166 /* Record whether sq is in reset state. */
258 struct send_queue *sq; member in struct:virtnet_info
380 static void __free_old_xmit(struct send_queue *sq, bool in_napi, argument
386 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
530 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
829 static void free_old_xmit(struct send_queue *sq, bool in_napi) argument
833 __free_old_xmit(sq, in_napi, &stats);
841 u64_stats_update_begin(&sq->stats.syncp);
842 u64_stats_add(&sq->stats.bytes, stats.bytes);
843 u64_stats_add(&sq
857 check_sq_full_and_disable(struct virtnet_info *vi, struct net_device *dev, struct send_queue *sq) argument
892 __virtnet_xdp_xmit_one(struct virtnet_info *vi, struct send_queue *sq, struct xdp_frame *xdpf) argument
985 struct send_queue *sq; local
2165 struct send_queue *sq = &vi->sq[index]; local
2212 struct send_queue *sq; local
2311 struct send_queue *sq = container_of(napi, struct send_queue, napi); local
2355 xmit_skb(struct send_queue *sq, struct sk_buff *skb) argument
2406 struct send_queue *sq = &vi->sq[qnum]; local
2484 virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq, u32 ring_num) argument
2634 struct send_queue *sq = &vi->sq[i]; local
3023 struct send_queue *sq; local
3361 struct send_queue *sq = &vi->sq[i]; local
4173 struct send_queue *sq = &priv->sq[txqueue]; local
[all...]
/linux-master/drivers/infiniband/hw/mthca/
mthca_qp.c
221 (n << qp->sq.wqe_shift);
224 (n << qp->sq.wqe_shift)) >>
226 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
505 qp_attr->cap.max_send_wr = qp->sq.max;
507 qp_attr->cap.max_send_sge = qp->sq.max_gs;
619 if (qp->sq.max)
620 qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
621 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
739 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index);
840 mthca_wq_reset(&qp->sq);
[all...]
/linux-master/drivers/nvme/target/
core.c
86 req->sq->qid);
713 if (req->sq->size) {
716 old_sqhd = READ_ONCE(req->sq->sqhd);
718 new_sqhd = (old_sqhd + 1) % req->sq->size;
719 } while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
721 req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
726 struct nvmet_ctrl *ctrl = req->sq->ctrl;
741 new_error_slot->sqid = cpu_to_le16(req->sq->qid);
757 if (!req->sq->sqhd_disabled)
759 req->cqe->sq_id = cpu_to_le16(req->sq
774 struct nvmet_sq *sq = req->sq; local
788 nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, u16 size) argument
800 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); local
805 nvmet_sq_destroy(struct nvmet_sq *sq) argument
838 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); local
843 nvmet_sq_init(struct nvmet_sq *sq) argument
936 nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops) argument
[all...]
/linux-master/block/
blk-throttle.c
99 * @sq: the throtl_service_queue of interest
101 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
104 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq) argument
106 if (sq && sq->parent_sq)
107 return container_of(sq, struct throtl_grp, service_queue);
114 * @sq: the throtl_service_queue of interest
119 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq) argument
121 struct throtl_grp *tg = sq_to_tg(sq);
330 throtl_service_queue_init(struct throtl_service_queue *sq) argument
391 struct throtl_service_queue *sq = &tg->service_queue; local
577 throtl_schedule_pending_timer(struct throtl_service_queue *sq, unsigned long expires) argument
614 throtl_schedule_next_dispatch(struct throtl_service_queue *sq, bool force) argument
988 struct throtl_service_queue *sq = &tg->service_queue; local
1011 struct throtl_service_queue *sq = &tg->service_queue; local
1047 struct throtl_service_queue *sq = &tg->service_queue; local
1090 struct throtl_service_queue *sq = &tg->service_queue; local
1127 struct throtl_service_queue *sq; local
1173 struct throtl_service_queue *sq = from_timer(sq, t, pending_timer); local
1314 struct throtl_service_queue *sq = &tg->service_queue; local
1734 struct throtl_service_queue *sq = &tg->service_queue; local
1831 struct throtl_service_queue *sq = &tg->service_queue; local
1934 struct throtl_service_queue *sq = &tg->service_queue; local
2182 struct throtl_service_queue *sq; local
[all...]
