Searched refs:qidx (Results 1 - 25 of 65) sorted by relevance

/linux-master/drivers/net/ethernet/fungible/funeth/
funeth_trace.h
23 __field(u32, qidx)
31 __entry->qidx = txq->qidx;
39 __get_str(devname), __entry->qidx, __entry->sqe_idx,
53 __field(u32, qidx)
61 __entry->qidx = txq->qidx;
69 __get_str(devname), __entry->qidx, __entry->sqe_idx,
84 __field(u32, qidx)
94 __entry->qidx
funeth_txrx.h
117 u16 qidx; /* queue index within net_device */ member in struct:funeth_txq
173 u16 qidx; /* queue index within net_device */ member in struct:funeth_rxq
254 int funeth_txq_create(struct net_device *dev, unsigned int qidx,
259 int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
funeth_rx.c
432 skb_record_rx_queue(skb, q->qidx);
614 unsigned int qidx,
629 q->qidx = qidx;
673 netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx);
704 err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,
750 q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx,
761 q->qidx, err);
776 q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx);
788 int funeth_rxq_create(struct net_device *dev, unsigned int qidx, argument
613 fun_rxq_create_sw(struct net_device *dev, unsigned int qidx, unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq) argument
funeth_tx.c
624 unsigned int qidx,
635 numa_node = cpu_to_node(qidx); /* XDP Tx queue */
651 q->qidx = qidx;
661 irq ? "Tx" : "XDP", qidx);
709 q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
718 irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
727 irq ? "Tx" : "XDP", q->qidx, err);
740 q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
759 int funeth_txq_create(struct net_device *dev, unsigned int qidx, argument
623 fun_txq_create_sw(struct net_device *dev, unsigned int qidx, unsigned int ndesc, struct fun_irq *irq) argument
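
Note: in the fun_txq_create_sw() hit above, an XDP Tx queue (created without an IRQ) picks its memory node as cpu_to_node(qidx), which only works if XDP Tx queues are created one per CPU so that qidx doubles as a CPU id. A minimal standalone sketch of that idea, with a made-up CPU-to-node table standing in for the kernel's cpu_to_node():

#include <stdio.h>

/* Made-up topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static const int cpu_to_node_tbl[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* If XDP Tx queues are per-CPU, qidx names the CPU whose NUMA node
 * should hold the queue's memory. */
static int xdp_txq_numa_node(unsigned int qidx)
{
    return cpu_to_node_tbl[qidx % 8];
}

int main(void)
{
    printf("XDP txq 5 allocates on node %d\n", xdp_txq_numa_node(5));
    return 0;
}
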
/linux-master/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c
32 static int nicvf_poll_reg(struct nicvf *nic, int qidx, argument
43 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
505 struct snd_queue *sq, int q_len, int qidx)
525 qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);
526 if (qidx < nic->pnicvf->xdp_tx_queues) {
628 struct queue_set *qs, int qidx)
631 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
633 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
636 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
640 struct queue_set *qs, int qidx)
504 nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len, int qidx) argument
627 nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx) argument
639 nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx) argument
649 nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx) argument
660 nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx) argument
745 nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable) argument
818 nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable) argument
859 nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable) argument
917 nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable) argument
991 int qidx; local
1009 int qidx; local
1069 int qidx; local
1160 nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) argument
1171 nicvf_sq_disable(struct nicvf *nic, int qidx) argument
1180 nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, int qidx) argument
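
Note: the call nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01) above reads as: poll the per-queue status register until the 1-bit field at bit position 21 equals 0x01. A standalone model of one iteration of that check (the real helper presumably retries until a timeout):

#include <stdint.h>
#include <stdio.h>

/* One iteration of the poll: extract the `bits`-wide field at `bit_pos`
 * from the register value and compare it with the expected `val`. */
static int field_matches(uint64_t reg_val, int bit_pos, int bits, uint64_t val)
{
    uint64_t mask = (1ULL << bits) - 1;
    return ((reg_val >> bit_pos) & mask) == val;
}

int main(void)
{
    uint64_t sq_status = 1ULL << 21;   /* fake register with bit 21 set */
    printf("%d\n", field_matches(sq_status, 21, 1, 0x01));   /* prints 1 */
    return 0;
}
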
nicvf_main.c
75 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) argument
78 return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
80 return qidx;
104 u64 qidx, u64 val)
108 writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
111 u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx) argument
115 return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
989 int qidx; local
995 for (qidx = 0; qidx < q
103 nicvf_queue_reg_write(struct nicvf *nic, u64 offset, u64 qidx, u64 val) argument
1043 int qidx = cq_poll->cq_idx; local
1062 u8 qidx; local
1313 int qidx; local
1326 int irq, qidx; local
1451 int cpu, err, qidx; local
1658 int qidx, cpu; local
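
Note: two qidx idioms sit side by side in nicvf_main.c: per-queue register addressing, where the queue number is shifted into the register offset (qidx << NIC_Q_NUM_SHIFT), and nicvf_netdev_qidx(), which flattens a Qset-local index into netdev-wide numbering on secondary queue sets. A standalone model of both, with illustrative values for the two constants; the guard condition of nicvf_netdev_qidx() is elided in the snippet, so a plain flag stands in for it here:

#include <stdint.h>
#include <stdio.h>

#define NIC_Q_NUM_SHIFT        18   /* illustrative stride; see the driver */
#define MAX_CMP_QUEUES_PER_QS   8   /* illustrative Qset size */

/* Per-queue register address: base + offset, with qidx shifted into the
 * per-queue stride, as in nicvf_queue_reg_read()/write(). */
static uint64_t q_reg_addr(uint64_t base, uint64_t offset, uint64_t qidx)
{
    return base + offset + (qidx << NIC_Q_NUM_SHIFT);
}

/* Flatten a Qset-local qidx into netdev-wide numbering, as in
 * nicvf_netdev_qidx(). */
static unsigned int netdev_qidx(int secondary, int sqs_id, unsigned int qidx)
{
    if (!secondary)
        return qidx;
    return qidx + (sqs_id + 1) * MAX_CMP_QUEUES_PER_QS;
}

int main(void)
{
    printf("q3 reg at %#llx\n",
           (unsigned long long)q_reg_addr(0, 0x010400, 3));
    printf("Qset 0, local q2 -> netdev q%u\n", netdev_qidx(1, 0, 2));
    return 0;
}
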
nicvf_ethtool.c
214 int stats, qidx; local
217 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
219 sprintf(*data, "rxq%d: %s", qidx + start_qidx,
225 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
227 sprintf(*data, "txq%d: %s", qidx + start_qidx,
301 int stat, qidx; local
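
Note: the ethtool strings loop stamps a per-queue prefix onto each stat name ("rxq%d: %s" / "txq%d: %s"), offset by start_qidx so that secondary queue sets keep globally unique names. A standalone sketch of the naming scheme, with made-up stat names:

#include <stdio.h>

static const char *rxq_stats[] = { "bytes", "frames" };   /* made-up names */
#define NUM_RXQ_STATS (sizeof(rxq_stats) / sizeof(rxq_stats[0]))

int main(void)
{
    char name[32];
    int start_qidx = 8;   /* e.g. this VF's first queue in global numbering */

    for (int qidx = 0; qidx < 2; qidx++)
        for (unsigned int s = 0; s < NUM_RXQ_STATS; s++) {
            snprintf(name, sizeof(name), "rxq%d: %s",
                     qidx + start_qidx, rxq_stats[s]);
            puts(name);   /* rxq8: bytes, rxq8: frames, rxq9: ... */
        }
    return 0;
}
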
nicvf_queues.h
336 int qidx, bool enable);
338 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
339 void nicvf_sq_disable(struct nicvf *nic, int qidx);
342 struct snd_queue *sq, int qidx);
365 u64 qidx, u64 val);
367 u64 offset, u64 qidx);
/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
qos_sq.c
33 static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx) argument
56 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
76 sq = &qset->sq[qidx];
116 static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx) argument
124 sq = &qset->sq[qidx];
140 sq = &qset->sq[qidx];
151 static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx) argument
157 incr = (u64)qidx << 32;
165 static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id) argument
176 cn10k_sq_aq->qidx
222 int qidx; local
230 otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx) argument
235 otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx) argument
259 otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) argument
cn10k.h
28 void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
29 int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
cn10k.c
75 int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) argument
85 aq->sq.cq = pfvf->hw.rx_queues + qidx;
89 aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
102 aq->qidx = qidx;
138 void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) argument
321 aq->qidx = rq_idx;
345 int qidx, rc; local
350 for (qidx = 0; qidx < h
468 int qidx, rc; local
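
Note: cn10k_sq_aq_init() binds send queue qidx to completion queue rx_queues + qidx, i.e. Rx CQs occupy the first rx_queues slots of a flat CQ array and Tx CQs follow; the otx2_txrx.c hits below invert this with qidx = cq->cq_idx - pfvf->hw.rx_queues. A standalone model of the two directions:

#include <stdio.h>

#define RX_QUEUES 4   /* illustrative pfvf->hw.rx_queues */

/* SQ qidx -> CQ index: Tx completion queues sit after the Rx ones. */
static int sq_to_cq(int qidx)   { return RX_QUEUES + qidx; }

/* CQ index -> SQ qidx: the inverse used when servicing Tx completions. */
static int cq_to_sq(int cq_idx) { return cq_idx - RX_QUEUES; }

int main(void)
{
    int qidx = 2, cq = sq_to_cq(qidx);
    printf("sq %d -> cq %d -> sq %d\n", qidx, cq, cq_to_sq(cq));
    return 0;
}
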
otx2_common.c
20 struct otx2_nic *pfvf, int qidx)
22 u64 incr = (u64)qidx << 32;
33 struct otx2_nic *pfvf, int qidx)
35 u64 incr = (u64)qidx << 32;
76 int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) argument
78 struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
83 otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
87 int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) argument
89 struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];
94 if (qidx >
19 otx2_nix_rq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) argument
32 otx2_nix_sq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) argument
509 otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx) argument
818 int qidx, sqe_tail, sqe_head; local
853 otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) argument
886 otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) argument
923 otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) argument
985 otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) argument
1080 int qidx; local
1096 int qidx, err; local
1190 int sqb, qidx; local
1428 int qidx, pool_id, stack_pages, num_sqbs; local
1773 int irq, qidx; local
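
Note: otx2_nix_rq_op_stats(), otx2_nix_sq_op_stats() and otx2_qos_sqb_flush() all build the same operand, incr = (u64)qidx << 32: the queue index rides in the upper 32 bits of the value handed to the hardware's atomic op window, so a single register access selects a queue. A standalone model of that packing (what the low bits carry is not visible in these hits and is assumed to be a stat offset):

#include <stdint.h>
#include <stdio.h>

/* Queue index in the high 32 bits; the low word is assumed here. */
static uint64_t make_op(uint32_t qidx, uint32_t low)
{
    return ((uint64_t)qidx << 32) | low;
}

int main(void)
{
    uint64_t incr = make_op(5, 0x10);
    printf("incr=%#llx selects qidx %u\n",
           (unsigned long long)incr, (unsigned int)(incr >> 32));
    return 0;
}
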
qos.h
27 void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx);
28 int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx);
29 void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx);
otx2_txrx.c
196 int qidx)
227 pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
255 int qidx)
268 pfvf->hw_ops->aura_freeptr(pfvf, qidx,
275 struct nix_cqe_rx_s *cqe, int qidx)
283 qidx, parse->errlev, parse->errcode);
334 otx2_free_rcv_seg(pfvf, cqe, qidx);
450 int tx_pkts = 0, tx_bytes = 0, qidx; local
462 qidx = cq->cq_idx - pfvf->hw.rx_queues;
463 sq = &pfvf->qset.sq[qidx];
194 otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, u64 iova, int len, struct nix_rx_parse_s *parse, int qidx) argument
254 otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, int qidx) argument
274 otx2_check_rcv_errors(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, int qidx) argument
595 otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) argument
750 otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct nix_sqe_hdr_s *sqe_hdr, struct sk_buff *skb, u16 qidx) argument
880 otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx) argument
1141 otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx) argument
1210 otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx) argument
1257 int qidx; local
1367 otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) argument
1407 int qidx = cq->cq_idx; local
otx2_dcbnl.c
159 cn10k_sq_aq->qidx = prio;
174 sq_aq->qidx = prio;
325 void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, argument
333 if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
336 pfvf->queue_to_pfc_map[qidx], qidx);
345 pfvf->queue_to_pfc_map[qidx] = vlan_prio;
357 aq->qidx = qidx;
372 npa_aq->aura_id = qidx;
otx2_pf.c
1258 u64 qidx = 0; local
1261 for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
1263 val = otx2_atomic64_add((qidx << 44), ptr);
1265 otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
1273 qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1277 qidx);
1281 qidx);
1288 for (qidx
1373 int qidx = cq_poll->cint_idx; local
1393 int qidx; local
1407 int qidx; local
1421 int qidx; local
1601 int qidx; local
1765 int err = 0, qidx, vec; local
1975 int qidx, vec, wrk; local
2043 int qidx = skb_get_queue_mapping(skb); local
2585 otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf, int qidx) argument
2612 int qidx = smp_processor_id(); local
otx2_ethtool.c
86 int qidx, stats; local
88 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
90 sprintf(*data, "rxq%d: %s", qidx + start_qidx,
96 for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
98 if (qidx >= pfvf->hw.non_qos_queues)
100 qidx
153 int stat, qidx; local
492 int qidx; local
otx2_txrx.h
168 struct sk_buff *skb, u16 qidx);
170 int size, int qidx);
172 int size, int qidx);
otx2_common.h
371 int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
373 int size, int qidx);
913 static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx) argument
917 if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
918 return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
920 /* check if qidx falls under QOS queues */
921 if (qidx >= pfvf->hw.non_qos_queues)
922 smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
961 void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
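
Note: the otx2_get_smq_idx() hit is nearly complete and shows qidx doubling as a classifier: low indices with a PFC allocation map to a per-priority SMQ, indices at or above hw.non_qos_queues map through the QoS qid_to_sqmap table, and everything else presumably falls through to a default SMQ. A standalone model under those assumptions, with made-up table contents:

#include <stdbool.h>
#include <stdio.h>

#define PFC_PRIO_MAX    8   /* stands in for NIX_PF_PFC_PRIO_MAX */
#define NON_QOS_QUEUES 16   /* stands in for pfvf->hw.non_qos_queues */
#define DEFAULT_SMQ   100   /* assumed fallback */

static bool pfc_alloc[PFC_PRIO_MAX] = { [3] = true };
static unsigned short pfc_smq[PFC_PRIO_MAX] = { [3] = 42 };
static unsigned short qid_to_sqmap[8] = { 60, 61, 62, 63, 64, 65, 66, 67 };

static unsigned short get_smq_idx(unsigned short qidx)
{
    if (qidx < PFC_PRIO_MAX && pfc_alloc[qidx])
        return pfc_smq[qidx];            /* PFC priority queue */
    if (qidx >= NON_QOS_QUEUES)          /* QoS send queue */
        return qid_to_sqmap[qidx - NON_QOS_QUEUES];
    return DEFAULT_SMQ;                  /* regular Tx queue */
}

int main(void)
{
    printf("%u %u %u\n", get_smq_idx(3), get_smq_idx(17), get_smq_idx(5));
    /* prints: 42 61 100 */
    return 0;
}
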
/linux-master/drivers/dma/ptdma/
ptdma-dev.c
72 u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
84 cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;
90 tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
136 tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
216 cmd_q->qidx = 0;
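
Note: in ptdma, qidx is a ring cursor rather than a queue number: the next descriptor slot is &cmd_q->qbase[cmd_q->qidx], the cursor advances modulo CMD_Q_LEN, and the doorbell tail is the ring's DMA base plus qidx * Q_DESC_SIZE (truncated to 32 bits in the driver). A standalone model of the cursor arithmetic, with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

#define CMD_Q_LEN   32   /* illustrative ring depth */
#define Q_DESC_SIZE 32   /* illustrative descriptor size in bytes */

struct cmd_q {
    uint32_t qidx;        /* next free slot in the ring */
    uint64_t qdma_tail;   /* DMA address of the ring base */
};

/* Advance the cursor past the just-filled slot and return the new
 * tail offset that would be written to the doorbell register. */
static uint32_t queue_advance(struct cmd_q *q)
{
    q->qidx = (q->qidx + 1) % CMD_Q_LEN;
    return (uint32_t)(q->qdma_tail + q->qidx * Q_DESC_SIZE);
}

int main(void)
{
    struct cmd_q q = { .qidx = CMD_Q_LEN - 1, .qdma_tail = 0x1000 };
    printf("tail=%#x qidx=%u\n", queue_advance(&q), q.qidx); /* wraps to 0 */
    return 0;
}
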
/linux-master/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/
chcr_ipsec.c
422 u32 qidx; local
430 qidx = skb->queue_mapping;
431 q = &adap->sge.ethtxq[qidx + pi->first_qset];
472 u32 ctrl0, qidx; local
478 qidx = skb->queue_mapping;
479 q = &adap->sge.ethtxq[qidx + pi->first_qset];
517 unsigned int qidx; local
521 qidx = skb->queue_mapping;
522 q = &adap->sge.ethtxq[qidx + pi->first_qset];
577 int qidx local
715 int qidx, left, credits; local
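
Note: every path in chcr_ipsec.c resolves its Tx queue the same way: qidx = skb->queue_mapping, then q = &adap->sge.ethtxq[qidx + pi->first_qset], i.e. the stack's per-skb queue number indexes into this port's slice of the adapter-wide Tx queue array. A standalone model:

#include <stdio.h>

struct ethtxq { int id; };

/* Adapter-wide Tx queue array; each port owns a contiguous slice
 * starting at first_qset. */
static struct ethtxq ethtxq[16];

static struct ethtxq *pick_txq(unsigned int queue_mapping,
                               unsigned int first_qset)
{
    return &ethtxq[queue_mapping + first_qset];
}

int main(void)
{
    for (int i = 0; i < 16; i++)
        ethtxq[i].id = i;
    /* skb mapped to queue 2 on a port whose qsets start at index 8 */
    printf("txq id %d\n", pick_txq(2, 8)->id);   /* prints 10 */
    return 0;
}
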
/linux-master/drivers/scsi/csiostor/
csio_wr.c
740 * @qidx: Egress queue index
745 csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx) argument
747 struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
756 * @qidx: Ingress queue index
762 csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx) argument
765 struct csio_q *q = wrm->q_arr[qidx];
847 * @qidx: Index of queue.
863 csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size, argument
867 struct csio_q *q = wrm->q_arr[qidx];
878 CSIO_DB_ASSERT((qidx >
982 csio_wr_issue(struct csio_hw *hw, int qidx, bool prio) argument
1262 csio_wr_process_iq_idx(struct csio_hw *hw, int qidx, void (*iq_handler)(struct csio_hw *, void *, uint32_t, struct csio_fl_dma_buf *, void *), void *priv) argument
/linux-master/drivers/net/ethernet/broadcom/bnxt/
bnxt_dcb.c
55 u8 qidx; local
60 qidx = bp->tc_to_qidx[ets->prio_tc[i]];
61 pri2cos[i] = bp->q_info[qidx].queue_id;
108 u8 qidx = bp->tc_to_qidx[i]; local
112 qidx);
115 cos2bw.queue_id = bp->q_info[qidx].queue_id;
131 if (qidx == 0) {
277 u8 qidx = bp->tc_to_qidx[i]; local
279 if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
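
Note: bnxt_dcb.c resolves a priority in two hops: ets->prio_tc[] maps priority to traffic class, bp->tc_to_qidx[] maps the class to a hardware queue index, and q_info[qidx].queue_id is what firmware actually sees. A standalone model of the double indirection, with made-up tables:

#include <stdio.h>

#define MAX_PRIO 8

static unsigned char prio_tc[MAX_PRIO]   = { 0, 0, 1, 1, 2, 2, 3, 3 };
static unsigned char tc_to_qidx[4]       = { 0, 1, 2, 3 };
static unsigned char queue_id_of_qidx[4] = { 10, 11, 12, 13 };

/* priority -> traffic class -> queue index -> firmware queue id */
static unsigned char pri2cos(int prio)
{
    unsigned char qidx = tc_to_qidx[prio_tc[prio]];
    return queue_id_of_qidx[qidx];
}

int main(void)
{
    printf("prio 5 -> queue id %u\n", pri2cos(5)); /* tc 2 -> qidx 2 -> 12 */
    return 0;
}
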
/linux-master/include/linux/
nvme-fc-driver.h
480 unsigned int qidx, u16 qsize,
483 unsigned int qidx, void *handle);
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_amdkfd_gfx_v9.c
1027 int qidx; local
1065 for (qidx = 0; qidx < max_queue_cnt; qidx++) {
1070 if (!test_bit(qidx, cp_queue_bitmap))
1073 if (!(queue_map & (1 << qidx)))
1077 get_wave_count(adev, qidx, &wave_cnt, &vmid,
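
Note: the amdgpu hit walks every possible compute queue index, skipping slots absent from cp_queue_bitmap (not usable by compute) and slots clear in the runtime queue_map (nothing allocated), and only then reads the wave count. A standalone model of the two-mask walk, with plain bit ops standing in for test_bit():

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES 32

int main(void)
{
    uint32_t cp_queue_bitmap = 0x00FF;  /* queues 0-7 exist for compute */
    uint32_t queue_map       = 0x0014;  /* queues 2 and 4 are active */

    for (int qidx = 0; qidx < MAX_QUEUES; qidx++) {
        if (!(cp_queue_bitmap & (1u << qidx)))  /* stands in for test_bit() */
            continue;
        if (!(queue_map & (1u << qidx)))
            continue;
        printf("would read wave count for queue %d\n", qidx); /* 2, then 4 */
    }
    return 0;
}
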
