/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_hw_csr.h
  87: #define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
  88: (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
  91: #define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
  92: (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
  95: #define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
  96: (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
  99: #define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
  100: (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
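These macros follow the usual per-queue register layout: a base offset selected by q_id plus a per-page stride, with the high and low halves of each page's physical address in adjacent registers. A minimal sketch of the pattern, using made-up offsets and strides (the real values live in hinic_hw_csr.h, not here):

        /* Hypothetical offsets/strides, for illustration only. */
        #define EQ_MTT_BASE            0x200
        #define EQ_MTT_STRIDE          0x40          /* per-queue spacing */
        #define EQ_PAGE_STRIDE         0x8           /* per-page spacing  */

        #define EQ_MTT_OFF(q_id)       (EQ_MTT_BASE + (q_id) * EQ_MTT_STRIDE)

        /* high/low halves of a page's DMA address sit in adjacent 32-bit registers */
        #define EQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
                (EQ_MTT_OFF(q_id) + (pg_num) * EQ_PAGE_STRIDE)
        #define EQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
                (EQ_MTT_OFF(q_id) + (pg_num) * EQ_PAGE_STRIDE + 0x4)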
hinic_hw_eqs.c
  33: HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
  34: HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
  37: HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
  38: HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
  41: HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
  42: HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
  45: HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
  46: HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
  80: container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
  83: container_of((eq) - (eq)->q_id, struc
  722: init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, struct msix_entry entry)  (argument)
  865: int err, i, q_id;  (local)
  899: int q_id;  (local)
  923: int i, q_id, err;  (local)
  953: int q_id;  (local)
  963: int q_id;  (local)
  983: int q_id;  (local)
  [all...]
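The container_of() uses at lines 80 and 83 rely on each hinic_eq recording its own index: stepping the element pointer back by q_id lands on element 0 of the embedded array, from which the parent structure can be recovered. A self-contained sketch of the idiom (the structure names here are invented, not the driver's):

        #include <stddef.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct eq  { int q_id; };
        struct eqs { int num_eqs; struct eq eq[4]; };   /* hypothetical parent */

        /* Given e == &parent->eq[e->q_id], step back q_id elements to eq[0],
         * then recover the enclosing structure from that first element. */
        static struct eqs *eq_to_eqs(struct eq *e)
        {
                return container_of(e - e->q_id, struct eqs, eq[0]);
        }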
hinic_hw_io.c
  30: #define CI_ADDR(base_addr, q_id) ((base_addr) + \
  31: (q_id) * CI_Q_ADDR_SIZE)
  132: base_qpn + qp->q_id);
  176: base_qpn + qp->q_id);
  265: * @q_id: the id of the qp
  272: struct hinic_qp *qp, int q_id,
  281: qp->q_id = q_id;
  283: err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
  291: err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
  271: init_qp(struct hinic_func_to_io *func_to_io, struct hinic_qp *qp, int q_id, struct msix_entry *sq_msix_entry, struct msix_entry *rq_msix_entry)  (argument)
  350: int q_id = qp->q_id;  (local)
  [all...]
hinic_tx.c
  494: u16 prod_idx, q_id = skb->queue_mapping;  (local)
  502: txq = &nic_dev->txqs[q_id];
  514: netif_stop_subqueue(netdev, qp->q_id);
  518: netif_wake_subqueue(nic_dev->netdev, qp->q_id);
  537: netdev_txq = netdev_get_tx_queue(netdev, q_id);
  555: u16 prod_idx, q_id = skb->queue_mapping;  (local)
  563: txq = &nic_dev->txqs[q_id];
  595: netif_stop_subqueue(netdev, qp->q_id);
  602: netif_wake_subqueue(nic_dev->netdev, qp->q_id);
  626: netdev_txq = netdev_get_tx_queue(netdev, q_id);
  [all...]
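These hits are the standard multi-queue transmit pattern: skb->queue_mapping selects both the driver's per-queue TX state and the netdev subqueue used for flow control. A hedged sketch of that shape (the example_* names are placeholders for driver-private pieces, not hinic symbols):

        /* Sketch of the transmit path above; example_nic, example_txq,
         * example_txq_full() and example_post_descriptor() are invented. */
        static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *netdev)
        {
                struct example_nic *nic = netdev_priv(netdev);
                u16 q_id = skb->queue_mapping;            /* queue chosen by the stack */
                struct example_txq *txq = &nic->txqs[q_id];

                if (example_txq_full(txq)) {
                        /* Stop only this subqueue; other queues keep transmitting. */
                        netif_stop_subqueue(netdev, q_id);
                        return NETDEV_TX_BUSY;
                }

                example_post_descriptor(txq, skb);
                return NETDEV_TX_OK;
        }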
hinic_hw_cmdq.h
  118: u32 q_id;  (member in struct hinic_cmdq_arm_bit)
hinic_rx.c
  421: skb_record_rx_queue(skb, qp->q_id);
  532: intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
  549: cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
  594: "%s_rxq%d", netdev->name, qp->q_id);
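On the receive side the same id is recorded into the skb for steering and used to spread per-queue interrupts across CPUs. A sketch under those assumptions (example_rxq is a placeholder, and the irq_set_affinity_hint() call is my addition for completeness rather than something shown in the hits above):

        static void example_rx_record_queue(struct example_rxq *rxq, struct sk_buff *skb)
        {
                /* Tell the stack which RX queue the packet arrived on (RPS/RFS). */
                skb_record_rx_queue(skb, rxq->q_id);
        }

        static void example_rx_set_affinity(struct example_rxq *rxq)
        {
                /* Spread queue interrupts round-robin over the online CPUs. */
                cpumask_set_cpu(rxq->q_id % num_online_cpus(), &rxq->affinity_mask);
                irq_set_affinity_hint(rxq->irq, &rxq->affinity_mask);
        }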
hinic_main.c
  818: u16 num_sqs, q_id;  (local)
  824: for (q_id = 0; q_id < num_sqs; q_id++) {
  825: if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
  828: sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);
  833: q_id, sw_pi, hw_ci, sw_ci,
  834: nic_dev->txqs[q_id].napi.state);
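This is the usual .ndo_tx_timeout diagnostic loop: walk every TX queue, skip the ones the stack still considers running, and dump state for the stuck ones. A sketch with the driver-private parts reduced to what struct netdev_queue already exposes (example_nic is a placeholder):

        static void example_tx_timeout(struct net_device *netdev, unsigned int txqueue)
        {
                struct example_nic *nic = netdev_priv(netdev);
                u16 q_id;

                for (q_id = 0; q_id < nic->num_txqs; q_id++) {
                        struct netdev_queue *txq = netdev_get_tx_queue(netdev, q_id);

                        if (!netif_xmit_stopped(txq))
                                continue;

                        /* A real driver would also dump its software producer index
                         * and the hardware consumer index for this queue. */
                        netdev_err(netdev, "txq %u stuck (trans_start=%lu)\n",
                                   q_id, txq->trans_start);
                }
        }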
/linux-master/fs/xfs/scrub/
quotacheck_repair.c
  70: error = xfarray_load_sparse(counts, dq->q_id, &xcdq);
  97: error = xfarray_store(counts, dq->q_id, &xcdq);
  111: trace_xrep_quotacheck_dquot(xqc->sc->mp, dq->q_type, dq->q_id);
  115: if (dq->q_id)
quotacheck.c
  44: xfs_dqid_t q_id;  (member in struct xqcheck_dqtrx)
  188: xfs_dqid_t q_id)
  195: dqa->dqtrx[i].q_id == q_id))
  271: dqtrx = xqcheck_get_dqtrx(dqa, p->q_type, p->q_id);
  276: dqtrx->q_id = p->q_id;
  354: dqtrx = xqcheck_get_dqtrx(dqa, p->q_type, p->q_id);
  360: error = xqcheck_update_incore_counts(xqc, counts, p->q_id,
  566: error = xfarray_load_sparse(counts, dq->q_id,
  185: xqcheck_get_dqtrx(struct xqcheck_dqacct *dqa, xfs_dqtype_t q_type, xfs_dqid_t q_id)  (argument)
  [all...]
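xqcheck_get_dqtrx() above implements a tiny fixed-size lookup: a small array of per-dquot delta trackers searched by (q_type, q_id), where an unused slot doubles as the insertion point for a new dquot. A self-contained sketch of that pattern (field names and the empty-slot convention are illustrative, not XFS's exact layout):

        #include <stdint.h>
        #include <stddef.h>

        #define MAX_DQTRX 4

        struct dqtrx {
                uint8_t  q_type;        /* 0 means the slot is unused */
                uint32_t q_id;
                int64_t  bcount_delta;
        };

        struct dqacct {
                struct dqtrx dqtrx[MAX_DQTRX];
        };

        static struct dqtrx *get_dqtrx(struct dqacct *dqa, uint8_t q_type, uint32_t q_id)
        {
                for (size_t i = 0; i < MAX_DQTRX; i++) {
                        struct dqtrx *p = &dqa->dqtrx[i];

                        /* Either an empty slot we can claim, or the matching entry. */
                        if (p->q_type == 0 || (p->q_type == q_type && p->q_id == q_id))
                                return p;
                }
                return NULL;
        }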
quota.c
  169: offset = dq->q_id / qi->qi_dqperchunk;
  170: if (dq->q_id && dq->q_id <= sqi->last_id)
  173: sqi->last_id = dq->q_id;
  228: if (dq->q_id == 0)
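The two checks above are worth spelling out: a dquot's id fixes which chunk of the quota file it may occupy (q_id / qi_dqperchunk), and ids seen during a scan must be strictly increasing, with id 0 exempt since it is the default-limits dquot. A compact standalone sketch (constant and helper names are illustrative):

        #include <stdint.h>
        #include <stdbool.h>

        static bool check_dquot_id(uint32_t q_id, uint32_t dqperchunk,
                                   uint32_t *last_id, uint64_t *offset)
        {
                *offset = q_id / dqperchunk;    /* chunk index within the quota file */

                if (q_id && q_id <= *last_id)   /* out-of-order id => corruption */
                        return false;

                *last_id = q_id;
                return true;
        }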
dqiterate.c
  158: *next_incore_id = dq->q_id;
  208: cursor->id = dq->q_id + 1;
quota_repair.c
  83: trace_xrep_dquot_item_fill_bmap_hole(sc->mp, dq->q_type, dq->q_id);
  92: xfs_qm_init_dquot_blk(sc->tp, dq->q_id, dq->q_type, bp);
  115: xfs_fileoff_t offset = dq->q_id / qi->qi_dqperchunk;
  252: trace_xrep_dquot_item(sc->mp, dq->q_type, dq->q_id);
  256: if (dq->q_id) {
/linux-master/tools/cgroup/
iocost_monitor.py
  64: def __init__(self, root_blkcg, q_id, include_dying=False):
  67: self.walk(root_blkcg, q_id, '')
  72: def walk(self, blkcg, q_id, parent_path):
  80: address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
  88: self.walk(c, q_id, path)
  224: q_id = None  (variable)
  232: q_id = blkg.q.id.value_()
  258: for path, blkg in BlkgIterator(blkcg_root, q_id):
/linux-master/fs/xfs/
xfs_dquot.c
  101: ASSERT(dq->q_id);
  188: ASSERT(dq->q_id);
  376: xfs_qm_init_dquot_blk(tp, dqp->q_id, qtype, bp);
  500: dqp->q_id = id;
  557: if (be32_to_cpu(ddqp->d_id) != dqp->q_id)
  566: dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
  597: __this_address, dqp->q_id);
  642: ddqp->d_id = cpu_to_be32(dqp->q_id);
  1209: if (dqp->q_id == 0)
  1228: if (dqp->q_id
  [all...]
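These hits show the id's two representations: a big-endian d_id on disk versus a native-endian q_id in core, with id 0 special-cased as the default-limits dquot. A self-contained sketch of the round trip, using htonl/ntohl as stand-ins for cpu_to_be32/be32_to_cpu (struct names are illustrative, not XFS's on-disk format):

        #include <stdint.h>
        #include <stdbool.h>
        #include <arpa/inet.h>

        struct disk_dquot { uint32_t d_id; };   /* big-endian on disk */
        struct core_dquot { uint32_t q_id; };   /* native-endian in memory */

        static bool dquot_id_matches(const struct disk_dquot *ddqp,
                                     const struct core_dquot *dqp)
        {
                return ntohl(ddqp->d_id) == dqp->q_id;   /* verify on read */
        }

        static void dquot_write_id(struct disk_dquot *ddqp,
                                   const struct core_dquot *dqp)
        {
                ddqp->d_id = htonl(dqp->q_id);           /* convert on write */
        }

        static bool dquot_is_default(const struct core_dquot *dqp)
        {
                return dqp->q_id == 0;                   /* id 0: default limits */
        }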
xfs_trans_dquot.c
  61: if (dqp->q_id != 0 &&
  166: .q_id = dqp->q_id,
  451: .q_id = dqp->q_id,
  565: if (dqp->q_id) {
  620: .q_id = dqp->q_id,
  723: quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
  810: if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id
  [all...]
xfs_qm.h
  119: xfs_dqid_t q_id;  (member in struct xfs_mod_ino_dqtrx_params)
xfs_dquot_item.c
  56: qlf->qlf_id = qlip->qli_dquot->q_id;
xfs_dquot.h
  67: xfs_dqid_t q_id;  (member in struct xfs_dquot)
/linux-master/drivers/block/
ublk_drv.c
  132: int q_id;  (member in struct ublk_queue)
  639: static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)  (argument)
  641: return ublk_get_queue(ub, q_id)->io_cmd_buf;
  644: static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)  (argument)
  646: struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
  1086: __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
  1112: __func__, io->cmd->cmd_op, ubq->q_id,
  1126: __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
  1304: int q_id, ret = 0;  (local)
  1323: q_id
  1615: ublk_handle_need_get_data(struct ublk_device *ub, int q_id, int tag)  (argument)
  1881: u16 tag, q_id;  (local)
  1970: ublk_deinit_queue(struct ublk_device *ub, int q_id)  (argument)
  1981: ublk_init_queue(struct ublk_device *ub, int q_id)  (argument)
  [all...]
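ublk_queue_cmd_buf() and ublk_queue_cmd_buf_size() hand userspace a per-queue command buffer addressed by q_id. A plausible sketch of the sizing logic, assuming one fixed-size I/O descriptor per tag rounded up to a whole page (the descriptor layout and rounding here are illustrative, not ublk's exact definitions):

        #include <stdint.h>
        #include <stddef.h>

        #define EXAMPLE_PAGE_SIZE 4096u

        /* Illustrative descriptor; the real one is struct ublksrv_io_desc. */
        struct example_io_desc { uint64_t addr; uint32_t len; uint32_t op_flags; };

        static size_t example_queue_cmd_buf_size(unsigned int q_depth)
        {
                size_t raw = q_depth * sizeof(struct example_io_desc);

                /* Round up to a page so the buffer maps cleanly into userspace. */
                return (raw + EXAMPLE_PAGE_SIZE - 1) & ~(size_t)(EXAMPLE_PAGE_SIZE - 1);
        }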
/linux-master/drivers/net/wireless/intel/iwlwifi/pcie/
tx-gen2.c
  31: struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
  146: cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
  194: cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
tx.c
  87: txq_id != trans->txqs.cmd.q_id &&
  201: if (txq_id != trans->txqs.cmd.q_id) {
  213: txq_id == trans->txqs.cmd.q_id)
  249: if (txq_id == trans->txqs.cmd.q_id)
  312: iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
  520: bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
  574: bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
  736: if (txq_id == trans->txqs.cmd.q_id &&
  744: if (txq_id != trans->txqs.cmd.q_id)
  814: if (txq_id == trans->txqs.cmd.q_id
  [all...]
/linux-master/drivers/net/ethernet/intel/ice/
ice_base.c
  809: u16 q_id, q_base;  (local)
  819: for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
  820: struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
  836: for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
  837: struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
  1137: &txq_meta->q_id,
  [all...]
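The loops above hand out contiguous blocks of ring ids to each interrupt vector: a vector owns ring ids [q_base, q_base + rings_per_v). A hedged sketch of that mapping for the TX side (the example_* names are placeholders; RX follows the same shape):

        static void example_map_tx_rings_to_vector(struct example_vsi *vsi,
                                                   struct example_q_vector *q_vector,
                                                   u16 q_base, u16 rings_per_v)
        {
                u16 q_id;

                for (q_id = q_base; q_id < q_base + rings_per_v; q_id++) {
                        struct example_tx_ring *ring = vsi->tx_rings[q_id];

                        ring->q_vector = q_vector;      /* ring -> vector back-pointer */
                        example_add_ring_to_vector(q_vector, ring);
                }
        }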
ice_idc.c
  119: u16 q_id;  (local)
  130: q_id = qset->qs_handle;
  135: return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
/linux-master/drivers/net/ethernet/intel/idpf/
idpf_controlq_api.h
  101: int q_id;  (member in struct idpf_ctlq_info)
/linux-master/drivers/net/ethernet/hisilicon/hns3/hns3pf/
hclge_tm.h
  266: int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id);
  267: int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id);