/linux-master/block/
blk-mq-debugfs.c
    14: #include "blk-rq-qos.h"
   278: int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
   280:         const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
   281:         const enum req_op op = req_op(rq);
   284:         seq_printf(m, "%p {.op=", rq);
   290:         blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
   293:         blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
   295:         seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
   296:         seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
   297:                    rq ...
   352: hctx_show_busy_rq(struct request *rq, void *data)
   [more matches elided]
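The __blk_mq_debugfs_rq_show() fragment above prints a request's flag words by name through blk_flags_show(). A minimal userspace sketch of that print-set-bits-by-name pattern; the name table below is invented, while the kernel's real tables are cmd_flag_name[] and rqf_name[] in blk-mq-debugfs.c:

#include <stdio.h>

/* Hypothetical flag-name table; stands in for cmd_flag_name[]/rqf_name[]. */
static const char *const flag_name[] = { "SYNC", "META", "FUA" };

/* Print each set bit by name, falling back to the raw bit index,
 * mirroring what blk_flags_show() emits into a seq_file. */
static void flags_show(unsigned int flags)
{
	const char *sep = "";

	for (unsigned int i = 0; i < 8 * sizeof(flags); i++) {
		if (!(flags & (1u << i)))
			continue;
		if (i < sizeof(flag_name) / sizeof(flag_name[0]))
			printf("%s%s", sep, flag_name[i]);
		else
			printf("%s%u", sep, i);
		sep = "|";
	}
	putchar('\n');
}

int main(void)
{
	flags_show(0x5);	/* prints "SYNC|FUA" */
	return 0;
}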
blk-cgroup.h
   456:  * @rq: request to merge into
   459:  * @bio and @rq should belong to the same cgroup and their issue_as_root should ...
   463: static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
   465:         return rq->bio->bi_blkg == bio->bi_blkg &&
   466:                bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
   504: static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
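Both definitions of blk_cgroup_mergeable() appear above: with CONFIG_BLK_CGROUP enabled, a bio may merge into a request only when the request's first bio and the candidate bio share a blkcg and agree on issue-as-root status; the one-line stub at line 504 makes merging unconditional when cgroups are off. A minimal userspace model of the rule (the struct names are stand-ins, not kernel types):

#include <stdbool.h>
#include <stdio.h>

struct blkg  { int id; };			/* stand-in for struct blkcg_gq */
struct bio_m { struct blkg *bi_blkg; bool issue_as_root; };
struct rq_m  { struct bio_m *bio; };

/* Same cgroup and same root-issue status, as at lines 465-466 above. */
static bool blkg_mergeable(const struct rq_m *rq, const struct bio_m *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
	       rq->bio->issue_as_root == bio->issue_as_root;
}

int main(void)
{
	struct blkg g1 = { 1 }, g2 = { 2 };
	struct bio_m a = { &g1, false }, b = { &g1, false }, c = { &g2, false };
	struct rq_m rq = { &a };

	printf("same cgroup:      %d\n", blkg_mergeable(&rq, &b));	/* 1 */
	printf("different cgroup: %d\n", blkg_mergeable(&rq, &c));	/* 0 */
	return 0;
}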
/linux-master/drivers/nvme/host/ |
fc.c
    61:         struct request *rq;	/* member of struct nvmefc_ls_req_op */
   102:         struct request *rq;	/* member of struct nvme_fc_fcp_op */
   230: static void nvme_fc_complete_rq(struct request *rq);
  1833: nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
  1836:         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
  1918:         struct request *rq = op->rq;
  1920:         if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
  1922:         return blkcg_get_fc_appid(rq ...
  1930:         struct request *rq = op->rq;
  2088: __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                              struct nvme_fc_fcp_op *op, struct request *rq, u32 rqno)
  2141: nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
                            unsigned int hctx_idx, unsigned int numa_node)
  2566: nvme_fc_timeout(struct request *rq)
  2596: nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
                        struct nvme_fc_fcp_op *op)
  2631: nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
                          struct nvme_fc_fcp_op *op)
  2793:         struct request *rq = bd->rq;
  2849: nvme_fc_complete_rq(struct request *rq)
   [more matches elided]
multipath.c
   125: void nvme_mpath_start_request(struct request *rq)
   127:         struct nvme_ns *ns = rq->q->queuedata;
   130:         if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
   133:         nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
   134:         nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
   139: void nvme_mpath_end_request(struct request *rq)
   141:         struct nvme_ns *ns = rq->q->queuedata;
   143:         if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
   145:         bdev_end_io_acct(ns->head->disk->part0, req_op(rq), ...
   [more matches elided]
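nvme_mpath_start_request() and nvme_mpath_end_request() above pair up through the NVME_MPATH_IO_STATS flag: the start hook accounts only enabled, non-passthrough I/O and tags the request, so the end hook can tell which requests were ever started. A userspace sketch of that pairing (the types, flag value, and clock are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define MPATH_IO_STATS 0x1	/* stand-in for NVME_MPATH_IO_STATS */

struct req_m {
	unsigned int flags;
	long start_time;
};

static long now;	/* fake clock driving the sketch */

/* Account only real I/O on stats-enabled queues, and tag the request. */
static void start_request(struct req_m *r, bool io_stat, bool passthrough)
{
	if (!io_stat || passthrough)
		return;
	r->flags |= MPATH_IO_STATS;
	r->start_time = now;
}

/* Account only requests the start hook actually tagged. */
static void end_request(struct req_m *r)
{
	if (!(r->flags & MPATH_IO_STATS))
		return;
	printf("accounted %ld ticks\n", now - r->start_time);
}

int main(void)
{
	struct req_m r = { 0, 0 };

	start_request(&r, true, false);
	now = 5;
	end_request(&r);	/* prints "accounted 5 ticks" */
	return 0;
}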
/linux-master/arch/powerpc/platforms/cell/spufs/ |
context.c
    50:         INIT_LIST_HEAD(&ctx->rq);
    82:         BUG_ON(!list_empty(&ctx->rq));
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/ |
hv_vhca_stats.c
    24:         data->rx_packets = stats->rq.packets;
    25:         data->rx_bytes = stats->rq.bytes;
/linux-master/drivers/block/aoe/ |
aoe.h
   111:         struct request *rq;	/* member of struct buf */
   184:         struct request *rq;	/* member of an anonymous struct in struct aoedev */
/linux-master/drivers/net/ethernet/intel/ice/ |
ice_vf_mbx.c
   290:         snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
   292:         snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
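ICE_RQ_DATA_MASK() wraps a control-queue index into the valid ring range, which is what makes next_to_clean - 1 safe even when the index is 0. A sketch of the masking idiom for an assumed power-of-two ring (the macro and size are stand-ins for the driver's values):

#include <stdio.h>

#define RING_SIZE 64u				/* assumed power of two */
#define RQ_MASK(i) ((i) & (RING_SIZE - 1))	/* stand-in for ICE_RQ_DATA_MASK */

int main(void)
{
	unsigned int next_to_clean = 0;

	/* Unsigned underflow plus the mask wraps to the last slot. */
	printf("tail = %u\n", RQ_MASK(next_to_clean - 1));	/* 63 */
	printf("head = %u\n", RQ_MASK(next_to_clean + 5));	/* 5 */
	return 0;
}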
/linux-master/drivers/infiniband/hw/ocrdma/ |
ocrdma_verbs.c
  1191:         uresp.rq_dbid = qp->rq.dbid;
  1193:         uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
  1194:         uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
  1195:         uresp.num_rqe_allocated = qp->rq.max_cnt;
  1258:         kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
  1278:         qp->rq.max_sges = attrs->cap.max_recv_sge;
  1485:         qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
  1487:         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
  1563:         return (qp->rq.tail == qp->rq ...
   [more matches elided]
/linux-master/drivers/staging/octeon/ |
ethernet-mdio.c
    49:  * @rq: the request
    54: int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
    62:         return phy_mii_ioctl(dev->phydev, rq, cmd);
/linux-master/drivers/gpu/drm/i915/gt/ |
intel_context_types.h
    44:         void (*revoke)(struct intel_context *ce, struct i915_request *rq,
    55:                        struct i915_request *rq);
   169:  * active: Active tracker for the rq activity (inc. external) on this
intel_reset.c
    66: static bool mark_guilty(struct i915_request *rq)
    73:         if (intel_context_is_closed(rq->context))
    77:         ctx = rcu_dereference(rq->context->gem_context);
    82:         return intel_context_is_banned(rq->context);
   117: static void mark_innocent(struct i915_request *rq)
   122:         ctx = rcu_dereference(rq->context->gem_context);
   128: void __i915_request_reset(struct i915_request *rq, bool guilty)
   132:         RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
   133:         GEM_BUG_ON(__i915_request_is_complete(rq));
   137:         i915_request_set_error_once(rq, ...
   [more matches elided]
/linux-master/drivers/infiniband/hw/hns/ |
hns_roce_qp.c
   438:         hr_qp->rq.rsv_sge = 1;
   449:         /* If srq exist, set zero for relative number of rq */
   451:         hr_qp->rq.wqe_cnt = 0;
   452:         hr_qp->rq.max_gs = 0;
   470:         ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
   475:         hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
   476:                                               hr_qp->rq.rsv_sge);
   478:         hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
   479:                                     hr_qp->rq.max_gs);
   481:         hr_qp->rq ...
   [more matches elided]
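The sizing code above rounds the usable SGE count up to a power of two and derives the receive WQE shift with ilog2(). The same arithmetic as a standalone sketch; the helper implementations are simplified and the capability values are made up:

#include <stdio.h>

/* Simplified stand-ins for the kernel's roundup_pow_of_two()/ilog2(). */
static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int max_recv_sge = 5, rsv_sge = 1;	/* made-up caps */
	unsigned int max_rq_desc_sz = 16;		/* assumed bytes per SGE */

	unsigned int max_gs = roundup_pow_of_two(max_recv_sge + rsv_sge);
	unsigned int wqe_shift = ilog2(max_rq_desc_sz * max_gs);

	/* max_gs = 8, wqe_shift = 7, so each RQ WQE occupies 128 bytes. */
	printf("max_gs = %u, wqe_shift = %u (%u-byte WQEs)\n",
	       max_gs, wqe_shift, 1u << wqe_shift);
	return 0;
}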
/linux-master/kernel/sched/ |
topology.c
   488: void rq_attach_root(struct rq *rq, struct root_domain *rd)
   493:         rq_lock_irqsave(rq, &rf);
   495:         if (rq->rd) {
   496:                 old_rd = rq->rd;
   498:                 if (cpumask_test_cpu(rq->cpu, old_rd->online))
   499:                         set_rq_offline(rq);
   501:                 cpumask_clear_cpu(rq->cpu, old_rd->span);
   513:         rq->rd = rd;
   515:         cpumask_set_cpu(rq ...
   726:         struct rq *rq = cpu_rq(cpu);
  2395:         struct rq *rq = NULL;
   [more matches elided]
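rq_attach_root() above migrates a CPU's runqueue between root domains under the rq lock: take the CPU offline in the old domain if needed, clear it from the old span, install the new domain, and set it in the new span. A userspace sketch of just the span bookkeeping, with plain bitmasks standing in for cpumasks and no locking shown:

#include <stdio.h>

struct root_domain_m { unsigned long span; };	/* bitmask stand-in */

struct rq_m {
	int cpu;
	struct root_domain_m *rd;
};

/* Mirror of the cpumask_clear_cpu()/cpumask_set_cpu() pair above. */
static void attach_root(struct rq_m *rq, struct root_domain_m *rd)
{
	if (rq->rd)
		rq->rd->span &= ~(1ul << rq->cpu);
	rq->rd = rd;
	rd->span |= 1ul << rq->cpu;
}

int main(void)
{
	struct root_domain_m old_rd = { 0xful }, new_rd = { 0 };
	struct rq_m rq = { 2, &old_rd };

	attach_root(&rq, &new_rd);
	/* old span 0xb, new span 0x4 */
	printf("old span %#lx, new span %#lx\n", old_rd.span, new_rd.span);
	return 0;
}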
psi.c
  1039:         struct rq *rq;
  1052:         rq = this_rq_lock_irq(&rf);
  1057:         rq_unlock_irq(rq, &rf);
  1070:         struct rq *rq;
  1082:         rq = this_rq_lock_irq(&rf);
  1087:         rq_unlock_irq(rq, &rf);
  1131:  * This function acquires the task's rq lock to lock out concurrent
  1139:         struct rq *r ...
  1213:         struct rq *rq = cpu_rq(cpu);
   [more matches elided]
/linux-master/drivers/gpu/drm/i915/ |
i915_scheduler.h
    96:                     const struct i915_request *rq,
i915_vma.h
    61:                                  struct i915_request *rq,
    65: i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,
    68:         return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
/linux-master/drivers/net/ethernet/huawei/hinic/ |
hinic_debugfs.h
    87: void hinic_rq_debug_rem(struct hinic_rq *rq);
/linux-master/drivers/gpu/drm/amd/amdgpu/ |
amdgpu_job.h
    85:         return to_amdgpu_ring(job->base.entity->rq->sched);
/linux-master/drivers/infiniband/sw/rxe/ |
rxe_verbs.c
    15: static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr);
   478:         attr->max_wr = srq->rq.queue->buf->index_mask;
   479:         attr->max_sge = srq->rq.max_sge;
   495:         spin_lock_irqsave(&srq->rq.producer_lock, flags);
   498:         err = post_one_recv(&srq->rq, wr);
   504:         spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
   949: static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
   958:         full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
   965:         if (unlikely(num_sge > rq->max_sge)) {
   982:         recv_wqe = queue_producer_addr(rq ...
  1007:         struct rxe_rq *rq = &qp->rq;
   [more matches elided]
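post_one_recv() above runs under the producer lock and rejects a work request when the queue is full or when it carries more scatter-gather entries than the queue was sized for; only then is the WQE written at the producer address. A userspace sketch of those two admission checks (ring depth, SGE limit, and the error-code choices are assumptions):

#include <errno.h>
#include <stdio.h>

#define QUEUE_DEPTH 8	/* assumed ring size */
#define MAX_SGE     4	/* assumed per-queue SGE limit */

struct wqe_m { int num_sge; };

struct rq_m {
	struct wqe_m slot[QUEUE_DEPTH];
	unsigned int prod, cons;	/* producer/consumer indices */
};

/* Reject a full ring or an oversized WR, then fill the producer slot. */
static int post_one_recv(struct rq_m *rq, int num_sge)
{
	if (rq->prod - rq->cons == QUEUE_DEPTH)
		return -ENOMEM;		/* the queue_full() case */
	if (num_sge > MAX_SGE)
		return -EINVAL;		/* the max_sge overflow case */
	rq->slot[rq->prod % QUEUE_DEPTH].num_sge = num_sge;
	rq->prod++;
	return 0;
}

int main(void)
{
	struct rq_m rq = { 0 };

	printf("post ok:       %d\n", post_one_recv(&rq, 2));	/* 0 */
	printf("too many SGEs: %d\n", post_one_recv(&rq, 9));	/* -EINVAL */
	return 0;
}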
/linux-master/drivers/net/ethernet/xscale/ |
ptp_ixp46x.c
   190: ptp_ixp_enable(struct ptp_clock_info *ptp,
   191:                struct ptp_clock_request *rq, int on)
   195:         switch (rq->type) {
   197:         switch (rq->extts.index) {
/linux-master/drivers/net/ethernet/ti/icssg/ |
icss_iep.c
   576:         struct ptp_clock_request rq;
   594:         rq.perout.index = 0;
   598:         rq.perout.period.sec = 1;
   599:         rq.perout.period.nsec = 0;
   600:         rq.perout.start.sec = ts.tv_sec + 2;
   601:         rq.perout.start.nsec = 0;
   602:         ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
   604:         ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
   649: icss_iep_ptp_enable(struct ptp_clock_info *ptp,
   650:                     struct ptp_clock_request *rq, int on)
   654:         switch (rq ...
   [more matches elided]
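The driver code above fills a ptp_clock_request for a 1 Hz periodic output that starts two seconds in the future. The same request can be issued from userspace through the standard PHC character-device API in <linux/ptp_clock.h>; a sketch, assuming the clock is exposed as /dev/ptp0 and pin index 0 supports periodic output:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct ptp_perout_request req;
	struct timespec ts;
	int fd;

	fd = open("/dev/ptp0", O_RDWR);		/* assumed PHC node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	clock_gettime(FD_TO_CLOCKID(fd), &ts);	/* read the PHC itself */

	memset(&req, 0, sizeof(req));
	req.index = 0;				/* first periodic-output pin */
	req.period.sec = 1;			/* 1 Hz, as in the driver */
	req.period.nsec = 0;
	req.start.sec = ts.tv_sec + 2;		/* start 2 s from now */
	req.start.nsec = 0;

	if (ioctl(fd, PTP_PEROUT_REQUEST, &req))
		perror("PTP_PEROUT_REQUEST");

	close(fd);
	return 0;
}

The flags field is left zero here; clocks that support phase or duty-cycle control take those options through the extended PTP_PEROUT_REQUEST2 ioctl.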
/linux-master/include/drm/ |
gpu_scheduler.h
    93:  * @rq under &drm_sched_rq.entities.
    95:  * Protected by &drm_sched_rq.lock of @rq.
   100:  * @rq:
   108:         struct drm_sched_rq *rq;	/* member of struct drm_sched_entity */
   120:  * This will be set to NULL if &num_sched_list equals 1 and @rq has been
   213:  * Marks the enity as removed from rq and destined for
   247:  * @sched: the scheduler to which this rq belongs to.
   591: void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
   593: void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
/linux-master/drivers/block/mtip32xx/ |
mtip32xx.c
   956:         struct request *rq;
   974:         rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
   975:         if (IS_ERR(rq)) {
   991:         blk_mq_free_request(rq);
   999:         int_cmd = blk_mq_rq_to_pdu(rq);
  1003:         rq->timeout = timeout;
  1006:         blk_execute_rq(rq, true);
  1037:         blk_mq_free_request(rq);
  2045: static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
  2050:         dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq ...
  2429: mtip_softirq_done_fn(struct request *rq)
  3229: is_stopped(struct driver_data *dd, struct request *rq)
  3249: mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, struct request *rq)
  3273: mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx, struct request *rq)
  3314:         struct request *rq = bd->rq;
  3332: mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
                     unsigned int hctx_idx)
  3345: mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
                     unsigned int hctx_idx, unsigned int numa_node)
   [more matches elided]
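Lines 956-1037 show the stock pattern for driver-internal commands: allocate a request from the reserved tag pool with BLK_MQ_REQ_RESERVED so it cannot be starved by normal I/O, run it synchronously with blk_execute_rq(), and free it afterwards. A userspace model of why a reserved slot guarantees forward progress (pool sizes and names are invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

#define NR_TAGS     4	/* total tags, invented */
#define NR_RESERVED 1	/* tags held back for internal commands */

struct tagset_m {
	bool busy[NR_TAGS];	/* tag 0 is the reserved one here */
};

/* Normal allocations may not touch the reserved range, so an internal
 * command always finds a free tag even when I/O has taken the rest. */
static int alloc_tag(struct tagset_m *ts, bool reserved)
{
	int first = reserved ? 0 : NR_RESERVED;
	int last = reserved ? NR_RESERVED : NR_TAGS;

	for (int i = first; i < last; i++) {
		if (!ts->busy[i]) {
			ts->busy[i] = true;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	struct tagset_m ts = { { false } };

	while (alloc_tag(&ts, false) >= 0)	/* exhaust the normal tags */
		;
	printf("reserved tag: %d\n", alloc_tag(&ts, true));	/* still 0 */
	return 0;
}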
/linux-master/net/sunrpc/ |
cache.c
   830:         struct cache_request *rq;
   854:         rq = container_of(rp->q.list.next, struct cache_request, q.list);
   855:         WARN_ON_ONCE(rq->q.reader);
   857:         rq->readers++;
   860:         if (rq->len == 0) {
   861:                 err = cache_request(cd, rq);
   864:                 rq->len = err;
   867:         if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
   870:                 list_move(&rp->q.list, &rq->q.list);
   873:         if (rp->offset + count > rq ...
   [more matches elided]