Searched refs:rq (results 126–150 of 634), sorted by relevance

/linux-master/drivers/gpu/drm/i915/selftests/
intel_scheduler_helpers.h
33 int intel_selftest_wait_for_rq(struct i915_request *rq);
/linux-master/drivers/gpu/drm/i915/gem/selftests/
i915_gem_context.c
74 struct i915_request *rq = NULL; local
87 if (rq) {
88 i915_request_await_dma_fence(this, &rq->fence);
89 i915_request_put(rq);
91 rq = i915_request_get(this);
94 if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
97 i915_request_put(rq);
101 i915_request_put(rq);
116 rq = NULL;
126 if (rq) { /* Forc
200 struct i915_request *rq = NULL; local
243 struct i915_request *rq = NULL; local
945 struct i915_request *rq; local
1049 struct i915_request *rq; local
1095 struct i915_request *rq = NULL; local
1515 struct i915_request *rq; local
1604 struct i915_request *rq; local
[all...]
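
The i915_gem_context.c matches at lines 87-101 show the driver's usual request-chaining idiom: keep a reference to the most recent request, make each new request await its predecessor's fence, then wait on the last one before tearing down. A minimal sketch of that idiom, assuming the i915 request API as it appears in the matches (i915_request_create(), ce, and count are illustrative stand-ins, not taken from the results):

    struct i915_request *rq = NULL, *this;
    int i;

    for (i = 0; i < count; i++) {
            this = i915_request_create(ce);   /* assumed request source */
            if (IS_ERR(this))
                    break;
            if (rq) {
                    /* order the new request after the previous one */
                    i915_request_await_dma_fence(this, &rq->fence);
                    i915_request_put(rq);
            }
            rq = i915_request_get(this);      /* hold a ref to the newest */
            i915_request_add(this);
    }

    if (rq) {
            /* wait up to 10 seconds for the whole chain to retire */
            if (i915_request_wait(rq, 0, 10 * HZ) < 0)
                    pr_err("request chain did not complete\n");
            i915_request_put(rq);
    }
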
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en_dim.c
47 struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim); local
51 mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
en_txrx.c
61 static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) argument
63 struct mlx5e_rq_stats *stats = rq->stats;
66 if (unlikely(!test_bit(MLX5E_RQ_STATE_DIM, &rq->state)))
69 dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
70 net_dim(&rq->dim, dim_sample);
132 struct mlx5e_rq *rq = &c->rq; local
177 work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
192 busy |= INDIRECT_CALL_2(rq->post_wqes,
195 rq);
[all...]
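
The en_txrx.c matches show how the mlx5 driver feeds its dynamic interrupt moderation (DIM) engine from per-RQ counters on each NAPI poll. Reassembled from the matched lines (a sketch of the shape, not the verbatim kernel function):

    static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
    {
            struct mlx5e_rq_stats *stats = rq->stats;
            struct dim_sample dim_sample = {};

            /* DIM can be disabled per queue; skip sampling in that case */
            if (unlikely(!test_bit(MLX5E_RQ_STATE_DIM, &rq->state)))
                    return;

            /* snapshot event/packet/byte counters for the DIM algorithm */
            dim_update_sample(rq->cq.event_ctr, stats->packets,
                              stats->bytes, &dim_sample);
            net_dim(&rq->dim, dim_sample);
    }
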
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/
gk104.c
28 gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) argument
33 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
36 if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
g94.c
28 g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) argument
33 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
36 if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
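
Both nouveau hits decode the same register layout: eight AUX ports, each owning a 4-bit status field, with bit 2 of a field (mask 4 << (i * 4)) flagging a pending request. A standalone userspace rendering of that decode; the field semantics follow the matches, the test value is invented:

    #include <stdint.h>
    #include <stdio.h>

    static void decode_aux_rq(uint32_t stat, uint32_t *rq)
    {
            *rq = 0;
            for (int i = 0; i < 8; i++)
                    if (stat & (4u << (i * 4)))   /* bit 2 of port i's nibble */
                            *rq |= 1u << i;       /* fold into per-port mask */
    }

    int main(void)
    {
            uint32_t rq;
            decode_aux_rq(0x00000404, &rq);   /* ports 0 and 2 requesting */
            printf("rq mask: 0x%02x\n", rq);  /* prints 0x05 */
            return 0;
    }
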
/linux-master/drivers/gpu/drm/i915/
i915_scheduler_types.h
179 void (*kick_backend)(const struct i915_request *rq,
185 void (*bump_inflight_request_prio)(struct i915_request *rq,
192 void (*retire_inflight_request_prio)(struct i915_request *rq);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/rep/
tc.h
39 void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
68 mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, argument
69 struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); }
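
The two tc.h hits are the two halves of a common compile-out pattern: a real handler when representor TC offload is built in, and a static inline stub that hands the skb straight to GRO when it is not. Schematically (the config symbol below is an assumption, not taken from the results):

    #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)   /* assumed config symbol */
    void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
                              struct sk_buff *skb);
    #else
    static inline void
    mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
                         struct sk_buff *skb)
    { napi_gro_receive(rq->cq.napi, skb); }   /* fallback: plain GRO */
    #endif
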
/linux-master/drivers/s390/block/
scm_blk.h
18 struct request_queue *rq; member in struct:scm_blk_dev
38 #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
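
The to_aobrq() macro above is the classic container_of() idiom: recover the containing structure from a pointer to one of its members. A self-contained userspace version (the types are simplified stand-ins for the s390 driver's real ones):

    #include <stddef.h>
    #include <stdio.h>

    /* same arithmetic the kernel's container_of() performs */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct aob_rq_header {
            int id;
            char data[64];
    };

    #define to_aobrq(rq) container_of((void *)(rq), struct aob_rq_header, data)

    int main(void)
    {
            struct aob_rq_header hdr = { .id = 42 };
            void *rq = hdr.data;                   /* callers only see &hdr.data */
            printf("id = %d\n", to_aobrq(rq)->id); /* prints 42 */
            return 0;
    }
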
/linux-master/drivers/net/vmxnet3/
vmxnet3_drv.c
229 "%s: rq[%d] error 0x%x\n",
613 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, argument
617 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
618 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
630 void *data = vmxnet3_pp_get_buff(rq->page_pool,
634 rq->stats.rx_buf_alloc_failure++;
645 rq->stats.rx_buf_alloc_failure++;
657 rq->stats.rx_buf_alloc_failure++;
671 rq->stats.rx_buf_alloc_failure++;
682 rq
1304 vmxnet3_create_pp(struct vmxnet3_adapter *adapter, struct vmxnet3_rx_queue *rq, int size) argument
1421 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter) argument
1503 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter, int quota) argument
1910 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
1972 vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
2028 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
2043 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
2127 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
2258 struct vmxnet3_rx_queue *rq = container_of(napi, local
2322 struct vmxnet3_rx_queue *rq = data; local
2513 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
2823 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
3276 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
3317 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
[all...]
/linux-master/block/
blk-mq-sched.c
60 struct request *rq; local
64 list_for_each_entry(rq, rq_list, queuelist) {
65 if (rq->mq_hctx != hctx) {
66 list_cut_before(&hctx_list, rq_list, &rq->queuelist);
103 struct request *rq; local
118 rq = e->type->ops.dispatch_request(hctx);
119 if (!rq) {
132 blk_mq_set_rq_budget_token(rq, budget_token);
135 * Now this rq owns the budget which has to be released
136 * if this rq wo
221 struct request *rq; local
373 blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, struct list_head *free) argument
[all...]
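
The blk-mq-sched.c matches at lines 64-66 split a mixed dispatch list into runs that share one hardware context: the walk stops at the first request whose hctx differs, and list_cut_before() moves the leading run onto a private list for batched dispatch. A sketch of that step, with the surrounding dispatch logic elided:

    LIST_HEAD(hctx_list);
    struct request *rq;
    bool cut = false;

    list_for_each_entry(rq, rq_list, queuelist) {
            if (rq->mq_hctx != hctx) {
                    /* move everything before rq (same hctx) to hctx_list */
                    list_cut_before(&hctx_list, rq_list, &rq->queuelist);
                    cut = true;
                    break;
            }
    }
    if (!cut)   /* the whole list belonged to this hctx */
            list_splice_tail_init(rq_list, &hctx_list);
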
kyber-iosched.c
530 static int rq_get_domain_token(struct request *rq) argument
532 return (long)rq->elv.priv[0];
535 static void rq_set_domain_token(struct request *rq, int token) argument
537 rq->elv.priv[0] = (void *)(long)token;
541 struct request *rq)
546 nr = rq_get_domain_token(rq);
548 sched_domain = kyber_sched_domain(rq->cmd_flags);
550 rq->mq_ctx->cpu);
585 static void kyber_prepare_request(struct request *rq) argument
587 rq_set_domain_token(rq,
540 rq_clear_domain_token(struct kyber_queue_data *kqd, struct request *rq) argument
595 struct request *rq, *next; local
614 kyber_finish_request(struct request *rq) argument
639 kyber_completed_request(struct request *rq, u64 now) argument
759 struct request *rq; local
807 struct request *rq; local
[all...]
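
The kyber matches at lines 530-537 stash a small integer token directly in the request's void * elevator slot by round-tripping it through long, avoiding any per-request allocation. The same trick in standalone form (fake_request stands in for struct request):

    #include <stdio.h>

    struct fake_request {
            void *priv;   /* mimics rq->elv.priv[0] */
    };

    static void set_domain_token(struct fake_request *rq, int token)
    {
            rq->priv = (void *)(long)token;   /* widen, then store as pointer */
    }

    static int get_domain_token(struct fake_request *rq)
    {
            return (long)rq->priv;            /* narrow back to the token */
    }

    int main(void)
    {
            struct fake_request rq;
            set_domain_token(&rq, 17);
            printf("token = %d\n", get_domain_token(&rq));   /* prints 17 */
            return 0;
    }
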
bfq-iosched.c
231 #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
232 (get_sdist(last_pos, rq) > \
235 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
376 #define RQ_BIC(rq) ((struct bfq_io_cq *)((rq)->elv.priv[0]))
377 #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
999 struct request *rq; local
1006 rq = rq_entry_fifo(bfqq->fifo.next);
1008 if (rq
1043 bfq_serv_to_charge(struct request *rq, struct bfq_queue *bfqq) argument
1818 bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, struct bfq_queue *bfqq, int old_wr_coeff, struct request *rq, bool *interactive) argument
2204 bfq_add_request(struct request *rq) argument
2372 get_sdist(sector_t last_pos, struct request *rq) argument
2380 bfq_remove_request(struct request_queue *q, struct request *rq) argument
2560 bfq_requests_merged(struct request_queue *q, struct request *rq, struct request *next) argument
3227 bfq_allow_bio_merge(struct request_queue *q, struct request *rq, struct bio *bio) argument
3432 bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq) argument
3450 bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) argument
3591 bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) argument
3653 bfq_dispatch_remove(struct request_queue *q, struct request *rq) argument
5106 struct request *rq = bfqq->next_rq; local
5162 struct request *rq = NULL; local
5249 bfq_update_dispatch_stats(struct request_queue *q, struct request *rq, struct bfq_queue *in_serv_queue, bool idle_timer_disabled) argument
5294 bfq_update_dispatch_stats(struct request_queue *q, struct request *rq, struct bfq_queue *in_serv_queue, bool idle_timer_disabled) argument
5303 struct request *rq; local
5916 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct request *rq) argument
6078 bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct request *rq) argument
6151 __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) argument
6239 bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_insert_t flags) argument
6300 struct request *rq; local
6648 bfq_finish_requeue_request(struct request *rq) argument
6703 bfq_finish_request(struct request *rq) argument
6810 bfq_prepare_request(struct request *rq) argument
6845 bfq_init_rq(struct request *rq) argument
[all...]
blk-mq-tag.c
255 struct request *rq; local
259 rq = tags->rqs[bitnr];
260 if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
261 rq = NULL;
263 return rq;
273 struct request *rq; local
284 * We can hit rq == NULL here, because the tagging functions
287 rq
342 struct request *rq; local
452 blk_mq_tagset_count_completed_rqs(struct request *rq, void *data) argument
678 blk_mq_unique_tag(struct request *rq) argument
[all...]
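
The blk-mq-tag.c matches at lines 259-263 show the guarded tag-to-request lookup: a request is handed back only if it still occupies the tag slot and its refcount can be raised from a nonzero value, protecting iterators against requests that complete and get recycled concurrently. A condensed sketch (the kernel adds RCU protection and barriers not shown here):

    static struct request *tag_to_rq(struct blk_mq_tags *tags,
                                     unsigned int bitnr)
    {
            struct request *rq = tags->rqs[bitnr];

            /* stale slot, recycled tag, or request already freed? */
            if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
                    return NULL;
            return rq;   /* caller now owns an extra reference */
    }
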
blk-zoned.c
57 bool blk_req_needs_zone_write_lock(struct request *rq) argument
59 if (!rq->q->disk->seq_zones_wlock)
62 return blk_rq_is_seq_zoned_write(rq);
66 bool blk_req_zone_write_trylock(struct request *rq) argument
68 unsigned int zno = blk_rq_zone_no(rq);
70 if (test_and_set_bit(zno, rq->q->disk->seq_zones_wlock))
73 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
74 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
80 void __blk_req_zone_write_lock(struct request *rq) argument
82 if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
91 __blk_req_zone_write_unlock(struct request *rq) argument
[all...]
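
The blk-zoned.c matches implement a per-zone write trylock as one bit per zone: test_and_set_bit() either takes the lock or reports it already held. A userspace model of the same idea with C11 atomics, simplified to 64 zones in a single word:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic unsigned long seq_zones_wlock;   /* one lock bit per zone */

    static bool zone_write_trylock(unsigned int zno)
    {
            unsigned long bit = 1UL << zno;
            /* false if the bit was already set, i.e. another writer holds it */
            return !(atomic_fetch_or(&seq_zones_wlock, bit) & bit);
    }

    static void zone_write_unlock(unsigned int zno)
    {
            atomic_fetch_and(&seq_zones_wlock, ~(1UL << zno));
    }

    int main(void)
    {
            printf("first:  %d\n", zone_write_trylock(3));   /* 1: acquired */
            printf("second: %d\n", zone_write_trylock(3));   /* 0: contended */
            zone_write_unlock(3);
            printf("third:  %d\n", zone_write_trylock(3));   /* 1: acquired */
            return 0;
    }
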
t10-pi.c
134 * @rq: request with PI that should be prepared
142 static void t10_pi_type1_prepare(struct request *rq) argument
144 struct blk_integrity *bi = &rq->q->integrity;
146 u32 ref_tag = t10_pi_ref_tag(rq);
150 __rq_for_each_bio(bio, rq) {
183 * @rq: request with PI that should be prepared
193 static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes) argument
195 struct blk_integrity *bi = &rq->q->integrity;
198 u32 ref_tag = t10_pi_ref_tag(rq);
202 __rq_for_each_bio(bio, rq) {
249 t10_pi_type3_prepare(struct request *rq) argument
254 t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes) argument
394 ext_pi_type1_prepare(struct request *rq) argument
434 ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes) argument
[all...]
blk-rq-qos.c
3 #include "blk-rq-qos.h"
35 void __rq_qos_done(struct rq_qos *rqos, struct request *rq) argument
39 rqos->ops->done(rqos, rq);
44 void __rq_qos_issue(struct rq_qos *rqos, struct request *rq) argument
48 rqos->ops->issue(rqos, rq);
53 void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq) argument
57 rqos->ops->requeue(rqos, rq);
71 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio) argument
75 rqos->ops->track(rqos, rq, bio);
80 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struc argument
[all...]
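
The blk-rq-qos.c matches are all instances of one dispatch shape: walk the chain of registered QoS policies and invoke the optional per-policy hook for the request. The done hook, reassembled from the pattern above (a sketch; the chain walk via rqos->next is inferred, and the other __rq_qos_* helpers differ only in which hook they call):

    void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
    {
            do {
                    if (rqos->ops->done)          /* hook is optional */
                            rqos->ops->done(rqos, rq);
                    rqos = rqos->next;            /* next policy in the chain */
            } while (rqos);
    }
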
/linux-master/drivers/scsi/esas2r/
esas2r.h
406 struct esas2r_request *rq);
966 int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
1004 bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
1009 struct esas2r_request *rq);
1015 void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1022 void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1036 struct esas2r_request *rq,
1042 struct esas2r_request *rq,
1048 void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
1050 struct esas2r_request *rq,
1167 esas2r_sgc_init(struct esas2r_sg_context *sgc, struct esas2r_adapter *a, struct esas2r_request *rq, struct atto_vda_sge *first) argument
1200 esas2r_rq_init_request(struct esas2r_request *rq, struct esas2r_adapter *a) argument
1264 esas2r_rq_free_sg_lists(struct esas2r_request *rq, struct esas2r_adapter *a) argument
1277 esas2r_rq_destroy_request(struct esas2r_request *rq, struct esas2r_adapter *a) argument
1303 esas2r_build_sg_list(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc) argument
1393 esas2r_start_ae_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1408 struct esas2r_request *rq; local
[all...]
/linux-master/drivers/gpu/drm/i915/gt/
selftest_workarounds.c
37 static int request_add_sync(struct i915_request *rq, int err) argument
39 i915_request_get(rq);
40 i915_request_add(rq);
41 if (i915_request_wait(rq, 0, HZ / 5) < 0)
43 i915_request_put(rq);
48 static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin) argument
52 i915_request_get(rq);
53 i915_request_add(rq);
54 if (spin && !igt_wait_for_spinner(spin, rq))
56 i915_request_put(rq);
104 struct i915_request *rq; local
269 switch_to_scratch_context(struct intel_engine_cs *engine, struct igt_spinner *spin, struct i915_request **rq) argument
303 struct i915_request *rq; local
523 struct i915_request *rq; local
846 struct i915_request *rq; local
889 struct i915_request *rq; local
1243 struct i915_request *rq; local
[all...]
/linux-master/drivers/scsi/
sd_zbc.c
332 struct request *rq = scsi_cmd_to_rq(cmd); local
333 struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
334 sector_t sector = blk_rq_pos(rq);
414 struct request *rq = scsi_cmd_to_rq(cmd); local
415 struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
416 unsigned int wp_offset, zno = blk_rq_zone_no(rq);
424 if (!blk_rq_zone_is_seq(rq))
428 if (!blk_req_zone_write_trylock(rq))
463 blk_req_zone_write_unlock(rq);
480 struct request *rq local
507 sd_zbc_need_zone_wp_update(struct request *rq) argument
535 struct request *rq = scsi_cmd_to_rq(cmd); local
610 struct request *rq = scsi_cmd_to_rq(cmd); local
[all...]
hpsa.h
491 struct reply_queue_buffer *rq = &h->reply_queue[q]; local
507 if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
508 register_value = rq->head[rq->current_entry];
509 rq->current_entry++;
515 if (rq->current_entry == h->max_commands) {
516 rq->current_entry = 0;
517 rq
593 struct reply_queue_buffer *rq = &h->reply_queue[q]; local
[all...]
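
The hpsa.h matches at lines 507-517 consume a reply ring whose producer flips bit 0 of each entry on every pass over the ring, so an entry is fresh exactly when its low bit matches the consumer's expected phase. A standalone model of that scheme (ring size and tag values invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 4

    struct reply_queue {
            uint32_t head[RING_SIZE];
            unsigned int current_entry;
            uint32_t wraparound;   /* expected phase bit */
    };

    static int ring_pop(struct reply_queue *rq, uint32_t *val)
    {
            if ((rq->head[rq->current_entry] & 1) != rq->wraparound)
                    return 0;                    /* nothing new yet */
            *val = rq->head[rq->current_entry];
            if (++rq->current_entry == RING_SIZE) {
                    rq->current_entry = 0;
                    rq->wraparound ^= 1;         /* flip expected phase */
            }
            return 1;
    }

    int main(void)
    {
            struct reply_queue rq = { .head = { 0x11, 0x21, 0, 0 },
                                      .wraparound = 1 };
            uint32_t v;

            while (ring_pop(&rq, &v))
                    printf("completed entry 0x%x\n", v);   /* 0x11 then 0x21 */
            return 0;
    }
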
/linux-master/drivers/nvme/target/
passthru.c
217 struct request *rq = req->p.rq; local
218 struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
219 struct nvme_ns *ns = rq->q->queuedata;
224 status = nvme_execute_rq(rq, false);
241 req->cqe->result = nvme_req(rq)->result;
243 blk_mq_free_request(rq);
249 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq, argument
252 struct nvmet_req *req = rq->end_io_data;
254 req->cqe->result = nvme_req(rq)
260 nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) argument
297 struct request *rq = NULL; local
[all...]
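
The passthru.c matches at lines 249-254 show the asynchronous half of request execution: the submitter parks its context in rq->end_io_data, and the end_io callback recovers it when the request completes. A sketch of both halves, using the calls visible in the matches; the blk_execute_rq_nowait() signature and the direct rq->end_io assignment are assumptions about the current block API:

    static enum rq_end_io_ret my_req_done(struct request *rq,
                                          blk_status_t blk_status)
    {
            struct nvmet_req *req = rq->end_io_data;   /* submitter's context */

            req->cqe->result = nvme_req(rq)->result;
            /* completing the nvmet request itself is elided here */
            blk_mq_free_request(rq);
            return RQ_END_IO_NONE;
    }

    /* submission side */
    rq->end_io = my_req_done;
    rq->end_io_data = req;
    blk_execute_rq_nowait(rq, false);
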
/linux-master/include/trace/events/
nbd.h
64 struct request *rq),
66 TP_ARGS(nbd_request, index, rq),
77 __entry->request = rq;
97 struct request *rq),
99 TP_ARGS(nbd_request, index, rq),
/linux-master/drivers/gpu/drm/scheduler/
sched_entity.c
70 entity->rq = NULL;
100 entity->rq = sched_list[0]->sched_rq[entity->priority];
242 if (!entity->rq)
247 drm_sched_rq_remove_entity(entity->rq, entity);
287 if (!entity->rq)
290 sched = entity->rq->sched;
383 drm_sched_wakeup(entity->rq->sched, entity);
409 struct drm_gpu_scheduler *sched = entity->rq->sched;
532 struct drm_sched_rq *rq; local
558 rq
[all...]
/linux-master/drivers/scsi/elx/efct/
efct_hw_queues.c
314 struct hw_rq *rq = NULL; local
324 * encapsulates 2 SLI queues (for rq pair)
327 rq = kzalloc(sizeof(*rq), GFP_KERNEL);
328 if (!rq)
331 rqs[i] = rq;
332 rq->instance = hw->hw_rq_count++;
333 rq->cq = cqs[i];
334 rq->type = SLI4_QTYPE_RQ;
335 rq
455 efct_hw_del_rq(struct hw_rq *rq) argument
494 struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; local
531 struct hw_rq *rq; local
607 struct hw_rq *rq = hw->hw_rq[hw_rq_index]; local
[all...]
