Searched refs:rq (Results 126 - 150 of 636) sorted by relevance


/linux-master/drivers/net/netdevsim/
netdev.c
33 static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb) argument
35 if (skb_queue_len(&rq->skb_queue) > NSIM_RING_SIZE) {
40 skb_queue_tail(&rq->skb_queue, skb);
45 struct nsim_rq *rq)
47 return __dev_forward_skb(dev, skb) ?: nsim_napi_rx(rq, skb);
56 struct nsim_rq *rq; local
71 rq = &peer_ns->rq[rxq];
74 if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
77 napi_schedule(&rq
44 nsim_forward_skb(struct net_device *dev, struct sk_buff *skb, struct nsim_rq *rq) argument
333 nsim_rcv(struct nsim_rq *rq, int budget) argument
351 struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi); local
360 nsim_create_page_pool(struct nsim_rq *rq) argument
385 struct nsim_rq *rq; local
422 struct nsim_rq *rq = &ns->rq[i]; local
449 struct nsim_rq *rq = &ns->rq[i]; local
[all...]
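
The poll routine above (netdev.c line 351) recovers its nsim_rq from the embedded napi member via container_of(), the standard kernel idiom for going from a member pointer back to its enclosing structure. A minimal user-space sketch of the mechanism, with illustrative stand-in types rather than the real netdevsim definitions:

#include <stddef.h>
#include <stdio.h>

/* Simplified re-creation of the kernel's container_of() macro: given a
 * pointer to a member, step back by the member's offset to recover the
 * enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct napi_struct { int weight; };        /* stand-in for the kernel type */

struct nsim_rq_like {                      /* illustrative, not the real nsim_rq */
        int ring_size;
        struct napi_struct napi;           /* embedded member */
};

int main(void)
{
        struct nsim_rq_like rq = { .ring_size = 256 };
        struct napi_struct *napi = &rq.napi;   /* what a NAPI callback receives */

        /* Recover the outer queue exactly as the poll function does. */
        struct nsim_rq_like *outer = container_of(napi, struct nsim_rq_like, napi);
        printf("ring_size = %d\n", outer->ring_size);   /* prints 256 */
        return 0;
}
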
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en_main.c
288 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, argument
297 ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
298 rq->mpwqe.umr_mode),
303 cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
306 octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
311 static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node) argument
313 rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq
320 mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq) argument
325 mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) argument
350 mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) argument
357 mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node) argument
540 mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) argument
565 mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) argument
579 mlx5e_init_frags_partition(struct mlx5e_rq *rq) argument
618 mlx5e_init_xsk_buffs(struct mlx5e_rq *rq) argument
640 mlx5e_init_wqe_alloc_info(struct mlx5e_rq *rq, int node) argument
674 mlx5e_free_wqe_alloc_info(struct mlx5e_rq *rq) argument
682 struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work); local
687 mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq) argument
702 mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq) argument
709 mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params, u32 xdp_frag_size, struct mlx5e_rq *rq) argument
738 mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rqp, struct mlx5e_rq *rq, u32 *pool_size, int node) argument
786 mlx5e_rq_free_shampo(struct mlx5e_rq *rq) argument
797 mlx5e_alloc_rq(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct mlx5e_rq_param *rqp, int node, struct mlx5e_rq *rq) argument
992 mlx5e_free_rq(struct mlx5e_rq *rq) argument
1020 mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter) argument
1068 mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) argument
1097 mlx5e_flush_rq_cq(struct mlx5e_rq *rq) argument
1113 mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state) argument
1136 mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) argument
1164 mlx5e_destroy_rq(struct mlx5e_rq *rq) argument
1169 mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) argument
1189 mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq) argument
1225 mlx5e_free_rx_descs(struct mlx5e_rq *rq) argument
1270 mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk, int node, u16 q_counter, struct mlx5e_rq *rq) argument
1342 mlx5e_activate_rq(struct mlx5e_rq *rq) argument
1347 mlx5e_deactivate_rq(struct mlx5e_rq *rq) argument
1353 mlx5e_close_rq(struct mlx5e_rq *rq) argument
3410 mlx5e_free_drop_rq(struct mlx5e_rq *rq) argument
3415 mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq, struct mlx5e_rq_param *param) argument
4975 mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog) argument
[all...]
en_txrx.c
61 static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) argument
63 struct mlx5e_rq_stats *stats = rq->stats;
66 if (unlikely(!test_bit(MLX5E_RQ_STATE_DIM, &rq->state)))
69 dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
70 net_dim(rq->dim, dim_sample);
132 struct mlx5e_rq *rq = &c->rq; local
177 work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
192 busy |= INDIRECT_CALL_2(rq->post_wqes,
195 rq);
[all...]
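
The INDIRECT_CALL_2() at en_txrx.c line 192 above sidesteps a retpoline-era indirect branch by comparing rq->post_wqes against its two likely targets and calling the match directly. A simplified user-space model of that wrapper (the handler names below are placeholders, not the mlx5e functions):

#include <stdio.h>

/* Simplified model of the kernel's indirect-call wrappers: compare an
 * indirect function pointer against the most likely targets and call
 * them directly, falling back to the plain indirect call otherwise.
 * Under retpolines this turns a costly indirect branch into
 * predictable direct calls on the hot path. */
#define INDIRECT_CALL_1(f, f1, ...) \
        ((f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))
#define INDIRECT_CALL_2(f, f2, f1, ...) \
        ((f) == (f2) ? f2(__VA_ARGS__) : INDIRECT_CALL_1(f, f1, __VA_ARGS__))

static int post_wqes_mpwqe(int budget)  { return budget / 2; }
static int post_wqes_legacy(int budget) { return budget; }

int main(void)
{
        int (*post_wqes)(int) = post_wqes_mpwqe;   /* set once at rq init */

        /* Call site: the compiler emits direct calls to both candidates. */
        int done = INDIRECT_CALL_2(post_wqes, post_wqes_mpwqe,
                                   post_wqes_legacy, 64);
        printf("posted %d\n", done);
        return 0;
}
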
/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_rx.h
33 struct hinic_rq *rq; member in struct:hinic_rxq
46 int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
/linux-master/drivers/net/vmxnet3/
vmxnet3_xdp.h
30 struct vmxnet3_rx_queue *rq,
36 struct vmxnet3_rx_queue *rq,
vmxnet3_drv.c
229 "%s: rq[%d] error 0x%x\n",
613 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, argument
617 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
618 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
630 void *data = vmxnet3_pp_get_buff(rq->page_pool,
634 rq->stats.rx_buf_alloc_failure++;
645 rq->stats.rx_buf_alloc_failure++;
657 rq->stats.rx_buf_alloc_failure++;
671 rq->stats.rx_buf_alloc_failure++;
682 rq
1304 vmxnet3_create_pp(struct vmxnet3_adapter *adapter, struct vmxnet3_rx_queue *rq, int size) argument
1421 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter) argument
1503 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter, int quota) argument
1910 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
1972 vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
2028 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
2043 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
2127 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
2258 struct vmxnet3_rx_queue *rq = container_of(napi, local
2322 struct vmxnet3_rx_queue *rq = data; local
2513 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
2823 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
3276 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
3317 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
[all...]
/linux-master/drivers/gpu/drm/i915/gt/
intel_timeline.h
73 struct i915_request *rq,
90 const struct i915_request *rq,
96 const struct i915_request *rq)
98 return list_is_last_rcu(&rq->link, &tl->requests);
95 intel_timeline_is_last(const struct intel_timeline *tl, const struct i915_request *rq) argument
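
intel_timeline_is_last() above reduces to a single pointer test: on a circular doubly-linked list, an entry is last exactly when its next pointer is the list head (the _rcu variant only adds an RCU-safe read of that pointer). A self-contained sketch with a minimal list in the kernel's style:

#include <stdbool.h>
#include <stdio.h>

/* Minimal circular doubly-linked list, enough to show what
 * list_is_last() checks. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

static bool list_is_last(const struct list_head *entry,
                         const struct list_head *head)
{
        return entry->next == head;   /* last iff next wraps to the head */
}

int main(void)
{
        struct list_head requests, rq_a, rq_b;

        list_init(&requests);
        list_add_tail(&rq_a, &requests);
        list_add_tail(&rq_b, &requests);

        printf("a last? %d\n", list_is_last(&rq_a, &requests)); /* 0 */
        printf("b last? %d\n", list_is_last(&rq_b, &requests)); /* 1 */
        return 0;
}
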
selftest_workarounds.c
37 static int request_add_sync(struct i915_request *rq, int err) argument
39 i915_request_get(rq);
40 i915_request_add(rq);
41 if (i915_request_wait(rq, 0, HZ / 5) < 0)
43 i915_request_put(rq);
48 static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin) argument
52 i915_request_get(rq);
53 i915_request_add(rq);
54 if (spin && !igt_wait_for_spinner(spin, rq))
56 i915_request_put(rq);
104 struct i915_request *rq; local
269 switch_to_scratch_context(struct intel_engine_cs *engine, struct igt_spinner *spin, struct i915_request **rq) argument
303 struct i915_request *rq; local
523 struct i915_request *rq; local
846 struct i915_request *rq; local
889 struct i915_request *rq; local
1243 struct i915_request *rq; local
[all...]
/linux-master/drivers/gpu/drm/i915/selftests/
intel_scheduler_helpers.h
33 int intel_selftest_wait_for_rq(struct i915_request *rq);
/linux-master/drivers/gpu/drm/i915/gem/selftests/
i915_gem_context.c
74 struct i915_request *rq = NULL; local
87 if (rq) {
88 i915_request_await_dma_fence(this, &rq->fence);
89 i915_request_put(rq);
91 rq = i915_request_get(this);
94 if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
97 i915_request_put(rq);
101 i915_request_put(rq);
116 rq = NULL;
126 if (rq) { /* Forc
200 struct i915_request *rq = NULL; local
243 struct i915_request *rq = NULL; local
945 struct i915_request *rq; local
1049 struct i915_request *rq; local
1095 struct i915_request *rq = NULL; local
1515 struct i915_request *rq; local
1604 struct i915_request *rq; local
[all...]
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/
gk104.c
28 gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) argument
33 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
36 if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
g94.c
28 g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) argument
33 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
36 if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
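
Both aux_stat() helpers above decode one 32-bit status register into four per-port masks: each of the 8 AUX ports owns a 4-bit field, and bit 2 of each field (the 4 << (i * 4) test on line 36) feeds the rq mask. A standalone sketch of the decode; only the rq bit position is confirmed by the snippet, the other three positions are assumed for illustration:

#include <stdint.h>
#include <stdio.h>

/* Decode a status register that packs one 4-bit field per port into
 * four per-condition masks, one bit per port. */
static void aux_stat_decode(uint32_t stat, uint32_t *hi, uint32_t *lo,
                            uint32_t *rq, uint32_t *tx)
{
        int i;

        for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
                if (stat & (1u << (i * 4))) *hi |= 1u << i;   /* assumed */
                if (stat & (2u << (i * 4))) *lo |= 1u << i;   /* assumed */
                if (stat & (4u << (i * 4))) *rq |= 1u << i;   /* matches line 36 */
                if (stat & (8u << (i * 4))) *tx |= 1u << i;   /* assumed */
        }
}

int main(void)
{
        uint32_t hi, lo, rq, tx;

        aux_stat_decode(0x00000404, &hi, &lo, &rq, &tx); /* rq on ports 0 and 2 */
        printf("rq mask = 0x%02x\n", rq);                /* 0x05 */
        return 0;
}
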
/linux-master/drivers/gpu/drm/i915/
i915_scheduler_types.h
179 void (*kick_backend)(const struct i915_request *rq,
185 void (*bump_inflight_request_prio)(struct i915_request *rq,
192 void (*retire_inflight_request_prio)(struct i915_request *rq);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/rep/
tc.h
39 void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
68 mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, argument
69 struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); }
/linux-master/drivers/s390/block/
scm_blk.h
18 struct request_queue *rq; member in struct:scm_blk_dev
38 #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
/linux-master/block/
blk-mq-sched.c
60 struct request *rq; local
64 list_for_each_entry(rq, rq_list, queuelist) {
65 if (rq->mq_hctx != hctx) {
66 list_cut_before(&hctx_list, rq_list, &rq->queuelist);
103 struct request *rq; local
118 rq = e->type->ops.dispatch_request(hctx);
119 if (!rq) {
132 blk_mq_set_rq_budget_token(rq, budget_token);
135 * Now this rq owns the budget which has to be released
136 * if this rq wo
221 struct request *rq; local
373 blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, struct list_head *free) argument
[all...]
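
The loop at blk-mq-sched.c lines 64-66 above peels off the leading run of requests that map to one hardware context, cutting the list just before the first request with a different hctx so the batch can be dispatched together. A minimal sketch of that prefix split, using a plain singly-linked list in place of the kernel's list_cut_before():

#include <stdio.h>

/* Illustrative request: only the fields the split cares about. */
struct request {
        int hctx_id;               /* stand-in for rq->mq_hctx */
        struct request *next;
};

/* Split off the leading run of requests sharing the head's hctx_id.
 * On return, *listp points at the first request of a different hctx
 * (the remainder) and the returned chain is the dispatch batch, the
 * same idea as list_cut_before() in blk-mq-sched.c. */
static struct request *cut_same_hctx_prefix(struct request **listp)
{
        struct request *head = *listp, *cur = head, *prev = NULL;

        while (cur && cur->hctx_id == head->hctx_id) {
                prev = cur;
                cur = cur->next;
        }
        if (prev)
                prev->next = NULL;   /* terminate the batch */
        *listp = cur;                /* remainder starts here */
        return head;
}

int main(void)
{
        struct request c = { 1, NULL }, b = { 0, &c }, a = { 0, &b };
        struct request *list = &a;

        struct request *batch = cut_same_hctx_prefix(&list);
        for (struct request *r = batch; r; r = r->next)
                printf("dispatch rq on hctx %d\n", r->hctx_id); /* two hctx-0 rqs */
        printf("remainder on hctx %d\n", list->hctx_id);        /* hctx 1 */
        return 0;
}
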
kyber-iosched.c
530 static int rq_get_domain_token(struct request *rq) argument
532 return (long)rq->elv.priv[0];
535 static void rq_set_domain_token(struct request *rq, int token) argument
537 rq->elv.priv[0] = (void *)(long)token;
541 struct request *rq)
546 nr = rq_get_domain_token(rq);
548 sched_domain = kyber_sched_domain(rq->cmd_flags);
550 rq->mq_ctx->cpu);
585 static void kyber_prepare_request(struct request *rq) argument
587 rq_set_domain_token(rq,
540 rq_clear_domain_token(struct kyber_queue_data *kqd, struct request *rq) argument
595 struct request *rq, *next; local
614 kyber_finish_request(struct request *rq) argument
639 kyber_completed_request(struct request *rq, u64 now) argument
759 struct request *rq; local
807 struct request *rq; local
[all...]
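
Kyber's token accessors above (lines 530-537) store a small integer directly in the request's elevator private slot by casting through long, with -1 meaning no token is held. A sketch of that pointer-slot idiom with a stand-in request type:

#include <stdio.h>

/* Park a small integer token in a void * slot by casting through long,
 * instead of allocating separate storage for it; -1 conventionally
 * means "no token". */
struct request_like {
        void *priv[2];             /* stand-in for rq->elv.priv */
};

static int rq_get_domain_token(struct request_like *rq)
{
        return (long)rq->priv[0];
}

static void rq_set_domain_token(struct request_like *rq, int token)
{
        rq->priv[0] = (void *)(long)token;
}

int main(void)
{
        struct request_like rq;

        rq_set_domain_token(&rq, -1);          /* prepare: no token yet */
        printf("token = %d\n", rq_get_domain_token(&rq));   /* -1 */

        rq_set_domain_token(&rq, 42);          /* token granted later */
        printf("token = %d\n", rq_get_domain_token(&rq));   /* 42 */
        return 0;
}
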
bfq-iosched.c
231 #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
232 (get_sdist(last_pos, rq) > \
235 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
376 #define RQ_BIC(rq) ((struct bfq_io_cq *)((rq)->elv.priv[0]))
377 #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
999 struct request *rq; local
1006 rq = rq_entry_fifo(bfqq->fifo.next);
1008 if (rq
1043 bfq_serv_to_charge(struct request *rq, struct bfq_queue *bfqq) argument
1818 bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, struct bfq_queue *bfqq, int old_wr_coeff, struct request *rq, bool *interactive) argument
2204 bfq_add_request(struct request *rq) argument
2372 get_sdist(sector_t last_pos, struct request *rq) argument
2380 bfq_remove_request(struct request_queue *q, struct request *rq) argument
2560 bfq_requests_merged(struct request_queue *q, struct request *rq, struct request *next) argument
3227 bfq_allow_bio_merge(struct request_queue *q, struct request *rq, struct bio *bio) argument
3432 bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq) argument
3450 bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) argument
3591 bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) argument
3653 bfq_dispatch_remove(struct request_queue *q, struct request *rq) argument
5106 struct request *rq = bfqq->next_rq; local
5162 struct request *rq = NULL; local
5249 bfq_update_dispatch_stats(struct request_queue *q, struct request *rq, struct bfq_queue *in_serv_queue, bool idle_timer_disabled) argument
5294 bfq_update_dispatch_stats(struct request_queue *q, struct request *rq, struct bfq_queue *in_serv_queue, bool idle_timer_disabled) argument
5303 struct request *rq; local
5916 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct request *rq) argument
6078 bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct request *rq) argument
6151 __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) argument
6239 bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_insert_t flags) argument
6300 struct request *rq; local
6648 bfq_finish_requeue_request(struct request *rq) argument
6703 bfq_finish_request(struct request *rq) argument
6810 bfq_prepare_request(struct request *rq) argument
6845 bfq_init_rq(struct request *rq) argument
[all...]
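
BFQ_RQ_SEEKY above classifies a request as seeky when get_sdist(), the absolute sector distance from the previous request's position, exceeds a threshold (with a relaxed test on non-rotational media). A sketch with an invented placeholder threshold rather than BFQ's tuned constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

#define SEEK_THR_SECTORS  (8 * 1024)   /* assumed placeholder threshold */

/* Absolute sector distance from the last completed position; zero
 * history means no distance can be computed yet. */
static sector_t get_sdist(sector_t last_pos, sector_t rq_pos)
{
        if (!last_pos)
                return 0;
        return rq_pos > last_pos ? rq_pos - last_pos : last_pos - rq_pos;
}

static bool rq_is_seeky(sector_t last_pos, sector_t rq_pos)
{
        return get_sdist(last_pos, rq_pos) > SEEK_THR_SECTORS;
}

int main(void)
{
        printf("sequential: %d\n", rq_is_seeky(1000, 1008));     /* 0 */
        printf("far jump:   %d\n", rq_is_seeky(1000, 500000));   /* 1 */
        return 0;
}
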
blk-mq-tag.c
255 struct request *rq; local
259 rq = tags->rqs[bitnr];
260 if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
261 rq = NULL;
263 return rq;
273 struct request *rq; local
284 * We can hit rq == NULL here, because the tagging functions
287 rq
342 struct request *rq; local
452 blk_mq_tagset_count_completed_rqs(struct request *rq, void *data) argument
678 blk_mq_unique_tag(struct request *rq) argument
[all...]
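
blk_mq_unique_tag() (line 678 above) packs the hardware-queue number and the per-queue tag into one 32-bit cookie so a driver can hand firmware a single value and later recover both halves; the kernel splits it 16/16. A sketch of the pack/unpack pair:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the kernel's 16/16 split (BLK_MQ_UNIQUE_TAG_BITS). */
#define UNIQUE_TAG_BITS 16
#define UNIQUE_TAG_MASK ((1u << UNIQUE_TAG_BITS) - 1)

static uint32_t unique_tag(uint32_t hwq, uint32_t tag)
{
        return (hwq << UNIQUE_TAG_BITS) | (tag & UNIQUE_TAG_MASK);
}

static uint32_t unique_tag_to_hwq(uint32_t ut) { return ut >> UNIQUE_TAG_BITS; }
static uint32_t unique_tag_to_tag(uint32_t ut) { return ut & UNIQUE_TAG_MASK; }

int main(void)
{
        uint32_t ut = unique_tag(3, 257);

        printf("cookie 0x%08x -> hwq %u, tag %u\n",
               ut, unique_tag_to_hwq(ut), unique_tag_to_tag(ut)); /* 3, 257 */
        return 0;
}
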
t10-pi.c
134 * @rq: request with PI that should be prepared
142 static void t10_pi_type1_prepare(struct request *rq) argument
144 struct blk_integrity *bi = &rq->q->integrity;
146 u32 ref_tag = t10_pi_ref_tag(rq);
150 __rq_for_each_bio(bio, rq) {
183 * @rq: request with PI that should be prepared
193 static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes) argument
195 struct blk_integrity *bi = &rq->q->integrity;
198 u32 ref_tag = t10_pi_ref_tag(rq);
202 __rq_for_each_bio(bio, rq) {
249 t10_pi_type3_prepare(struct request *rq) argument
254 t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes) argument
394 ext_pi_type1_prepare(struct request *rq) argument
434 ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes) argument
[all...]
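
t10_pi_type1_prepare() and t10_pi_type1_complete() above rewrite each protection tuple's reference tag between the submitter's virtual value and the physical LBA the device checks, touching only tuples whose tag matches the expected value. A simplified array-based model of that remap (the real code walks bio integrity segments and uses big-endian tags):

#include <stdint.h>
#include <stdio.h>

struct pi_tuple { uint32_t ref_tag; };

/* Remap ref tags in [virt_lba, virt_lba + nr) to the physical range;
 * tuples that do not match the expected virtual tag are left alone,
 * as in the kernel's Type 1 prepare/complete paths. */
static void type1_remap(struct pi_tuple *pi, int nr,
                        uint32_t virt_lba, uint32_t phys_lba)
{
        for (int i = 0; i < nr; i++)
                if (pi[i].ref_tag == virt_lba + i)
                        pi[i].ref_tag = phys_lba + i;
}

int main(void)
{
        struct pi_tuple pi[4] = { {100}, {101}, {102}, {103} };

        type1_remap(pi, 4, 100, 9000);    /* prepare: virtual -> physical */
        printf("first ref tag now %u\n", pi[0].ref_tag);   /* 9000 */

        type1_remap(pi, 4, 9000, 100);    /* complete: physical -> virtual */
        printf("restored to %u\n", pi[0].ref_tag);         /* 100 */
        return 0;
}
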
/linux-master/drivers/scsi/esas2r/
esas2r.h
406 struct esas2r_request *rq);
966 int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
1004 bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
1009 struct esas2r_request *rq);
1015 void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1022 void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1036 struct esas2r_request *rq,
1042 struct esas2r_request *rq,
1048 void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
1050 struct esas2r_request *rq,
1167 esas2r_sgc_init(struct esas2r_sg_context *sgc, struct esas2r_adapter *a, struct esas2r_request *rq, struct atto_vda_sge *first) argument
1200 esas2r_rq_init_request(struct esas2r_request *rq, struct esas2r_adapter *a) argument
1264 esas2r_rq_free_sg_lists(struct esas2r_request *rq, struct esas2r_adapter *a) argument
1277 esas2r_rq_destroy_request(struct esas2r_request *rq, struct esas2r_adapter *a) argument
1303 esas2r_build_sg_list(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc) argument
1393 esas2r_start_ae_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1408 struct esas2r_request *rq; local
[all...]
/linux-master/drivers/nvme/target/
passthru.c
217 struct request *rq = req->p.rq; local
218 struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
219 struct nvme_ns *ns = rq->q->queuedata;
224 status = nvme_execute_rq(rq, false);
241 req->cqe->result = nvme_req(rq)->result;
243 blk_mq_free_request(rq);
249 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq, argument
252 struct nvmet_req *req = rq->end_io_data;
254 req->cqe->result = nvme_req(rq)
260 nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) argument
297 struct request *rq = NULL; local
[all...]
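
nvmet_passthru_req_done() above follows the block layer's end_io convention: the submitter parks its context in rq->end_io_data, and the completion callback recovers it to propagate the result. A sketch of the pattern with illustrative stand-in types and a synchronous "completion":

#include <stdio.h>

struct request_like {
        int result;
        void *end_io_data;                        /* submitter's context */
        void (*end_io)(struct request_like *rq);  /* completion callback */
};

struct req_ctx { int cqe_result; };

static void passthru_req_done(struct request_like *rq)
{
        struct req_ctx *ctx = rq->end_io_data;   /* recover the context */

        ctx->cqe_result = rq->result;            /* mirror result upward */
}

int main(void)
{
        struct req_ctx ctx = { 0 };
        struct request_like rq = { .end_io_data = &ctx,
                                   .end_io = passthru_req_done };

        rq.result = 7;          /* pretend the device completed with status 7 */
        rq.end_io(&rq);         /* what the block layer would invoke */

        printf("cqe result = %d\n", ctx.cqe_result);   /* 7 */
        return 0;
}
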
/linux-master/include/trace/events/
nbd.h
64 struct request *rq),
66 TP_ARGS(nbd_request, index, rq),
77 __entry->request = rq;
97 struct request *rq),
99 TP_ARGS(nbd_request, index, rq),
/linux-master/drivers/scsi/
hpsa.h
491 struct reply_queue_buffer *rq = &h->reply_queue[q]; local
507 if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
508 register_value = rq->head[rq->current_entry];
509 rq->current_entry++;
515 if (rq->current_entry == h->max_commands) {
516 rq->current_entry = 0;
517 rq
593 struct reply_queue_buffer *rq = &h->reply_queue[q]; local
[all...]
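
The hpsa reply queue above detects new completions without head/tail registers: the controller encodes a phase bit in each entry's low bit, the host consumes entries while that bit matches its wraparound phase, and the phase flips on every wrap (lines 507-517). A standalone sketch of that valid-bit ring; the initial phase value here is an assumption:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4

struct reply_queue {
        uint32_t head[RING_SIZE];
        unsigned int current_entry;
        uint32_t wraparound;           /* expected phase bit */
};

/* Consume one entry if its phase bit matches; flip the expected phase
 * when the consumer wraps, so stale entries from the previous lap are
 * never mistaken for new completions. */
static int consume_one(struct reply_queue *rq, uint32_t *out)
{
        uint32_t v = rq->head[rq->current_entry];

        if ((v & 1) != rq->wraparound)
                return 0;                       /* ring empty: phase mismatch */
        *out = v;
        if (++rq->current_entry == RING_SIZE) {
                rq->current_entry = 0;
                rq->wraparound ^= 1;            /* expect flipped bit next lap */
        }
        return 1;
}

int main(void)
{
        /* Controller posted two entries in phase 1 (low bit set). */
        struct reply_queue rq = { { 0x11, 0x21, 0, 0 }, 0, 1 };
        uint32_t v;

        while (consume_one(&rq, &v))
                printf("completed 0x%x\n", v);  /* 0x11 then 0x21 */
        return 0;
}
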
/linux-master/drivers/gpu/drm/scheduler/
sched_entity.c
70 entity->rq = NULL;
100 entity->rq = sched_list[0]->sched_rq[entity->priority];
242 if (!entity->rq)
247 drm_sched_rq_remove_entity(entity->rq, entity);
287 if (!entity->rq)
290 sched = entity->rq->sched;
383 drm_sched_wakeup(entity->rq->sched, entity);
409 struct drm_gpu_scheduler *sched = entity->rq->sched;
532 struct drm_sched_rq *rq; local
558 rq
[all...]
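
sched_entity.c line 100 above binds an entity to a run queue by indexing its first scheduler's per-priority array with the entity's own priority; line 70 shows the rq pointer being cleared on teardown. A sketch of that priority-indexed selection with stand-in types for the drm_sched ones:

#include <stdio.h>

enum prio { PRIO_MIN, PRIO_NORMAL, PRIO_HIGH, PRIO_COUNT };

struct run_queue { const char *name; };

struct scheduler {
        struct run_queue *sched_rq[PRIO_COUNT];  /* one rq per priority */
};

struct entity {
        enum prio priority;
        struct run_queue *rq;      /* NULL once the entity is torn down */
};

int main(void)
{
        struct run_queue rqs[PRIO_COUNT] = { {"min"}, {"normal"}, {"high"} };
        struct scheduler sched = { { &rqs[0], &rqs[1], &rqs[2] } };
        struct scheduler *sched_list[] = { &sched };

        struct entity e = { .priority = PRIO_HIGH };
        e.rq = sched_list[0]->sched_rq[e.priority];   /* mirrors line 100 */

        printf("entity runs on the %s rq\n", e.rq->name);  /* "high" */
        return 0;
}
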
