/linux-master/drivers/net/
veth.c
      77  struct veth_rq *rq;  (member in struct veth_priv)
     188  if (!priv->rq[i].page_pool)
     190  page_pool_get_stats(priv->rq[i].page_pool, &pp_stats);
     206  const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
     227  const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
     296  static void __veth_xdp_flush(struct veth_rq *rq)  (argument)
     300  if (!READ_ONCE(rq->rx_notify_masked) &&
     301      napi_schedule_prep(&rq->xdp_napi)) {
     302  WRITE_ONCE(rq->rx_notify_masked, true);
     303  __napi_schedule(&rq ...
     307  veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)  (argument)
     317  veth_forward_skb(struct net_device *dev, struct sk_buff *skb, struct veth_rq *rq, bool xdp)  (argument)
     347  struct veth_rq *rq = NULL;  (local)
     482  struct veth_rq *rq;  (local)
     546  veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)  (argument)
     570  veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)  (argument)
     593  veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp, struct veth_xdp_tx_bq *bq)  (argument)
     609  veth_xdp_rcv_one(struct veth_rq *rq, struct xdp_frame *frame, struct veth_xdp_tx_bq *bq, struct veth_stats *stats)  (argument)
     680  veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames, int n_xdpf, struct veth_xdp_tx_bq *bq, struct veth_stats *stats)  (argument)
     723  veth_convert_skb_to_xdp_buff(struct veth_rq *rq, struct xdp_buff *xdp, struct sk_buff **pskb)  (argument)
     762  veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, struct veth_xdp_tx_bq *bq, struct veth_stats *stats)  (argument)
     873  veth_xdp_rcv(struct veth_rq *rq, int budget, struct veth_xdp_tx_bq *bq, struct veth_stats *stats)  (argument)
     933  struct veth_rq *rq = ...  (local)
     965  veth_create_page_pool(struct veth_rq *rq)  (argument)
     997, 1005, 1037, 1046, 1075, 1098, 1115, 1173, 1181  struct veth_rq *rq = &priv->rq[i];  (local)
     ...
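The flush helper at lines 296-303 is almost fully visible in the matches above. Reassembled as a sketch (kernel-context fragment, not a standalone program; the argument cut off on line 303 is assumed to be &rq->xdp_napi, matching the napi_schedule_prep() call two lines up):

	static void __veth_xdp_flush(struct veth_rq *rq)
	{
		/* Schedule NAPI only once until the poller clears the mask. */
		if (!READ_ONCE(rq->rx_notify_masked) &&
		    napi_schedule_prep(&rq->xdp_napi)) {
			WRITE_ONCE(rq->rx_notify_masked, true);
			__napi_schedule(&rq->xdp_napi); /* assumed completion of line 303 */
		}
	}
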
/linux-master/drivers/infiniband/sw/rdmavt/ |
srq.c
      54  srq->rq.size = srq_init_attr->attr.max_wr + 1;
      55  srq->rq.max_sge = srq_init_attr->attr.max_sge;
      56  sz = sizeof(struct ib_sge) * srq->rq.max_sge +
      58  if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
      69  u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
      71  srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
      86  spin_lock_init(&srq->rq.lock);
     110  rvt_free_rq(&srq->rq);
     145  srq->rq ...
     ...
/linux-master/drivers/gpu/drm/i915/gt/ |
intel_execlists_submission.c
     212  __active_request(const struct intel_timeline * const tl, struct i915_request *rq, int error)  (argument)
     216  struct i915_request *active = rq;
     218  list_for_each_entry_from_reverse(rq, &tl->requests, link) {
     219  if (__i915_request_is_complete(rq))
     223  i915_request_set_error_once(rq, error);
     224  __i915_request_skip(rq);
     226  active = rq;
     233  active_request(const struct intel_timeline * const tl, struct i915_request *rq)  (argument)
     235  return __active_request(tl, rq, 0);
     256  static int rq_prio(const struct i915_request *rq)  (argument)
     261  effective_prio(const struct i915_request *rq)  (argument)
     297  need_preempt(const struct intel_engine_cs *engine, const struct i915_request *rq)  (argument)
     369  struct i915_request *rq, *rn, *active = NULL;  (local)
     418  execlists_context_status_change(struct i915_request *rq, unsigned long status)  (argument)
     431  reset_active(struct i915_request *rq, struct intel_engine_cs *engine)  (argument)
     469  bad_request(const struct i915_request *rq)  (argument)
     475  __execlists_schedule_in(struct i915_request *rq)  (argument)
     530  execlists_schedule_in(struct i915_request *rq, int idx)  (argument)
     547  resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)  (argument)
     560  kick_siblings(struct i915_request *rq, struct intel_context *ce)  (argument)
     589  __execlists_schedule_out(struct i915_request * const rq, struct intel_context * const ce)  (argument)
     651  execlists_schedule_out(struct i915_request *rq)  (argument)
     675  execlists_update_context(struct i915_request *rq)  (argument)
     738  dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)  (argument)
     784  struct i915_request * const *port, *rq, *prev = NULL;  (local)
     932  struct i915_request *rq = execlists->pending[n];  (local)
     962  i915_request_flags(const struct i915_request *rq)  (argument)
     996  virtual_matches(const struct virtual_engine *ve, const struct i915_request *rq, const struct intel_engine_cs *engine)  (argument)
    1033  struct i915_request *rq = READ_ONCE(ve->request);  (local)
    1075  defer_request(struct i915_request *rq, struct list_head * const pl)  (argument)
    1125  struct i915_request *rq;  (local)
    1136  timeslice_yield(const struct intel_engine_execlists *el, const struct i915_request *rq)  (argument)
    1154  needs_timeslice(const struct intel_engine_cs *engine, const struct i915_request *rq)  (argument)
    1190  timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)  (argument)
    1239  active_preempt_timeout(struct intel_engine_cs *engine, const struct i915_request *rq)  (argument)
    1255  set_preempt_timeout(struct intel_engine_cs *engine, const struct i915_request *rq)  (argument)
    1265  completed(const struct i915_request *rq)  (argument)
    1414  struct i915_request *rq;  (local)
    1493  struct i915_request *rq, *rn;  (local)
    1968  struct i915_request *rq = *execlists->active;  (local)
    2056  __execlists_hold(struct i915_request *rq)  (argument)
    2099  execlists_hold(struct intel_engine_cs *engine, struct i915_request *rq)  (argument)
    2128  hold_request(const struct i915_request *rq)  (argument)
    2154  __execlists_unhold(struct i915_request *rq)  (argument)
    2197  execlists_unhold(struct intel_engine_cs *engine, struct i915_request *rq)  (argument)
    2218  struct i915_request *rq;  (member in struct execlists_capture)
    2293  struct i915_request * const *port, *rq;  (local)
    2443  const struct i915_request *rq = *engine->execlists.active;  (local)
    2550  queue_request(struct intel_engine_cs *engine, struct i915_request *rq)  (argument)
    2560  submit_queue(struct intel_engine_cs *engine, const struct i915_request *rq)  (argument)
    2572  ancestor_on_hold(const struct intel_engine_cs *engine, const struct i915_request *rq)  (argument)
    2642  execlists_context_cancel_request(struct intel_context *ce, struct i915_request *rq)  (argument)
    2714  emit_pdps(struct i915_request *rq)  (argument)
    3030  struct i915_request *rq;  (local)
    3151  struct i915_request *rq, *rn;  (local)
    3280  add_to_engine(struct i915_request *rq)  (argument)
    3286  remove_from_engine(struct i915_request *rq)  (argument)
    3325  kick_execlists(const struct i915_request *rq, int prio)  (argument)
    3791  struct i915_request *rq;  (local)
    3898  virtual_submit_request(struct i915_request *rq)  (argument)
    4080  intel_execlists_show_requests(struct intel_engine_cs *engine, struct drm_printer *m, void (*show_request)(struct drm_printer *m, const struct i915_request *rq, const char *prefix, int indent), unsigned int max)  (argument)
    4090  struct i915_request *rq, *last;  (local)
    4144  struct i915_request *rq = READ_ONCE(ve->request);  (local)
     ...
intel_migrate.c
     334  static int emit_no_arbitration(struct i915_request *rq)  (argument)
     338  cs = intel_ring_begin(rq, 2);
     345  intel_ring_advance(rq, cs);
     350  static int max_pte_pkt_size(struct i915_request *rq, int pkt)  (argument)
     352  struct intel_ring *ring = rq->ring;
     354  pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
     362  static int emit_pte(struct i915_request *rq,  (argument)
     369  bool has_64K_pages = HAS_64K_PAGES(rq->i915);
     370  const u64 encode = rq->context->vm->pte_encode(0, pat_index,
     372  struct intel_ring *ring = rq ...
     530  emit_copy_ccs(struct i915_request *rq, u32 dst_offset, u8 dst_access, u32 src_offset, u8 src_access, int size)  (argument)
     581  emit_copy(struct i915_request *rq, u32 dst_offset, u32 src_offset, int size)  (argument)
     694  struct i915_request *rq;  (local)
     917  emit_clear(struct i915_request *rq, u32 offset, int size, u32 value, bool is_lmem)  (argument)
     994  struct i915_request *rq;  (local)
     ...
selftest_lrc.c
      38  static bool is_active(struct i915_request *rq)  (argument)
      40  if (i915_request_is_active(rq))
      43  if (i915_request_on_hold(rq))
      46  if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
      52  wait_for_submit(struct intel_engine_cs *engine, struct i915_request *rq, unsigned long timeout)  (argument)
      63  if (i915_request_completed(rq)) /* that was quick! */
      68  if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
      83  struct i915_request *rq;  (local)
      86  rq ...
     110, 406, 534, 569, 620, 742, 800, 1057, 1210, 1406, 1560, 1763, 1879  struct i915_request *rq;  (local)
    1741  garbage_reset(struct intel_engine_cs *engine, struct i915_request *rq)  (argument)
     ...
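is_active() at lines 38-46 reads as a chain of early-true checks; a reassembled sketch (kernel selftest fragment; the elided return statements between the visible lines are assumptions):

	static bool is_active(struct i915_request *rq)
	{
		if (i915_request_is_active(rq))
			return true; /* assumed elided body */

		if (i915_request_on_hold(rq))
			return true; /* assumed elided body */

		if (i915_request_has_initial_breadcrumb(rq) &&
		    i915_request_started(rq))
			return true; /* assumed elided body */

		return false; /* assumed fall-through */
	}
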
intel_engine_pm.c
     101  struct i915_request *rq = to_request(fence);  (local)
     103  ewma__engine_latency_add(&rq->engine->latency,
     104      ktime_us_delta(rq->fence.timestamp,
     105          rq->duration.emitted));
     109  __queue_and_release_pm(struct i915_request *rq,  (argument)
     123  GEM_BUG_ON(rq->context->active_count != 1);
     125  rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);
     143  __i915_request_queue_bh(rq);
     154  struct i915_request *rq;  (local)
     212  rq ...
     ...
selftest_migrate.c
      46  struct i915_request *rq;  (local)
      90  err = fn(migrate, &ww, src, dst, &rq);
      96  if (rq) {
      97  i915_request_wait(rq, 0, HZ);
      98  i915_request_put(rq);
     107  if (rq) {
     108  if (i915_request_wait(rq, 0, HZ) < 0) {
     112  i915_request_put(rq);
     147  struct i915_request *rq;  (local)
     163  rq ...
     265  struct i915_request *rq;  (local)
     553  struct i915_request *rq, *prev;  (local)
     854, 933  struct i915_request *rq;  (local)
     ...
intel_gt_buffer_pool.h
      24  intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node, struct i915_request *rq)  (argument)
      30  return i915_active_add_request(&node->active, rq);
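This header entry is short enough that the whole inline helper is recoverable; reassembled (the int return type, static inline qualifiers, and braces are assumed from the visible return statement):

	static inline int
	intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
					 struct i915_request *rq)
	{
		/* Keep the pool node alive until the request retires. */
		return i915_active_add_request(&node->active, rq);
	}
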
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
setup.c
      56  mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
      60  mlx5e_init_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params, struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk, struct mlx5e_rq *rq)  (argument)
      70  rq->wq_type = params->rq_wq_type;
      71  rq->pdev = c->pdev;
      72  rq->netdev = c->netdev;
      73  rq->priv = c->priv;
      74  rq->tstamp = c->tstamp;
      75  rq->clock = &mdev->clock;
      76  rq->icosq = &c->icosq;
      77  rq ...
     ...
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/ |
en_dim.c
      48  struct mlx5e_rq *rq = dim->priv;  (local)
      52  mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
      97  int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable)  (argument)
      99  if (enable == !!rq->dim)
     103  struct mlx5e_channel *c = rq->channel;
     106  dim = mlx5e_dim_enable(rq->mdev, mlx5e_rx_dim_work, c->cpu,
     107      c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq);
     111  rq ...
     ...
/linux-master/drivers/net/ethernet/huawei/hinic/ |
hinic_hw_qp.c
      62  #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
     154  hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, struct hinic_rq *rq, u16 global_qid)  (argument)
     162  wq = rq->wq;
     181  HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
     202  rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
     203  rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
     242  * alloc_rq_skb_arr - allocate rq array for saved skb
     243  * @rq: HW Receive Queue
     247  static int alloc_rq_skb_arr(struct hinic_rq *rq)  (argument)
     264  free_rq_skb_arr(struct hinic_rq *rq)  (argument)
     316  alloc_rq_cqe(struct hinic_rq *rq)  (argument)
     360  free_rq_cqe(struct hinic_rq *rq)  (argument)
     384  hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry)  (argument)
     435  hinic_clean_rq(struct hinic_rq *rq)  (argument)
     468  hinic_get_rq_free_wqebbs(struct hinic_rq *rq)  (argument)
     796  hinic_rq_get_wqe(struct hinic_rq *rq, unsigned int wqe_size, u16 *prod_idx)  (argument)
     815  hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb)  (argument)
     837  hinic_rq_read_wqe(struct hinic_rq *rq, unsigned int wqe_size, struct sk_buff **skb, u16 *cons_idx)  (argument)
     872  hinic_rq_read_next_wqe(struct hinic_rq *rq, unsigned int wqe_size, struct sk_buff **skb, u16 *cons_idx)  (argument)
     899  hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, unsigned int wqe_size)  (argument)
     922  hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe, u16 cons_idx, struct hinic_sge *sge)  (argument)
     940  hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge)  (argument)
     968  hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)  (argument)
     ...
hinic_rx.c
     138  skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
     142  addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
     170  dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
     204  free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
     217  rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
     224  hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
     226  hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
     233  hinic_rq_update(rxq->rq, prod_idx);
     245, 359, 461, 496, 519, 565  struct hinic_rq *rq = rxq->rq;  (local)
     580  hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, struct net_device *netdev)  (argument)
     ...
/linux-master/block/ |
mq-deadline.c
     116  deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)  (argument)
     118  return &per_prio->sort_list[rq_data_dir(rq)];
     125  static u8 dd_rq_ioclass(struct request *rq)  (argument)
     127  return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
     137  struct request *rq, *res = NULL;  (local)
     142  rq = rb_entry_rq(node);
     144  rq = rb_entry_rq(node);
     145  if (blk_rq_pos(rq) >= pos) {
     146  res = rq;
     156  deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)  (argument)
     164  deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)  (argument)
     166  elv_rb_del(deadline_rb_root(per_prio, rq), rq);
     172  deadline_remove_request(struct request_queue *q, struct dd_per_prio *per_prio, struct request *rq)  (argument)
     242  deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, struct request *rq)  (argument)
     268  struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);  (local)
     303  started_after(struct deadline_data *dd, struct request *rq, unsigned long latest_start)  (argument)
     321  struct request *rq, *next_rq;  (local)
     433, 466, 720  struct request *rq;  (local)
     607  dd_request_merge(struct request_queue *q, struct request **rq, struct bio *bio)  (argument)
     659  dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_insert_t flags, struct list_head *free)  (argument)
     732  dd_prepare_request(struct request *rq)  (argument)
     740  dd_finish_request(struct request *rq)  (argument)
     ...
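The two helpers at the top of this entry are visible in full and show how mq-deadline indexes its per-priority state; reassembled (return types, static qualifiers, and braces assumed from the visible bodies):

	/* One red-black tree per data direction (READ/WRITE) inside each priority class. */
	static struct rb_root *
	deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
	{
		return &per_prio->sort_list[rq_data_dir(rq)];
	}

	/* Map a request to its I/O priority class (RT, BE, IDLE). */
	static u8 dd_rq_ioclass(struct request *rq)
	{
		return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
	}
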
blk-mq.h
      53  void blk_mq_put_rq_ref(struct request *rq);
     258  static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)  (argument)
     263  if (rq->q->mq_ops->set_rq_budget_token)
     264  rq->q->mq_ops->set_rq_budget_token(rq, token);
     267  static inline int blk_mq_get_rq_budget_token(struct request *rq)  (argument)
     269  if (rq->q->mq_ops->get_rq_budget_token)
     270  return rq->q->mq_ops->get_rq_budget_token(rq);
     334  __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)  (argument)
     342  blk_mq_put_driver_tag(struct request *rq)  (argument)
     352  blk_mq_get_driver_tag(struct request *rq)  (argument)
     372  struct request *rq = list_entry_rq(list->next);  (local)
     ...
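The budget-token accessors at lines 258-270 dispatch through optional mq_ops callbacks; a reassembled sketch (lines 259-262 and the fallback return are elided in the listing, so the negative-token guard and the -1 default are assumptions):

	static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
	{
		if (token < 0) /* assumed elided guard */
			return;

		if (rq->q->mq_ops->set_rq_budget_token)
			rq->q->mq_ops->set_rq_budget_token(rq, token);
	}

	static inline int blk_mq_get_rq_budget_token(struct request *rq)
	{
		if (rq->q->mq_ops->get_rq_budget_token)
			return rq->q->mq_ops->get_rq_budget_token(rq);
		return -1; /* assumed fallback for drivers without the hook */
	}
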
blk-crypto.c
     193  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)  (argument)
     195  return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
     228  blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)  (argument)
     230  return blk_crypto_get_keyslot(rq->q->crypto_profile,
     231      rq->crypt_ctx->bc_key,
     232      &rq->crypt_keyslot);
     235  void __blk_crypto_rq_put_keyslot(struct request *rq)  (argument)
     237  blk_crypto_put_keyslot(rq->crypt_keyslot);
     238  rq->crypt_keyslot = NULL;
     241  void __blk_crypto_free_request(struct request *rq)  (argument)
     303  __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio, gfp_t gfp_mask)  (argument)
     ...
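The keyslot get/put pair is visible nearly in full; reassembled (only the braces are supplied):

	blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
	{
		/* Acquire a hardware keyslot for the request's crypto key. */
		return blk_crypto_get_keyslot(rq->q->crypto_profile,
					      rq->crypt_ctx->bc_key,
					      &rq->crypt_keyslot);
	}

	void __blk_crypto_rq_put_keyslot(struct request *rq)
	{
		blk_crypto_put_keyslot(rq->crypt_keyslot);
		rq->crypt_keyslot = NULL; /* drop the stale reference */
	}
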
blk-merge.c
      18  #include "blk-rq-qos.h"
     409  unsigned int blk_recalc_rq_segments(struct request *rq)  (argument)
     416  if (!rq->bio)
     419  switch (bio_op(rq->bio)) {
     422  if (queue_max_discard_segments(rq->q) > 1) {
     423  struct bio *bio = rq->bio;
     436  rq_for_each_bvec(bv, rq, iter)
     437  bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
     562  * must make sure sg can hold rq->nr_phys_segments entries
     564  int __blk_rq_map_sg(struct request_queue *q, struct request *rq,  (argument)
     587  blk_rq_get_max_sectors(struct request *rq, sector_t offset)  (argument)
     730  blk_rq_set_mixed_merge(struct request *rq)  (argument)
     890  attempt_back_merge(struct request_queue *q, struct request *rq)  (argument)
     901  attempt_front_merge(struct request_queue *q, struct request *rq)  (argument)
     917  blk_attempt_req_merge(struct request_queue *q, struct request *rq, struct request *next)  (argument)
     923  blk_rq_merge_ok(struct request *rq, struct bio *bio)  (argument)
     957  blk_try_merge(struct request *rq, struct bio *bio)  (argument)
    1068  blk_attempt_bio_merge(struct request_queue *q, struct request *rq, struct bio *bio, unsigned int nr_segs, bool sched_allow_merge)  (argument)
    1119, 1149, 1174  struct request *rq;  (local)
     ...
/linux-master/kernel/sched/ |
stats.c
       6  void __update_stats_wait_start(struct rq *rq, struct task_struct *p,  (argument)
      11  wait_start = rq_clock(rq);
      20  void __update_stats_wait_end(struct rq *rq, struct task_struct *p,  (argument)
      23  u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);
      47  void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,  (argument)
      56  u64 delta = rq_clock(rq) - sleep_start;
      74  u64 delta = rq_clock(rq) ...
     126  struct rq *rq;  (local)
     ...
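The visible fragments share one accounting pattern: stamp rq_clock(rq) when the task starts waiting, subtract that stamp when it stops. A minimal sketch of the pair (the __schedstat_set() store is not shown in the listing and is an assumption; the surrounding bookkeeping is omitted):

	/* On wait start: remember the runqueue clock. */
	u64 wait_start = rq_clock(rq);
	__schedstat_set(stats->wait_start, wait_start); /* assumed setter */

	/* On wait end: charge the elapsed wait time against the task's stats. */
	u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);
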
/linux-master/drivers/gpu/drm/i915/gem/selftests/ |
igt_gem_utils.c
      24  struct i915_request *rq;  (local)
      35  rq = intel_context_create_request(ce);
      38  return rq;
     115  struct i915_request *rq;  (local)
     127  rq = intel_context_create_request(ce);
     128  if (IS_ERR(rq)) {
     129  err = PTR_ERR(rq);
     133  err = igt_vma_move_to_active_unlocked(batch, rq, 0);
     137  err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
     145  err = rq ...
     ...
igt_gem_utils.h
      35  igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq,  (argument)
      41  err = i915_vma_move_to_active(vma, rq, flags);
/linux-master/include/linux/ |
blktrace_api.h
      75  extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
      85  # define blk_add_driver_data(rq, data, len) do {} while (0)
     115  static inline sector_t blk_rq_trace_sector(struct request *rq)  (argument)
     121  if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
     123  return blk_rq_pos(rq);
     126  static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)  (argument)
     128  return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
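Both trace helpers are nearly complete in the listing; reassembled (the elided line 122 is assumed to return 0 for requests without a meaningful sector):

	static inline sector_t blk_rq_trace_sector(struct request *rq)
	{
		/* Passthrough and sector-less requests trace as sector 0. */
		if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
			return 0; /* assumed elided body */
		return blk_rq_pos(rq);
	}

	static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
	{
		return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
	}
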
/linux-master/drivers/net/vmxnet3/ |
vmxnet3_xdp.c
     251  vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,  (argument)
     259  rq->stats.xdp_packets++;
     267  err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
     269  rq->stats.xdp_redirects++;
     271  rq->stats.xdp_drops++;
     272  page_pool_recycle_direct(rq->page_pool, page);
     278  vmxnet3_xdp_xmit_back(rq->adapter, xdpf))) {
     279  rq->stats.xdp_drops++;
     280  page_pool_recycle_direct(rq->page_pool, page);
     282  rq ...
     303  vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page, const struct xdp_buff *xdp)  (argument)
     325  vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter, struct vmxnet3_rx_queue *rq, void *data, int len, struct sk_buff **skb_xdp_pass)  (argument)
     368  vmxnet3_process_xdp(struct vmxnet3_adapter *adapter, struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, struct vmxnet3_rx_buf_info *rbi, struct Vmxnet3_RxDesc *rxd, struct sk_buff **skb_xdp_pass)  (argument)
     ...
/linux-master/drivers/gpu/drm/i915/gem/ |
i915_gem_throttle.c
      64  struct i915_request *rq, *target = NULL;  (local)
      70  list_for_each_entry_reverse(rq,
      73  if (i915_request_completed(rq))
      76  if (time_after(rq->emitted_jiffies,
      80  target = i915_request_get(rq);
/linux-master/fs/erofs/ |
compress.h
      25  int (*decompress)(struct z_erofs_decompress_req *rq,
      84  int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
      95  int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
      97  int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
      99  int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
/linux-master/drivers/s390/char/ |
raw3270.c
     144  struct raw3270_request *rq;  (local)
     147  rq = kzalloc(sizeof(*rq), GFP_KERNEL | GFP_DMA);
     148  if (!rq)
     153  rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
     154  if (!rq->buffer) {
     155  kfree(rq);
     159  rq->size = size;
     160  INIT_LIST_HEAD(&rq->list);
     165  if (rq ...
     176  raw3270_request_free(struct raw3270_request *rq)  (argument)
     186  raw3270_request_reset(struct raw3270_request *rq)  (argument)
     204  raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)  (argument)
     213  raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)  (argument)
     226  raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)  (argument)
     236  raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)  (argument)
     248  __raw3270_start(struct raw3270 *rp, struct raw3270_view *view, struct raw3270_request *rq)  (argument)
     274  raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)  (argument)
     293  raw3270_start_request(struct raw3270_view *view, struct raw3270_request *rq, int cmd, void *data, size_t len)  (argument)
     309  raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)  (argument)
     325  raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)  (argument)
     344  struct raw3270_request *rq;  (local)
     565  raw3270_read_modified_cb(struct raw3270_request *rq, void *data)  (argument)
     612  raw3270_reset_device_cb(struct raw3270_request *rq, void *data)  (argument)
     679  struct raw3270_request *rq;  (local)
     698  raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, struct irb *irb)  (argument)
     ...
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/ |
txrx.h
      68  INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
      69  INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
      71  void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
      72  void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);
     198  static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)  (argument)
     200  return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
     214  struct mlx5e_rq *rq;  (member in struct mlx5e_icosq_wqe_info)
     345  static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)  (argument)
     347  if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
     348  mlx5_wq_ll_reset(&rq ...
     371  mlx5e_rqwq_get_size(struct mlx5e_rq *rq)  (argument)
     381  mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)  (argument)
     391  mlx5e_rqwq_get_head(struct mlx5e_rq *rq)  (argument)
     401  mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)  (argument)
     503  mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)  (argument)
     ...
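The SHAMPO header-index helper at lines 198-200 is fully visible; reassembled, it masks the completion's header entry index into the header ring (hd_per_wq is evidently a power of two for the mask to be valid):

	static inline u16
	mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
	{
		/* Wrap the CQE's header entry index into the per-WQ header array. */
		return be16_to_cpu(cqe->shampo.header_entry_index) &
		       (rq->mpwqe.shampo->hd_per_wq - 1);
	}
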