/linux-master/drivers/staging/rtl8712/
osdep_intf.h
    30: int r871x_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

/linux-master/drivers/gpu/drm/i915/display/
intel_display_rps.c
    25: struct i915_request *rq = wait->request; (local)
    32: if (!i915_request_started(rq))
    33: intel_rps_boost(rq);
    34: i915_request_put(rq);

/linux-master/drivers/gpu/drm/i915/gt/ |
intel_ring_submission.c
    354: struct i915_request *pos, *rq; (local)
    358: rq = NULL;
    363: rq = pos;
    391: if (rq) {
    407: __i915_request_reset(rq, stalled);
    409: GEM_BUG_ON(rq->ring != engine->legacy.ring);
    410: head = rq->head;
    604: struct i915_request *rq,
    609: if (!rq || !i915_request_is_active(rq))
    603: ring_context_revoke(struct intel_context *ce, struct i915_request *rq, unsigned int preempt_timeout_ms) (argument)
    622: ring_context_cancel_request(struct intel_context *ce, struct i915_request *rq) (argument)
    654: load_pd_dir(struct i915_request *rq, struct i915_address_space *vm, u32 valid) (argument)
    688: mi_set_context(struct i915_request *rq, struct intel_context *ce, u32 flags) (argument)
    805: remap_l3_slice(struct i915_request *rq, int slice) (argument)
    835: remap_l3(struct i915_request *rq) (argument)
    856: switch_mm(struct i915_request *rq, struct i915_address_space *vm) (argument)
    882: clear_residuals(struct i915_request *rq) (argument)
    913: switch_context(struct i915_request *rq) (argument)
    1100: add_to_engine(struct i915_request *rq) (argument)
    1106: remove_from_engine(struct i915_request *rq) (argument)
    [all...]

selftest_engine_pm.c
    79: struct i915_request *rq; (local)
    82: rq = intel_context_create_request(ce);
    83: if (IS_ERR(rq))
    84: return PTR_ERR(rq);
    86: cs = intel_ring_begin(rq, 28);
    88: i915_request_add(rq);
    105: intel_ring_advance(rq, cs);
    106: i915_request_get(rq);
    107: i915_request_add(rq);
    125: if (i915_request_wait(rq,
    261: struct i915_request *rq; (local)
    [all...]

intel_timeline.h
    73: struct i915_request *rq,
    90: const struct i915_request *rq,
    96: const struct i915_request *rq)
    98: return list_is_last_rcu(&rq->link, &tl->requests);
    95: intel_timeline_is_last(const struct intel_timeline *tl, const struct i915_request *rq) (argument)

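The last hits form intel_timeline_is_last(): a request is the timeline's final one when its list link is the last node of tl->requests. A minimal userspace sketch of that "is last on the list" check, with made-up demo types standing in for the i915 structures; the kernel version uses list_is_last_rcu(), which wraps the pointer load in READ_ONCE() for lockless readers:

    #include <stdbool.h>

    /* Illustrative stand-ins for struct intel_timeline / struct i915_request. */
    struct list_head { struct list_head *next, *prev; };
    struct timeline  { struct list_head requests; };
    struct request   { struct list_head link; };

    /* A node is last when its next pointer wraps back to the list head;
     * the kernel's list_is_last_rcu() does this load with READ_ONCE(). */
    static bool timeline_is_last(const struct timeline *tl,
                                 const struct request *rq)
    {
        return rq->link.next == &tl->requests;
    }
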
/linux-master/block/ |
blk-rq-qos.h
    104: void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
    105: void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
    106: void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
    108: void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
    109: void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
    119: static inline void rq_qos_done(struct request_queue *q, struct request *rq) (argument)
    121: if (q->rq_qos && !blk_rq_is_passthrough(rq))
    122: __rq_qos_done(q->rq_qos, rq);
    125: static inline void rq_qos_issue(struct request_queue *q, struct request *rq) (argument)
    128: __rq_qos_issue(q->rq_qos, rq);
    131: rq_qos_requeue(struct request_queue *q, struct request *rq) (argument)
    155: rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio) (argument)
    162: rq_qos_merge(struct request_queue *q, struct request *rq, struct bio *bio) (argument)
    [all...]

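The pattern in these hits: each public rq_qos_*() helper is a cheap inline guard that tests q->rq_qos (and, for completions, blk_rq_is_passthrough()) before calling the out-of-line __rq_qos_*() walker, so queues with no QoS policy attached pay only a branch. A minimal compilable sketch of that guard-plus-walker shape, with illustrative types rather than the real block-layer API:

    #include <stddef.h>

    /* Illustrative types: a policy object with a hook and a next pointer,
     * chained off the queue. Not the real block-layer definitions. */
    struct rq_qos {
        struct rq_qos *next;
        void (*done)(struct rq_qos *rqos, int rq_id);
    };

    struct request_queue {
        struct rq_qos *rq_qos;   /* NULL when no QoS policy is attached */
    };

    /* Out of line: walk every attached policy. */
    static void __rq_qos_done_model(struct rq_qos *rqos, int rq_id)
    {
        for (; rqos; rqos = rqos->next)
            if (rqos->done)
                rqos->done(rqos, rq_id);
    }

    /* Inline guard: the common no-policy case costs one NULL test. */
    static inline void rq_qos_done_model(struct request_queue *q, int rq_id)
    {
        if (q->rq_qos)
            __rq_qos_done_model(q->rq_qos, rq_id);
    }
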
blk-map.c
    131: static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data, (argument)
    160: bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
    192: if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
    229: ret = blk_rq_append_bio(rq, bio);
    253: static struct bio *blk_rq_map_bio_alloc(struct request *rq, (argument)
    258: if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
    259: bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
    267: bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
    272: static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, (argument)
    276: unsigned int max_sectors = queue_max_hw_sectors(rq
    537: blk_rq_append_bio(struct request *rq, struct bio *bio) (argument)
    562: blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter) (argument)
    632: blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, const struct iov_iter *iter, gfp_t gfp_mask) (argument)
    687: blk_rq_map_user(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, void __user *ubuf, unsigned long len, gfp_t gfp_mask) (argument)
    782: blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, unsigned int len, gfp_t gfp_mask) (argument)
    [all...]

bsg-lib.c
    32: struct request *rq; (local)
    43: rq = blk_mq_alloc_request(q, hdr->dout_xfer_len ?
    45: if (IS_ERR(rq))
    46: return PTR_ERR(rq);
    47: rq->timeout = timeout;
    49: job = blk_mq_rq_to_pdu(rq);
    64: job->bidi_rq = blk_mq_alloc_request(rq->q, REQ_OP_DRV_IN, 0);
    70: ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
    84: ret = blk_rq_map_user(rq->q, rq, NUL
    159: struct request *rq = blk_mq_rq_from_pdu(job); (local)
    192: struct request *rq = blk_mq_rq_from_pdu(job); (local)
    205: bsg_complete(struct request *rq) (argument)
    335: bsg_timeout(struct request *rq) (argument)
    [all...]

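The hits show the bsg job living in the request's per-driver pdu area (blk_mq_rq_to_pdu()) rather than being allocated separately, with blk_mq_alloc_request() failures reported via IS_ERR()/PTR_ERR(). A small userspace model of the "payload behind the request" layout, assuming made-up names; in the real code the pdu size is configured at tag-set setup, and failure is an ERR_PTR rather than NULL:

    #include <stdlib.h>

    struct request {
        unsigned int timeout;
        /* driver pdu bytes follow immediately after the header */
    };

    struct bsg_job { int result; };

    /* blk_mq_rq_to_pdu() is essentially this pointer arithmetic. */
    static inline void *rq_to_pdu(struct request *rq) { return rq + 1; }

    /* mirror of blk_mq_rq_from_pdu() */
    static inline struct request *pdu_to_rq(void *pdu)
    {
        return (struct request *)pdu - 1;
    }

    static struct request *alloc_request_with_pdu(size_t pdu_size)
    {
        /* one allocation covers request header plus payload */
        return calloc(1, sizeof(struct request) + pdu_size);
    }

    static struct bsg_job *start_job(void)
    {
        struct request *rq = alloc_request_with_pdu(sizeof(struct bsg_job));

        if (!rq)
            return NULL;
        rq->timeout = 30;
        return rq_to_pdu(rq);    /* the job sits right behind rq */
    }
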
/linux-master/fs/erofs/ |
compress.h
    25: int (*decompress)(struct z_erofs_decompress_req *rq,
    84: int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
    93: int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
    95: int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,

/linux-master/include/linux/ |
blk-integrity.h
    103: static inline bool blk_integrity_rq(struct request *rq) (argument)
    105: return rq->cmd_flags & REQ_INTEGRITY;
    112: static inline struct bio_vec *rq_integrity_vec(struct request *rq) (argument)
    114: if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
    116: return rq->bio->bi_integrity->bip_vec;
    175: static inline int blk_integrity_rq(struct request *rq) (argument)
    180: static inline struct bio_vec *rq_integrity_vec(struct request *rq) (argument)

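Lines 103/175 and 112/180 are the same helpers twice: a real inline when CONFIG_BLK_DEV_INTEGRITY is set (testing REQ_INTEGRITY in rq->cmd_flags) and a constant-result stub when it is not, so call sites never need #ifdef and the compiler deletes the dead branches. A compilable sketch of that stub idiom; the flag value and trimmed types are illustrative, and the kernel's disabled stub actually returns int 0:

    #include <stdbool.h>

    #define REQ_INTEGRITY (1u << 16)   /* illustrative value */

    struct request { unsigned int cmd_flags; };

    #ifdef CONFIG_BLK_DEV_INTEGRITY
    static inline bool blk_integrity_rq_model(struct request *rq)
    {
        return rq->cmd_flags & REQ_INTEGRITY;   /* real check: a flag test */
    }
    #else
    static inline bool blk_integrity_rq_model(struct request *rq)
    {
        (void)rq;
        return false;   /* feature compiled out: callers' branches fold away */
    }
    #endif
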
/linux-master/drivers/net/ethernet/intel/ice/ |
ice_controlq.c
    17: (qinfo)->rq.head = prefix##_ARQH; \
    18: (qinfo)->rq.tail = prefix##_ARQT; \
    19: (qinfo)->rq.len = prefix##_ARQLEN; \
    20: (qinfo)->rq.bah = prefix##_ARQBAH; \
    21: (qinfo)->rq.bal = prefix##_ARQBAL; \
    22: (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
    23: (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
    24: (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \
    25: (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
    126: cq->rq
    [all...]

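Lines 17-25 are the body of a register-layout macro: token pasting (prefix##_ARQH) lets one macro fill the receive-queue register info for any control queue whose registers share a naming scheme. A self-contained sketch of the same trick; the macro name, PF_FW_* offsets, and trimmed struct below are invented for illustration, not the ice driver's:

    #define PF_FW_ARQH   0x100   /* invented offsets */
    #define PF_FW_ARQT   0x104
    #define PF_FW_ARQLEN 0x108

    struct ctl_q_info {
        struct { unsigned int head, tail, len; } rq;
    };

    /* Token pasting turns the prefix argument into full register names
     * at compile time. */
    #define CQ_INIT_RQ_REGS(qinfo, prefix)      \
        do {                                    \
            (qinfo)->rq.head = prefix##_ARQH;   \
            (qinfo)->rq.tail = prefix##_ARQT;   \
            (qinfo)->rq.len  = prefix##_ARQLEN; \
        } while (0)

    static void setup_pf_queue(struct ctl_q_info *qi)
    {
        CQ_INIT_RQ_REGS(qi, PF_FW);   /* expands to PF_FW_ARQH and so on */
    }
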
/linux-master/drivers/usb/misc/ |
uss720.c
    80: struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count); (local)
    81: struct parport_uss720_private *priv = rq->priv;
    84: if (likely(rq->urb))
    85: usb_free_urb(rq->urb);
    86: kfree(rq->dr);
    88: list_del_init(&rq->asynclist);
    90: kfree(rq);
    98: struct uss720_async_request *rq; (local)
    103: rq = urb->context;
    104: priv = rq
    128, 178, 196, 232: struct uss720_async_request *rq; (local)
    [all...]

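Line 80 is the classic kref release pattern: the callback receives only a pointer to the embedded refcount, and container_of() recovers the enclosing async request before its URB and buffers are freed. A userspace model with a hand-rolled container_of and plain malloc/free standing in for the kernel primitives:

    #include <stdlib.h>
    #include <stddef.h>

    /* Userspace stand-in; the kernel's container_of lives in <linux/container_of.h>. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kref { int count; };

    struct async_request {
        struct kref ref_count;   /* embedded refcount, as in uss720.c */
        void *urb;               /* stands in for the URB and setup packet */
    };

    /* Release callback: gets only &rq->ref_count, recovers the whole
     * object, then frees what it owns before freeing itself. */
    static void destroy_async(struct kref *kref)
    {
        struct async_request *rq =
            container_of(kref, struct async_request, ref_count);

        free(rq->urb);
        free(rq);
    }
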
/linux-master/drivers/gpu/drm/i915/selftests/ |
i915_request.c
    367: struct i915_request *rq; (local)
    372: rq = t->request_alloc(ce);
    374: if (IS_ERR(rq)) {
    375: err = PTR_ERR(rq);
    380: err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
    384: requests[n] = i915_request_get(rq);
    385: i915_request_add(rq);
    389: &rq->fence,
    394: i915_request_put(rq);
    406: struct i915_request *rq (local)
    421: struct i915_request *rq = requests[n]; (local)
    647, 700, 761: struct i915_request *rq; (local)
    822: struct i915_request *rq, *nop; (local)
    1008: emit_bb_start(struct i915_request *rq, struct i915_vma *batch) (argument)
    1275: struct i915_request *rq = request[idx]; (local)
    1300: struct i915_request *rq = request[idx]; (local)
    1476, 1514, 1560, 1690, 1891, 2020, 2111, 2183, 2243, 2298, 2388, 2479, 2591, 2739, 2773, 2807, 2998, 3073, 3149: struct i915_request *rq; (local)
    [all...]

/linux-master/kernel/sched/ |
fair.c
    318: struct rq *rq = rq_of(cfs_rq); (local)
    319: int cpu = cpu_of(rq);
    322: return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
    350: rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
    356: * cfs rq without parent should be put
    360: &rq->leaf_cfs_rq_list);
    365: rq
    387: struct rq *rq = rq_of(cfs_rq); (local)
    404: assert_list_leaf_cfs_rq(struct rq *rq) (argument)
    492: assert_list_leaf_cfs_rq(struct rq *rq) (argument)
    1112: update_curr_se(struct rq *rq, struct sched_entity *curr) (argument)
    1147: update_curr_common(struct rq *rq) (argument)
    1184: update_curr_fair(struct rq *rq) (argument)
    1499: account_numa_enqueue(struct rq *rq, struct task_struct *p) (argument)
    1505: account_numa_dequeue(struct rq *rq, struct task_struct *p) (argument)
    2052: struct rq *rq = cpu_rq(cpu); (local)
    2084: struct rq *rq = cpu_rq(env->dst_cpu); (local)
    3498: task_tick_numa(struct rq *rq, struct task_struct *curr) (argument)
    3563: task_tick_numa(struct rq *rq, struct task_struct *curr) (argument)
    3567: account_numa_enqueue(struct rq *rq, struct task_struct *p) (argument)
    3571: account_numa_dequeue(struct rq *rq, struct task_struct *p) (argument)
    3587, 3999, 4066: struct rq *rq = rq_of(cfs_rq); (local)
    4154: clear_tg_offline_cfs_rqs(struct rq *rq) (argument)
    4474: clear_tg_offline_cfs_rqs(struct rq *rq) (argument)
    4490: struct rq *rq; (local)
    5105: update_misfit_status(struct task_struct *p, struct rq *rq) (argument)
    5151: newidle_balance(struct rq *rq, struct rq_flags *rf) (argument)
    5165: update_misfit_status(struct task_struct *p, struct rq *rq) (argument)
    5332: struct rq *rq = rq_of(cfs_rq); (local)
    5710, 5739: struct rq *rq = data; (local)
    5758, 5847: struct rq *rq = rq_of(cfs_rq); (local)
    5935: struct rq *rq = arg; (local)
    5973: struct rq *rq = rq_of(cfs_rq); (local)
    6015: struct rq *rq; (local)
    6072: struct rq *rq = rq_of(cfs_rq); (local)
    6453: struct rq *rq = cpu_rq(i); (local)
    6474: update_runtime_enabled(struct rq *rq) (argument)
    6493: unthrottle_offline_cfs_rqs(struct rq *rq) (argument)
    6548: sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) (argument)
    6611: update_runtime_enabled(struct rq *rq) (argument)
    6612: unthrottle_offline_cfs_rqs(struct rq *rq) (argument)
    6622: sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) (argument)
    6630: hrtick_start_fair(struct rq *rq, struct task_struct *p) (argument)
    6655: hrtick_update(struct rq *rq) (argument)
    6666: hrtick_start_fair(struct rq *rq, struct task_struct *p) (argument)
    6670: hrtick_update(struct rq *rq) (argument)
    6685: update_overutilized_status(struct rq *rq) (argument)
    6693: update_overutilized_status(struct rq *rq) (argument)
    6697: sched_idle_rq(struct rq *rq) (argument)
    6716: enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) (argument)
    6809: dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) (argument)
    6899: cpu_load(struct rq *rq) (argument)
    6917: cpu_load_without(struct rq *rq, struct task_struct *p) (argument)
    6935: cpu_runnable(struct rq *rq) (argument)
    6940: cpu_runnable_without(struct rq *rq, struct task_struct *p) (argument)
    7134: struct rq *rq = cpu_rq(i); (local)
    7268: __update_idle_core(struct rq *rq) (argument)
    8019: struct rq *rq = cpu_rq(cpu); (local)
    8263: balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) (argument)
    8286: check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int wake_flags) (argument)
    8365: pick_task_fair(struct rq *rq) (argument)
    8398: pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) (argument)
    8542: __pick_next_task_fair(struct rq *rq) (argument)
    8550: put_prev_task_fair(struct rq *rq, struct task_struct *prev) (argument)
    8564: yield_task_fair(struct rq *rq) (argument)
    8593: yield_to_task_fair(struct rq *rq, struct task_struct *p) (argument)
    9198: attach_task(struct rq *rq, struct task_struct *p) (argument)
    9211: attach_one_task(struct rq *rq, struct task_struct *p) (argument)
    9256: others_have_blocked(struct rq *rq) (argument)
    9273: update_blocked_load_tick(struct rq *rq) (argument)
    9278: update_blocked_load_status(struct rq *rq, bool has_blocked) (argument)
    9285: others_have_blocked(struct rq *rq) (argument)
    9286: update_blocked_load_tick(struct rq *rq) (argument)
    9287: update_blocked_load_status(struct rq *rq, bool has_blocked) (argument)
    9290: __update_blocked_others(struct rq *rq, bool *done) (argument)
    9318: __update_blocked_fair(struct rq *rq, bool *done) (argument)
    9368: struct rq *rq = rq_of(cfs_rq); (local)
    9408: __update_blocked_fair(struct rq *rq, bool *done) (argument)
    9429: struct rq *rq = cpu_rq(cpu); (local)
    9509: struct rq *rq = cpu_rq(cpu); (local)
    9614: check_cpu_capacity(struct rq *rq, struct sched_domain *sd) (argument)
    9625: check_misfit_status(struct rq *rq, struct sched_domain *sd) (argument)
    9870: sched_reduced_capacity(struct rq *rq, struct sched_domain *sd) (argument)
    9903: struct rq *rq = cpu_rq(i); (local)
    10132: fbq_classify_rq(struct rq *rq) (argument)
    10146: fbq_classify_rq(struct rq *rq) (argument)
    10180: struct rq *rq = cpu_rq(cpu); (local)
    10218: struct rq *rq = cpu_rq(i); (local)
    10981: struct rq *busiest = NULL, *rq; (local)
    11689: rebalance_domains(struct rq *rq, enum cpu_idle_type idle) (argument)
    11771: on_null_domain(struct rq *rq) (argument)
    11847: nohz_balancer_kick(struct rq *rq) (argument)
    11979: nohz_balance_exit_idle(struct rq *rq) (argument)
    12015: struct rq *rq = cpu_rq(cpu); (local)
    12070: update_nohz_stats(struct rq *rq) (argument)
    12102: struct rq *rq; (local)
    12270: nohz_balancer_kick(struct rq *rq) (argument)
    12440: trigger_load_balance(struct rq *rq) (argument)
    12455: rq_online_fair(struct rq *rq) (argument)
    12462: rq_offline_fair(struct rq *rq) (argument)
    12486: task_tick_core(struct rq *rq, struct task_struct *curr) (argument)
    12529: task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) (argument)
    12542: struct rq *rq = task_rq(a); (local)
    12599: task_tick_core(struct rq *rq, struct task_struct *curr) (argument)
    12610: task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) (argument)
    12638: struct rq *rq = this_rq(); (local)
    12657: prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) (argument)
    12758: switched_from_fair(struct rq *rq, struct task_struct *p) (argument)
    12763: switched_to_fair(struct rq *rq, struct task_struct *p) (argument)
    12785: set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) (argument)
    12897, 12914: struct rq *rq; (local)
    12942: struct rq *rq = cpu_rq(cpu); (local)
    12990: struct rq *rq = cpu_rq(i); (local)
    13041: struct rq *rq = cpu_rq(i); (local)
    13097: get_rr_interval_fair(struct rq *rq, struct task_struct *task) (argument)
    [all...]

loadavg.c
    78: long calc_load_fold_active(struct rq *this_rq, long adjust)
    233: static void calc_load_nohz_fold(struct rq *rq) (argument)
    237: delta = calc_load_fold_active(rq, 0);
    258: void calc_load_nohz_remote(struct rq *rq) (argument)
    260: calc_load_nohz_fold(rq);
    265: struct rq *this_rq = this_rq();
    385: void calc_global_load_tick(struct rq *this_rq)

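calc_load_fold_active() (line 78) captures the idea behind these hits: each CPU folds only the change in its active-task count into the global load sum, instead of re-summing every CPU. A simplified, compilable model of that delta folding; the fields are illustrative, and the real function also takes an adjust parameter omitted here:

    #include <stdatomic.h>

    /* Illustrative per-CPU run queue slice; only what the folding needs. */
    struct rq {
        long nr_active;          /* runnable + uninterruptible right now */
        long calc_load_active;   /* contribution already in the global sum */
    };

    static atomic_long calc_load_tasks;   /* global folded task count */

    static long calc_load_fold_active_model(struct rq *this_rq)
    {
        long delta = this_rq->nr_active - this_rq->calc_load_active;

        if (delta)                 /* remember what we reported */
            this_rq->calc_load_active = this_rq->nr_active;
        return delta;
    }

    static void fold_this_cpu(struct rq *this_rq)
    {
        long delta = calc_load_fold_active_model(this_rq);

        if (delta)                 /* touch the shared counter only when
                                      something actually changed */
            atomic_fetch_add(&calc_load_tasks, delta);
    }
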
/linux-master/drivers/gpu/drm/i915/ |
i915_trace.h
    267: TP_PROTO(struct i915_request *rq, u32 flags),
    268: TP_ARGS(rq, flags),
    280: __entry->dev = rq->i915->drm.primary->index;
    281: __entry->class = rq->engine->uabi_class;
    282: __entry->instance = rq->engine->uabi_instance;
    283: __entry->ctx = rq->fence.context;
    284: __entry->seqno = rq->fence.seqno;
    294: TP_PROTO(struct i915_request *rq),
    295: TP_ARGS(rq),
    307: __entry->dev = rq
    500: trace_i915_request_guc_submit(struct i915_request *rq) (argument)
    505: trace_i915_request_submit(struct i915_request *rq) (argument)
    510: trace_i915_request_execute(struct i915_request *rq) (argument)
    515: trace_i915_request_in(struct i915_request *rq, unsigned int port) (argument)
    520: trace_i915_request_out(struct i915_request *rq) (argument)
    [all...]

i915_active.h
    81: * @rq: the request to watch
    83: * i915_active_fence_set() watches the given @rq for completion. While
    84: * that @rq is busy, the @active reports busy. When that @rq is signaled
    89: struct i915_request *rq);
    167: int i915_active_add_request(struct i915_active *ref, struct i915_request *rq);
    181: int i915_request_await_active(struct i915_request *rq,
    211: void i915_request_add_active_barriers(struct i915_request *rq);
    220: static inline int __i915_request_await_exclusive(struct i915_request *rq, (argument)
    228: err = i915_request_await_dma_fence(rq, fenc
    [all...]

/linux-master/drivers/gpu/drm/scheduler/ |
sched_main.c
    158: struct drm_sched_rq *rq = entity->rq; (local)
    161: rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
    169: * Both locks need to be grabbed, one to protect from entity->rq change
    174: spin_lock(&entity->rq->lock);
    180: rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
    183: spin_unlock(&entity->rq->lock);
    191: * @rq: scheduler run queue
    196: struct drm_sched_rq *rq)
    198: spin_lock_init(&rq
    195: drm_sched_rq_init(struct drm_gpu_scheduler *sched, struct drm_sched_rq *rq) (argument)
    213: drm_sched_rq_add_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) (argument)
    235: drm_sched_rq_remove_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) (argument)
    268: drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched, struct drm_sched_rq *rq) (argument)
    333: drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched, struct drm_sched_rq *rq) (argument)
    1345: struct drm_sched_rq *rq = sched->sched_rq[i]; (local)
    1397: struct drm_sched_rq *rq = sched->sched_rq[i]; (local)
    [all...]

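The add/remove hits show entity membership guarded by the run queue's own spinlock, with entity->rq recording which queue the entity sits on (the comment at line 169 notes both locks matter because entity->rq can change). A pthread model of that locking shape, using a plain list where the driver keeps an rb-tree ordered for FIFO selection; all names here are illustrative:

    #include <pthread.h>
    #include <stddef.h>

    struct sched_rq;

    struct sched_entity {
        struct sched_rq *rq;          /* run queue we are currently on */
        struct sched_entity *next;
    };

    struct sched_rq {
        pthread_mutex_t lock;         /* guards the membership list */
        struct sched_entity *head;
    };

    static void rq_add_entity(struct sched_rq *rq, struct sched_entity *e)
    {
        pthread_mutex_lock(&rq->lock);
        e->rq = rq;
        e->next = rq->head;           /* push front; the driver instead
                                         rb_add_cached()s by submit time */
        rq->head = e;
        pthread_mutex_unlock(&rq->lock);
    }

    static void rq_remove_entity(struct sched_rq *rq, struct sched_entity *e)
    {
        struct sched_entity **pp;

        pthread_mutex_lock(&rq->lock);
        for (pp = &rq->head; *pp; pp = &(*pp)->next) {
            if (*pp == e) {
                *pp = e->next;        /* unlink; rb_erase_cached() there */
                e->rq = NULL;
                break;
            }
        }
        pthread_mutex_unlock(&rq->lock);
    }
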
/linux-master/drivers/scsi/esas2r/ |
esas2r_flash.c
    134: struct esas2r_request *rq)
    136: struct atto_vda_flash_req *vrq = &rq->vrq->flash;
    138: (struct esas2r_flash_context *)rq->interrupt_cx;
    140: if (rq->req_stat == RS_SUCCESS) {
    148: rq->req_stat = RS_PENDING;
    154: rq->req_stat = RS_PENDING;
    155: rq->interrupt_cb = fc->interrupt_cb;
    163: if (rq->req_stat != RS_PENDING)
    169: (*fc->interrupt_cb)(a, rq);
    177: struct esas2r_request *rq)
    133: esas2r_fmapi_callback(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    176: build_flash_msg(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    227: load_image(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    302: complete_fmapi_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 fi_stat) (argument)
    323: fw_download_proc(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    827: esas2r_complete_fs_ioctl(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    845: esas2r_process_fs_ioctl(struct esas2r_adapter *a, struct esas2r_ioctl_fs *fs, struct esas2r_request *rq, struct esas2r_sg_context *sgc) (argument)
    1212: esas2r_nvram_callback(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    1258: esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sas_nvram *nvram) (argument)
    1390: esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, struct esas2r_request *rq, struct esas2r_sg_context *sgc) (argument)
    [all...]

esas2r_main.c
    145: struct esas2r_request *rq; (local)
    148: rq = esas2r_alloc_request(a);
    149: if (rq == NULL)
    152: if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
    155: esas2r_free_request(a, rq);
    825: struct esas2r_request *rq; (local)
    838: rq = esas2r_alloc_request(a);
    839: if (unlikely(rq == NULL)) {
    844: rq->cmd = cmd;
    849: rq
    892: complete_task_management_request(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    912: struct esas2r_request *rq; (local)
    1111: struct esas2r_request *rq; (local)
    1198: esas2r_log_request_failure(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    1236: esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    1484: esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    1496: struct esas2r_request *rq; (local)
    1516: esas2r_complete_request_cb(struct esas2r_adapter *a, struct esas2r_request *rq) (argument)
    [all...]

/linux-master/drivers/net/ |
virtio_net.c
    259: struct receive_queue *rq; (member in struct virtnet_info)
    438: static void give_pages(struct receive_queue *rq, struct page *page) (argument)
    442: /* Find end of list, sew whole thing into vi->rq.pages. */
    444: end->private = (unsigned long)rq->pages;
    445: rq->pages = page;
    448: static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) (argument)
    450: struct page *p = rq->pages;
    453: rq->pages = (struct page *)p->private;
    462: struct receive_queue *rq, void *buf)
    467: give_pages(rq, bu
    461: virtnet_rq_free_buf(struct virtnet_info *vi, struct receive_queue *rq, void *buf) (argument)
    576: page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, unsigned int len, unsigned int truesize, unsigned int headroom) (argument)
    676: virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len) (argument)
    705: virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx) (argument)
    716: virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len) (argument)
    741: virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp) (argument)
    818: struct receive_queue *rq; (local)
    983: struct receive_queue *rq = vi->rq; (local)
    1125: xdp_linearize_page(struct receive_queue *rq, int *num_buf, struct page *p, int offset, int page_off, unsigned int *len) (argument)
    1204: receive_small_xdp(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, struct bpf_prog *xdp_prog, void *buf, unsigned int xdp_headroom, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) (argument)
    1291: receive_small(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) (argument)
    1338: receive_big(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len, struct virtnet_rq_stats *stats) (argument)
    1361: mergeable_buf_free(struct receive_queue *rq, int num_buf, struct net_device *dev, struct virtnet_rq_stats *stats) (argument)
    1433: virtnet_build_xdp_buff_mrg(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, struct xdp_buff *xdp, void *buf, unsigned int len, unsigned int frame_sz, int *num_buf, unsigned int *xdp_frags_truesize, struct virtnet_rq_stats *stats) (argument)
    1522: mergeable_xdp_get_buf(struct virtnet_info *vi, struct receive_queue *rq, struct bpf_prog *xdp_prog, void *ctx, unsigned int *frame_sz, int *num_buf, struct page **page, int offset, unsigned int *len, struct virtio_net_hdr_mrg_rxbuf *hdr) (argument)
    1596: receive_mergeable_xdp(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, struct bpf_prog *xdp_prog, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) (argument)
    1656: receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) (argument)
    1803: receive_buf(struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len, void **ctx, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) (argument)
    1863: add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) (argument)
    1892: add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) (argument)
    1941: get_mergeable_buf_len(struct receive_queue *rq, struct ewma_pkt_len *avg_pkt_len, unsigned int room) (argument)
    1958: add_recvbuf_mergeable(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) (argument)
    2014: try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) (argument)
    2046: struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; (local)
    2097: struct receive_queue *rq = &vi->rq[i]; (local)
    2111: virtnet_receive(struct receive_queue *rq, int budget, unsigned int *xdp_xmit) (argument)
    2161: virtnet_poll_cleantx(struct receive_queue *rq) (argument)
    2189: virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq) (argument)
    2209: struct receive_queue *rq = (local)
    2459: virtnet_rx_resize(struct virtnet_info *vi, struct receive_queue *rq, u32 ring_num) (argument)
    2633: struct receive_queue *rq = &vi->rq[i]; (local)
    3022: struct receive_queue *rq; (local)
    3346: struct receive_queue *rq = &vi->rq[i]; (local)
    3559: struct receive_queue *rq = container_of(dim, (local)
    [all...]

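give_pages()/get_a_page() (lines 438-453) cache spare receive pages on an intrusive singly linked list threaded through each page's private field, so stashing a page costs no extra allocation. A userspace model of that chain; struct page here is a stand-in, and the real give_pages() splices a whole incoming chain rather than one page:

    #include <stdlib.h>

    /* Stand-in: in the kernel the chain pointer hides in page->private. */
    struct page {
        unsigned long private;   /* next cached page, cast to/from a pointer */
    };

    struct receive_queue {
        struct page *pages;      /* head of the spare-page chain */
    };

    /* Push one page onto the cache. */
    static void give_page(struct receive_queue *rq, struct page *page)
    {
        page->private = (unsigned long)rq->pages;
        rq->pages = page;
    }

    /* Pop the head, or NULL when empty (the real get_a_page() then
     * falls back to allocating a fresh page). */
    static struct page *get_a_page_model(struct receive_queue *rq)
    {
        struct page *p = rq->pages;

        if (p)
            rq->pages = (struct page *)p->private;
        return p;
    }
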
/linux-master/drivers/net/ethernet/huawei/hinic/ |
hinic_hw_qp.h
    123: struct hinic_rq rq; (member in struct hinic_qp)
    136: struct hinic_rq *rq, u16 global_qid);
    144: int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
    147: void hinic_clean_rq(struct hinic_rq *rq);
    151: int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
    206: struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
    209: void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
    212: struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
    216: struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
    221: void hinic_rq_put_wqe(struct hinic_rq *rq, u1
    [all...]

hinic_rx.h
    33: struct hinic_rq *rq; (member in struct hinic_rxq)
    46: int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,

/linux-master/drivers/net/vmxnet3/ |
vmxnet3_xdp.h
    30: struct vmxnet3_rx_queue *rq,
    36: struct vmxnet3_rx_queue *rq,

/linux-master/drivers/net/ethernet/mellanox/mlx5/core/ |
en_main.c
    286: static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, (argument)
    295: ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
    296: rq->mpwqe.umr_mode),
    301: cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
    304: octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
    309: static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node) (argument)
    311: rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq
    318: mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq) (argument)
    323: mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) (argument)
    348: mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) (argument)
    355: mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node) (argument)
    538: mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) (argument)
    563: mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) (argument)
    577: mlx5e_init_frags_partition(struct mlx5e_rq *rq) (argument)
    616: mlx5e_init_xsk_buffs(struct mlx5e_rq *rq) (argument)
    638: mlx5e_init_wqe_alloc_info(struct mlx5e_rq *rq, int node) (argument)
    672: mlx5e_free_wqe_alloc_info(struct mlx5e_rq *rq) (argument)
    680: struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work); (local)
    685: mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq) (argument)
    700: mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq) (argument)
    707: mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params, u32 xdp_frag_size, struct mlx5e_rq *rq) (argument)
    736: mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rqp, struct mlx5e_rq *rq, u32 *pool_size, int node) (argument)
    784: mlx5e_rq_free_shampo(struct mlx5e_rq *rq) (argument)
    795: mlx5e_alloc_rq(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct mlx5e_rq_param *rqp, int node, struct mlx5e_rq *rq) (argument)
    1001: mlx5e_free_rq(struct mlx5e_rq *rq) (argument)
    1028: mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter) (argument)
    1076: mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) (argument)
    1105: mlx5e_flush_rq_cq(struct mlx5e_rq *rq) (argument)
    1121: mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state) (argument)
    1144: mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) (argument)
    1172: mlx5e_destroy_rq(struct mlx5e_rq *rq) (argument)
    1177: mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) (argument)
    1197: mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq) (argument)
    1233: mlx5e_free_rx_descs(struct mlx5e_rq *rq) (argument)
    1278: mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk, int node, u16 q_counter, struct mlx5e_rq *rq) (argument)
    1337: mlx5e_activate_rq(struct mlx5e_rq *rq) (argument)
    1342: mlx5e_deactivate_rq(struct mlx5e_rq *rq) (argument)
    1348: mlx5e_close_rq(struct mlx5e_rq *rq) (argument)
    3310: mlx5e_free_drop_rq(struct mlx5e_rq *rq) (argument)
    3315: mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq, struct mlx5e_rq_param *param) (argument)
    4838: mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog) (argument)
    [all...]