Searched refs:rq (Results 101 - 125 of 636) sorted by relevance


/linux-master/drivers/infiniband/sw/rxe/
rxe_srq.c
56 srq->rq.max_wr = init->attr.max_wr;
57 srq->rq.max_sge = init->attr.max_sge;
60 srq->rq.max_sge*sizeof(struct ib_sge);
62 spin_lock_init(&srq->rq.producer_lock);
63 spin_lock_init(&srq->rq.consumer_lock);
65 q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,
80 srq->rq.queue = q;
81 init->attr.max_wr = srq->rq.max_wr;
137 if (attr->srq_limit > srq->rq.queue->buf->index_mask) {
140 srq->rq
[all...]
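
The rxe_srq.c hits above show a common verbs-layer shape: copy the caller's requested capacities into the SRQ, size each work-queue element from max_sge, let the queue allocator round max_wr up, and write the adjusted value back into init->attr so the caller learns the real capacity. Below is a minimal userspace sketch of that shape; every type and function in it (mock_srq, mock_queue_init, ...) is an illustrative stand-in, not the rxe driver's API.

/* Minimal userspace sketch of the rxe_srq.c pattern above. All names are
 * mock stand-ins. */
#include <stdio.h>
#include <stdlib.h>

struct mock_sge  { void *addr; unsigned len; };  /* stand-in for ib_sge */
struct mock_attr { unsigned max_wr, max_sge; };
struct mock_srq  { unsigned max_wr, max_sge; void *queue; };

/* rxe_queue_init() analogue: round the element count up to a power of two
 * and report the adjusted count back through *num_elem. */
static void *mock_queue_init(unsigned *num_elem, size_t elem_size)
{
    unsigned n = 1;

    while (n < *num_elem)
        n <<= 1;
    *num_elem = n;
    return calloc(n, elem_size);
}

static int mock_srq_from_init(struct mock_srq *srq, struct mock_attr *init)
{
    size_t wqe_size;

    srq->max_wr  = init->max_wr;
    srq->max_sge = init->max_sge;
    wqe_size = srq->max_sge * sizeof(struct mock_sge);

    srq->queue = mock_queue_init(&srq->max_wr, wqe_size);
    if (!srq->queue)
        return -1;

    init->max_wr = srq->max_wr;     /* report the real capacity back */
    return 0;
}

int main(void)
{
    struct mock_attr attr = { .max_wr = 100, .max_sge = 4 };
    struct mock_srq srq;

    if (mock_srq_from_init(&srq, &attr) == 0)
        printf("requested 100 WRs, granted %u\n", attr.max_wr);  /* 128 */
    free(srq.queue);
    return 0;
}
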
/linux-master/drivers/scsi/esas2r/
esas2r_ioctl.c
85 struct esas2r_request *rq)
113 struct esas2r_request *rq; local
120 rq = esas2r_alloc_request(a);
121 if (rq == NULL) {
153 rq->comp_cb = complete_fm_api_req;
157 if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
176 esas2r_free_request(a, (struct esas2r_request *)rq);
184 struct esas2r_request *rq)
201 struct esas2r_request *rq)
210 struct esas2r_request *rq; local
84 complete_fm_api_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
183 complete_nvr_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
200 complete_buffered_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
295 smp_ioctl_callback(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc, void *context) argument
332 esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
343 csmi_ioctl_tunnel(struct esas2r_adapter *a, union atto_ioctl_csmi *ci, struct esas2r_request *rq, struct esas2r_sg_context *sgc, u32 ctrl_code, u16 target_id) argument
392 csmi_ioctl_callback(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc, void *context) argument
608 csmi_ioctl_done_callback(struct esas2r_adapter *a, struct esas2r_request *rq, void *context) argument
669 hba_ioctl_tunnel(struct esas2r_adapter *a, struct atto_ioctl *hi, struct esas2r_request *rq, struct esas2r_sg_context *sgc) argument
689 scsi_passthru_comp_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
746 hba_ioctl_callback(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc, void *context) argument
1198 hba_ioctl_done_callback(struct esas2r_adapter *a, struct esas2r_request *rq, void *context) argument
1244 esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sas_nvram *data) argument
1272 struct esas2r_request *rq; local
1801 vda_complete_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1825 struct esas2r_request *rq; local
1919 fs_api_complete_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1947 struct esas2r_request *rq; local
[all...]
esas2r_flash.c
134 struct esas2r_request *rq)
136 struct atto_vda_flash_req *vrq = &rq->vrq->flash;
138 (struct esas2r_flash_context *)rq->interrupt_cx;
140 if (rq->req_stat == RS_SUCCESS) {
148 rq->req_stat = RS_PENDING;
154 rq->req_stat = RS_PENDING;
155 rq->interrupt_cb = fc->interrupt_cb;
163 if (rq->req_stat != RS_PENDING)
169 (*fc->interrupt_cb)(a, rq);
177 struct esas2r_request *rq)
133 esas2r_fmapi_callback(struct esas2r_adapter *a, struct esas2r_request *rq) argument
176 build_flash_msg(struct esas2r_adapter *a, struct esas2r_request *rq) argument
227 load_image(struct esas2r_adapter *a, struct esas2r_request *rq) argument
302 complete_fmapi_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 fi_stat) argument
323 fw_download_proc(struct esas2r_adapter *a, struct esas2r_request *rq) argument
827 esas2r_complete_fs_ioctl(struct esas2r_adapter *a, struct esas2r_request *rq) argument
845 esas2r_process_fs_ioctl(struct esas2r_adapter *a, struct esas2r_ioctl_fs *fs, struct esas2r_request *rq, struct esas2r_sg_context *sgc) argument
1212 esas2r_nvram_callback(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1258 esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sas_nvram *nvram) argument
1390 esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, struct esas2r_request *rq, struct esas2r_sg_context *sgc) argument
[all...]
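
esas2r_flash.c drives firmware flashing as a chain of completions: each callback checks rq->req_stat, re-marks the request RS_PENDING while stages remain, and fires the saved interrupt_cb only once nothing is pending. A toy sketch of that control flow, with made-up status values and names (the stage "completions" are simulated by direct recursion):

/* Sketch of the esas2r_fmapi_callback() flow above; all names invented. */
#include <stdio.h>

enum mock_stat { RS_SUCCESS, RS_PENDING, RS_FAILED };

struct mock_request;
typedef void (*mock_cb)(struct mock_request *rq);

struct mock_request {
    enum mock_stat req_stat;
    int stages_left;
    mock_cb interrupt_cb;       /* final completion, fired exactly once */
};

static void stage_done(struct mock_request *rq)
{
    if (rq->req_stat == RS_SUCCESS && rq->stages_left > 0) {
        rq->stages_left--;
        rq->req_stat = RS_PENDING;   /* keep the sequence going */
        printf("issuing next stage, %d left\n", rq->stages_left);
        rq->req_stat = RS_SUCCESS;   /* pretend the stage completed */
        stage_done(rq);              /* next completion "arrives" */
        return;
    }
    if (rq->req_stat != RS_PENDING)
        rq->interrupt_cb(rq);        /* all stages finished */
}

static void all_done(struct mock_request *rq)
{
    printf("flash operation complete, stat=%d\n", rq->req_stat);
}

int main(void)
{
    struct mock_request rq = {
        .req_stat = RS_SUCCESS, .stages_left = 2, .interrupt_cb = all_done,
    };

    stage_done(&rq);   /* first stage's completion kicks everything off */
    return 0;
}
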
esas2r_main.c
145 struct esas2r_request *rq; local
148 rq = esas2r_alloc_request(a);
149 if (rq == NULL)
152 if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
155 esas2r_free_request(a, rq);
825 struct esas2r_request *rq; local
838 rq = esas2r_alloc_request(a);
839 if (unlikely(rq == NULL)) {
844 rq->cmd = cmd;
849 rq
892 complete_task_management_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
912 struct esas2r_request *rq; local
1111 struct esas2r_request *rq; local
1198 esas2r_log_request_failure(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1236 esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1484 esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1496 struct esas2r_request *rq; local
1516 esas2r_complete_request_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
[all...]
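
Across the esas2r files the same request lifecycle repeats: esas2r_alloc_request(), a NULL check for pool exhaustion, a completion callback, then esas2r_free_request(). A self-contained userspace sketch of that lifecycle; mock_adapter, mock_request and the helpers are illustrative stand-ins, not the driver's real API:

/* Allocate/check/submit/free request lifecycle, mocked in user space. */
#include <stdio.h>
#include <stdlib.h>

struct mock_request {
    void (*comp_cb)(struct mock_request *rq);   /* completion callback */
    int status;
};

struct mock_adapter { int free_slots; };

static struct mock_request *mock_alloc_request(struct mock_adapter *a)
{
    if (a->free_slots == 0)
        return NULL;               /* pool exhausted: caller must handle it */
    a->free_slots--;
    return calloc(1, sizeof(struct mock_request));
}

static void mock_free_request(struct mock_adapter *a, struct mock_request *rq)
{
    a->free_slots++;
    free(rq);
}

static void on_complete(struct mock_request *rq)
{
    printf("request finished, status %d\n", rq->status);
}

int main(void)
{
    struct mock_adapter a = { .free_slots = 1 };
    struct mock_request *rq = mock_alloc_request(&a);

    if (rq == NULL)                /* mirrors: if (rq == NULL) { ... } */
        return 1;

    rq->comp_cb = on_complete;     /* mirrors: rq->comp_cb = complete_fm_api_req; */
    rq->status = 0;
    rq->comp_cb(rq);               /* hardware completion would fire this */

    mock_free_request(&a, rq);
    return 0;
}
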
/linux-master/drivers/s390/char/
fs3270.c
48 static void fs3270_wake_up(struct raw3270_request *rq, void *data) argument
62 static int fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq) argument
68 rq->callback = fs3270_wake_up;
69 rq->callback_data = &fp->wait;
79 rc = raw3270_start(view, rq);
82 wait_event(fp->wait, raw3270_request_final(rq));
91 static void fs3270_reset_callback(struct raw3270_request *rq, void *data) argument
95 fp = (struct fs3270 *)rq->view;
96 raw3270_request_reset(rq);
100 static void fs3270_restore_callback(struct raw3270_request *rq, voi argument
160 fs3270_save_callback(struct raw3270_request *rq, void *data) argument
217 fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb) argument
242 struct raw3270_request *rq; local
287 struct raw3270_request *rq; local
[all...]
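
fs3270_do_io() turns an asynchronous raw3270 request into a synchronous call: it installs fs3270_wake_up as the callback, starts the I/O, and sleeps in wait_event() until the request is final. The sketch below reproduces that shape with pthreads standing in for kernel wait queues; all names are illustrative:

/* Sync-over-async: install a wake-up callback, sleep until it fires. */
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

struct waiter {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    bool final;                       /* raw3270_request_final() analogue */
};

struct mock_request {
    void (*callback)(struct mock_request *rq, void *data);
    void *callback_data;
};

static void wake_up_cb(struct mock_request *rq, void *data)
{
    struct waiter *w = data;

    (void)rq;
    pthread_mutex_lock(&w->lock);
    w->final = true;                  /* mark the request final ... */
    pthread_cond_signal(&w->cond);    /* ... and wake the sleeper */
    pthread_mutex_unlock(&w->lock);
}

static void *io_thread(void *arg)     /* stands in for the interrupt path */
{
    struct mock_request *rq = arg;

    usleep(10000);                    /* pretend the device worked a while */
    rq->callback(rq, rq->callback_data);
    return NULL;
}

int main(void)
{
    struct waiter w = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false };
    struct mock_request rq = { .callback = wake_up_cb, .callback_data = &w };
    pthread_t t;

    pthread_create(&t, NULL, io_thread, &rq);   /* raw3270_start() analogue */

    pthread_mutex_lock(&w.lock);                /* wait_event() analogue */
    while (!w.final)
        pthread_cond_wait(&w.cond, &w.lock);
    pthread_mutex_unlock(&w.lock);

    puts("request is final, do_io returns");
    pthread_join(t, NULL);
    return 0;
}
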
/linux-master/drivers/staging/rtl8712/
osdep_intf.h
30 int r871x_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
/linux-master/drivers/gpu/drm/i915/display/
intel_display_rps.c
25 struct i915_request *rq = wait->request; local
32 if (!i915_request_started(rq))
33 intel_rps_boost(rq);
34 i915_request_put(rq);
/linux-master/drivers/gpu/drm/i915/gt/
intel_ring_submission.c
354 struct i915_request *pos, *rq; local
358 rq = NULL;
363 rq = pos;
391 if (rq) {
407 __i915_request_reset(rq, stalled);
409 GEM_BUG_ON(rq->ring != engine->legacy.ring);
410 head = rq->head;
604 struct i915_request *rq,
609 if (!rq || !i915_request_is_active(rq))
603 ring_context_revoke(struct intel_context *ce, struct i915_request *rq, unsigned int preempt_timeout_ms) argument
622 ring_context_cancel_request(struct intel_context *ce, struct i915_request *rq) argument
654 load_pd_dir(struct i915_request *rq, struct i915_address_space *vm, u32 valid) argument
688 mi_set_context(struct i915_request *rq, struct intel_context *ce, u32 flags) argument
805 remap_l3_slice(struct i915_request *rq, int slice) argument
835 remap_l3(struct i915_request *rq) argument
856 switch_mm(struct i915_request *rq, struct i915_address_space *vm) argument
882 clear_residuals(struct i915_request *rq) argument
913 switch_context(struct i915_request *rq) argument
1100 add_to_engine(struct i915_request *rq) argument
1106 remove_from_engine(struct i915_request *rq) argument
[all...]
selftest_engine_pm.c
79 struct i915_request *rq; local
82 rq = intel_context_create_request(ce);
83 if (IS_ERR(rq))
84 return PTR_ERR(rq);
86 cs = intel_ring_begin(rq, 28);
88 i915_request_add(rq);
105 intel_ring_advance(rq, cs);
106 i915_request_get(rq);
107 i915_request_add(rq);
125 if (i915_request_wait(rq,
261 struct i915_request *rq; local
[all...]
/linux-master/block/
blk-rq-qos.h
104 void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
105 void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
106 void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
108 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
109 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
119 static inline void rq_qos_done(struct request_queue *q, struct request *rq) argument
121 if (q->rq_qos && !blk_rq_is_passthrough(rq))
122 __rq_qos_done(q->rq_qos, rq);
125 static inline void rq_qos_issue(struct request_queue *q, struct request *rq) argument
128 __rq_qos_issue(q->rq_qos, rq);
131 rq_qos_requeue(struct request_queue *q, struct request *rq) argument
155 rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio) argument
162 rq_qos_merge(struct request_queue *q, struct request *rq, struct bio *bio) argument
[all...]
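
The blk-rq-qos.h hits show the wrapper idiom the block layer uses for its QoS hooks: a static inline tests q->rq_qos (and skips passthrough requests) so the common no-policy path costs only a pointer check, while the out-of-line __rq_qos_*() body runs only when a policy is attached. A mock userspace rendering of the idiom:

/* Inline fast-path gate in front of an out-of-line hook. Mock types. */
#include <stdbool.h>
#include <stdio.h>

struct mock_rqos    { const char *name; };
struct mock_request { bool passthrough; };
struct mock_queue   { struct mock_rqos *rq_qos; };

/* out-of-line slow path, called only when a policy is attached */
static void __mock_qos_done(struct mock_rqos *rqos, struct mock_request *rq)
{
    (void)rq;
    printf("%s: accounting completion\n", rqos->name);
}

/* inline fast path: test the pointer first, exactly like rq_qos_done() */
static inline void mock_qos_done(struct mock_queue *q, struct mock_request *rq)
{
    if (q->rq_qos && !rq->passthrough)
        __mock_qos_done(q->rq_qos, rq);
}

int main(void)
{
    struct mock_rqos wbt = { "wbt" };
    struct mock_queue with = { &wbt }, without = { NULL };
    struct mock_request rq = { .passthrough = false };

    mock_qos_done(&with, &rq);     /* hook runs */
    mock_qos_done(&without, &rq);  /* nothing happens, no call overhead */
    return 0;
}
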
blk-map.c
131 static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data, argument
160 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
192 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
229 ret = blk_rq_append_bio(rq, bio);
253 static struct bio *blk_rq_map_bio_alloc(struct request *rq, argument
258 if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
259 bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
267 bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
272 static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, argument
276 unsigned int max_sectors = queue_max_hw_sectors(rq
537 blk_rq_append_bio(struct request *rq, struct bio *bio) argument
562 blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter) argument
632 blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, const struct iov_iter *iter, gfp_t gfp_mask) argument
687 blk_rq_map_user(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, void __user *ubuf, unsigned long len, gfp_t gfp_mask) argument
782 blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, unsigned int len, gfp_t gfp_mask) argument
[all...]
bsg-lib.c
32 struct request *rq; local
43 rq = blk_mq_alloc_request(q, hdr->dout_xfer_len ?
45 if (IS_ERR(rq))
46 return PTR_ERR(rq);
47 rq->timeout = timeout;
49 job = blk_mq_rq_to_pdu(rq);
64 job->bidi_rq = blk_mq_alloc_request(rq->q, REQ_OP_DRV_IN, 0);
70 ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
84 ret = blk_rq_map_user(rq->q, rq, NUL
159 struct request *rq = blk_mq_rq_from_pdu(job); local
192 struct request *rq = blk_mq_rq_from_pdu(job); local
205 bsg_complete(struct request *rq) argument
335 bsg_timeout(struct request *rq) argument
[all...]
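
bsg-lib.c follows the usual blk-mq passthrough flow: allocate a request, propagate failure as an error-encoded pointer, set rq->timeout, and reach the driver's per-request payload via blk_mq_rq_to_pdu(). The sketch below imitates that flow in user space; is_err()/ptr_err() and the single-allocation pdu layout are simplified mocks of the kernel idioms:

/* Allocate a request, IS_ERR-style failure, timeout, pdu behind the request. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct mock_request { unsigned timeout; /* driver pdu follows in memory */ };
struct mock_job     { int result; };

static inline int  is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
static inline long ptr_err(const void *p) { return (long)(uintptr_t)p; }

static struct mock_request *mock_alloc_request(int fail)
{
    if (fail)
        return (struct mock_request *)(uintptr_t)-ENOMEM;
    /* request and driver pdu in one allocation, pdu right after the request */
    return calloc(1, sizeof(struct mock_request) + sizeof(struct mock_job));
}

static struct mock_job *rq_to_pdu(struct mock_request *rq)
{
    return (struct mock_job *)(rq + 1);    /* blk_mq_rq_to_pdu() analogue */
}

int main(void)
{
    struct mock_request *rq = mock_alloc_request(0);
    struct mock_job *job;

    if (is_err(rq))
        return (int)-ptr_err(rq);      /* mirrors: return PTR_ERR(rq); */

    rq->timeout = 30;                  /* mirrors: rq->timeout = timeout; */
    job = rq_to_pdu(rq);               /* mirrors: blk_mq_rq_to_pdu(rq); */
    job->result = 0;

    printf("timeout=%u result=%d\n", rq->timeout, job->result);
    free(rq);
    return 0;
}
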
blk.h
139 static inline bool rq_mergeable(struct request *rq) argument
141 if (blk_rq_is_passthrough(rq))
144 if (req_op(rq) == REQ_OP_FLUSH)
147 if (req_op(rq) == REQ_OP_WRITE_ZEROES)
150 if (req_op(rq) == REQ_OP_ZONE_APPEND)
153 if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
155 if (rq->rq_flags & RQF_NOMERGE_FLAGS)
177 static inline unsigned int blk_rq_get_max_segments(struct request *rq) argument
179 if (req_op(rq) == REQ_OP_DISCARD)
180 return queue_max_discard_segments(rq
237 blk_integrity_merge_rq(struct request_queue *rq, struct request *r1, struct request *r2) argument
242 blk_integrity_merge_bio(struct request_queue *rq, struct request *r, struct bio *b) argument
363 blk_do_io_stat(struct request *rq) argument
422 blk_zone_update_request_bio(struct request *rq, struct bio *bio) argument
448 blk_zone_finish_request(struct request *rq) argument
475 blk_zone_write_plug_init_request(struct request *rq) argument
478 blk_zone_update_request_bio(struct request *rq, struct bio *bio) argument
485 blk_zone_finish_request(struct request *rq) argument
[all...]
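
rq_mergeable() above is a chain of cheap early returns that disqualify passthrough, flush, write-zeroes, zone-append, and no-merge-flagged requests before any merge work happens. A compact mock of the same shape, with ops and flag bits invented for illustration:

/* Early-return predicate in the shape of rq_mergeable(). Mock values. */
#include <stdbool.h>
#include <stdio.h>

enum mock_op { OP_READ, OP_WRITE, OP_FLUSH, OP_WRITE_ZEROES, OP_ZONE_APPEND };

struct mock_request {
    enum mock_op op;
    unsigned flags;
    bool passthrough;
};

#define MOCK_NOMERGE 0x1u

static bool mock_rq_mergeable(const struct mock_request *rq)
{
    if (rq->passthrough)
        return false;
    if (rq->op == OP_FLUSH)
        return false;
    if (rq->op == OP_WRITE_ZEROES)
        return false;
    if (rq->op == OP_ZONE_APPEND)
        return false;
    if (rq->flags & MOCK_NOMERGE)
        return false;
    return true;
}

int main(void)
{
    struct mock_request a = { .op = OP_WRITE };
    struct mock_request b = { .op = OP_FLUSH };

    printf("write: %d, flush: %d\n", mock_rq_mergeable(&a), mock_rq_mergeable(&b));
    return 0;
}
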
/linux-master/include/linux/
blk-integrity.h
103 static inline bool blk_integrity_rq(struct request *rq) argument
105 return rq->cmd_flags & REQ_INTEGRITY;
112 static inline struct bio_vec *rq_integrity_vec(struct request *rq) argument
114 if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
116 return rq->bio->bi_integrity->bip_vec;
175 static inline int blk_integrity_rq(struct request *rq) argument
180 static inline struct bio_vec *rq_integrity_vec(struct request *rq) argument
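blk-integrity.h defines each helper twice: a real flag test when integrity support is compiled in (line 103 above), and a constant stub otherwise (line 175), so call sites never carry #ifdefs. A sketch of that dual-definition pattern with an invented config symbol and flag bit:

/* Same helper, real body or compiled-out stub, chosen at build time. */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_REQ_INTEGRITY 0x4u

struct mock_request { unsigned cmd_flags; };

#ifdef MOCK_CONFIG_INTEGRITY
static inline bool mock_integrity_rq(const struct mock_request *rq)
{
    return rq->cmd_flags & MOCK_REQ_INTEGRITY;   /* real flag test */
}
#else
static inline bool mock_integrity_rq(const struct mock_request *rq)
{
    (void)rq;
    return false;                                /* compiled-out stub */
}
#endif

int main(void)
{
    struct mock_request rq = { .cmd_flags = MOCK_REQ_INTEGRITY };

    /* Callers stay identical whether or not the feature is built in. */
    if (mock_integrity_rq(&rq))
        puts("integrity metadata attached");
    else
        puts("no integrity support in this build");
    return 0;
}
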
/linux-master/drivers/net/ethernet/intel/ice/
ice_controlq.c
17 (qinfo)->rq.head = prefix##_ARQH; \
18 (qinfo)->rq.tail = prefix##_ARQT; \
19 (qinfo)->rq.len = prefix##_ARQLEN; \
20 (qinfo)->rq.bah = prefix##_ARQBAH; \
21 (qinfo)->rq.bal = prefix##_ARQBAL; \
22 (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
23 (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
24 (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \
25 (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
126 cq->rq
[all...]
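
The ice_controlq.c hit is a token-pasting macro: one body fills the receive-queue register layout for any queue flavor by gluing a prefix onto each register macro, much as the driver does for its PF and VF mailbox queues. A small standalone demo of the technique, with made-up register values:

/* Token-pasting macro filling a register-layout struct. Values invented. */
#include <stdio.h>

#define PF_ARQH  0x100
#define PF_ARQT  0x104
#define VF_ARQH  0x200
#define VF_ARQT  0x204

struct mock_ring  { unsigned head, tail; };
struct mock_qinfo { struct mock_ring rq; };

#define MOCK_CQ_INIT_REGS(qinfo, prefix)      \
    do {                                      \
        (qinfo)->rq.head = prefix##_ARQH;     \
        (qinfo)->rq.tail = prefix##_ARQT;     \
    } while (0)

int main(void)
{
    struct mock_qinfo pf, vf;

    MOCK_CQ_INIT_REGS(&pf, PF);   /* expands to PF_ARQH, PF_ARQT */
    MOCK_CQ_INIT_REGS(&vf, VF);   /* same code, VF register set */

    printf("pf head=0x%x vf head=0x%x\n", pf.rq.head, vf.rq.head);
    return 0;
}
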
/linux-master/drivers/usb/misc/
uss720.c
80 struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count); local
81 struct parport_uss720_private *priv = rq->priv;
84 if (likely(rq->urb))
85 usb_free_urb(rq->urb);
86 kfree(rq->dr);
88 list_del_init(&rq->asynclist);
90 kfree(rq);
98 struct uss720_async_request *rq; local
103 rq = urb->context;
104 priv = rq
128 struct uss720_async_request *rq; local
178 struct uss720_async_request *rq; local
196 struct uss720_async_request *rq; local
232 struct uss720_async_request *rq; local
[all...]
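
uss720.c tears requests down through a kref release function: when the last reference drops, the release callback recovers the enclosing uss720_async_request with container_of(), frees the URB and setup packet, unlinks the request, and frees it. The sketch below keeps the container_of() trick but replaces kref and the locked list with a plain counter:

/* Refcounted teardown via container_of(). Simplified: no kref, no locking. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct mock_async_request {
    int ref_count;       /* stands in for struct kref */
    void *dr;            /* sub-allocation, freed in release */
};

static void destroy_async(int *kref)
{
    struct mock_async_request *rq =
        container_of(kref, struct mock_async_request, ref_count);

    free(rq->dr);        /* mirrors: kfree(rq->dr); */
    free(rq);            /* mirrors: kfree(rq); */
    puts("request destroyed");
}

static void mock_put(struct mock_async_request *rq)
{
    if (--rq->ref_count == 0)          /* kref_put() analogue */
        destroy_async(&rq->ref_count);
}

int main(void)
{
    struct mock_async_request *rq = calloc(1, sizeof(*rq));

    rq->ref_count = 2;                 /* submitter + completion each hold one */
    rq->dr = malloc(8);

    mock_put(rq);                      /* completion drops its reference */
    mock_put(rq);                      /* submitter's drop frees everything */
    return 0;
}
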
/linux-master/drivers/gpu/drm/i915/selftests/
i915_request.c
367 struct i915_request *rq; local
372 rq = t->request_alloc(ce);
374 if (IS_ERR(rq)) {
375 err = PTR_ERR(rq);
380 err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
384 requests[n] = i915_request_get(rq);
385 i915_request_add(rq);
389 &rq->fence,
394 i915_request_put(rq);
406 struct i915_request *rq local
421 struct i915_request *rq = requests[n]; local
647 struct i915_request *rq; local
700 struct i915_request *rq; local
761 struct i915_request *rq; local
822 struct i915_request *rq, *nop; local
1008 emit_bb_start(struct i915_request *rq, struct i915_vma *batch) argument
1275 struct i915_request *rq = request[idx]; local
1300 struct i915_request *rq = request[idx]; local
1476 struct i915_request *rq; local
1514 struct i915_request *rq; local
1560 struct i915_request *rq; local
1690 struct i915_request *rq; local
1891 struct i915_request *rq; local
2020 struct i915_request *rq; local
2111 struct i915_request *rq; local
2183 struct i915_request *rq; local
2243 struct i915_request *rq; local
2298 struct i915_request *rq; local
2388 struct i915_request *rq; local
2479 struct i915_request *rq; local
2591 struct i915_request *rq; local
2739 struct i915_request *rq; local
2773 struct i915_request *rq; local
2807 struct i915_request *rq; local
2998 struct i915_request *rq; local
3073 struct i915_request *rq; local
3149 struct i915_request *rq; local
[all...]
/linux-master/kernel/sched/
loadavg.c
78 long calc_load_fold_active(struct rq *this_rq, long adjust)
233 static void calc_load_nohz_fold(struct rq *rq) argument
237 delta = calc_load_fold_active(rq, 0);
258 void calc_load_nohz_remote(struct rq *rq) argument
260 calc_load_nohz_fold(rq);
265 struct rq *this_rq = this_rq();
385 void calc_global_load_tick(struct rq *this_rq)
fair.c
311 struct rq *rq = rq_of(cfs_rq); local
312 int cpu = cpu_of(rq);
315 return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
343 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
349 * cfs rq without parent should be put
353 &rq->leaf_cfs_rq_list);
358 rq
380 struct rq *rq = rq_of(cfs_rq); local
397 assert_list_leaf_cfs_rq(struct rq *rq) argument
485 assert_list_leaf_cfs_rq(struct rq *rq) argument
1106 update_curr_se(struct rq *rq, struct sched_entity *curr) argument
1141 update_curr_common(struct rq *rq) argument
1178 update_curr_fair(struct rq *rq) argument
1493 account_numa_enqueue(struct rq *rq, struct task_struct *p) argument
1499 account_numa_dequeue(struct rq *rq, struct task_struct *p) argument
2046 struct rq *rq = cpu_rq(cpu); local
2078 struct rq *rq = cpu_rq(env->dst_cpu); local
3492 task_tick_numa(struct rq *rq, struct task_struct *curr) argument
3557 task_tick_numa(struct rq *rq, struct task_struct *curr) argument
3561 account_numa_enqueue(struct rq *rq, struct task_struct *p) argument
3565 account_numa_dequeue(struct rq *rq, struct task_struct *p) argument
3581 struct rq *rq = rq_of(cfs_rq); local
3993 struct rq *rq = rq_of(cfs_rq); local
4060 struct rq *rq = rq_of(cfs_rq); local
4148 clear_tg_offline_cfs_rqs(struct rq *rq) argument
4468 clear_tg_offline_cfs_rqs(struct rq *rq) argument
4484 struct rq *rq; local
5108 update_misfit_status(struct task_struct *p, struct rq *rq) argument
5158 sched_balance_newidle(struct rq *rq, struct rq_flags *rf) argument
5172 update_misfit_status(struct task_struct *p, struct rq *rq) argument
5339 struct rq *rq = rq_of(cfs_rq); local
5717 struct rq *rq = data; local
5746 struct rq *rq = data; local
5765 struct rq *rq = rq_of(cfs_rq); local
5854 struct rq *rq = rq_of(cfs_rq); local
5942 struct rq *rq = arg; local
5980 struct rq *rq = rq_of(cfs_rq); local
6022 struct rq *rq; local
6079 struct rq *rq = rq_of(cfs_rq); local
6460 struct rq *rq = cpu_rq(i); local
6481 update_runtime_enabled(struct rq *rq) argument
6500 unthrottle_offline_cfs_rqs(struct rq *rq) argument
6555 sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) argument
6618 update_runtime_enabled(struct rq *rq) argument
6619 unthrottle_offline_cfs_rqs(struct rq *rq) argument
6629 sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) argument
6637 hrtick_start_fair(struct rq *rq, struct task_struct *p) argument
6662 hrtick_update(struct rq *rq) argument
6673 hrtick_start_fair(struct rq *rq, struct task_struct *p) argument
6677 hrtick_update(struct rq *rq) argument
6714 check_update_overutilized_status(struct rq *rq) argument
6725 check_update_overutilized_status(struct rq *rq) argument
6729 sched_idle_rq(struct rq *rq) argument
6748 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) argument
6841 dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) argument
6931 cpu_load(struct rq *rq) argument
6949 cpu_load_without(struct rq *rq, struct task_struct *p) argument
6967 cpu_runnable(struct rq *rq) argument
6972 cpu_runnable_without(struct rq *rq, struct task_struct *p) argument
7166 struct rq *rq = cpu_rq(i); local
7300 __update_idle_core(struct rq *rq) argument
8050 struct rq *rq = cpu_rq(cpu); local
8324 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
8349 check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int wake_flags) argument
8428 pick_task_fair(struct rq *rq) argument
8461 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
8605 __pick_next_task_fair(struct rq *rq) argument
8613 put_prev_task_fair(struct rq *rq, struct task_struct *prev) argument
8627 yield_task_fair(struct rq *rq) argument
8656 yield_to_task_fair(struct rq *rq, struct task_struct *p) argument
9261 attach_task(struct rq *rq, struct task_struct *p) argument
9274 attach_one_task(struct rq *rq, struct task_struct *p) argument
9319 others_have_blocked(struct rq *rq) argument
9336 update_blocked_load_tick(struct rq *rq) argument
9341 update_blocked_load_status(struct rq *rq, bool has_blocked) argument
9348 others_have_blocked(struct rq *rq) argument
9349 update_blocked_load_tick(struct rq *rq) argument
9350 update_blocked_load_status(struct rq *rq, bool has_blocked) argument
9353 __update_blocked_others(struct rq *rq, bool *done) argument
9381 __update_blocked_fair(struct rq *rq, bool *done) argument
9431 struct rq *rq = rq_of(cfs_rq); local
9471 __update_blocked_fair(struct rq *rq, bool *done) argument
9492 struct rq *rq = cpu_rq(cpu); local
9572 struct rq *rq = cpu_rq(cpu); local
9673 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) argument
9680 check_misfit_status(struct rq *rq) argument
9923 sched_reduced_capacity(struct rq *rq, struct sched_domain *sd) argument
9958 struct rq *rq = cpu_rq(i); local
10186 fbq_classify_rq(struct rq *rq) argument
10200 fbq_classify_rq(struct rq *rq) argument
10234 struct rq *rq = cpu_rq(cpu); local
10272 struct rq *rq = cpu_rq(i); local
11026 struct rq *busiest = NULL, *rq; local
11756 sched_balance_domains(struct rq *rq, enum cpu_idle_type idle) argument
11838 on_null_domain(struct rq *rq) argument
11914 nohz_balancer_kick(struct rq *rq) argument
12046 nohz_balance_exit_idle(struct rq *rq) argument
12082 struct rq *rq = cpu_rq(cpu); local
12137 update_nohz_stats(struct rq *rq) argument
12169 struct rq *rq; local
12337 nohz_balancer_kick(struct rq *rq) argument
12509 sched_balance_trigger(struct rq *rq) argument
12524 rq_online_fair(struct rq *rq) argument
12531 rq_offline_fair(struct rq *rq) argument
12555 task_tick_core(struct rq *rq, struct task_struct *curr) argument
12598 task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) argument
12611 struct rq *rq = task_rq(a); local
12668 task_tick_core(struct rq *rq, struct task_struct *curr) argument
12679 task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) argument
12707 struct rq *rq = this_rq(); local
12728 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) argument
12829 switched_from_fair(struct rq *rq, struct task_struct *p) argument
12834 switched_to_fair(struct rq *rq, struct task_struct *p) argument
12858 set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) argument
12970 struct rq *rq; local
12987 struct rq *rq; local
13015 struct rq *rq = cpu_rq(cpu); local
13063 struct rq *rq = cpu_rq(i); local
13114 struct rq *rq = cpu_rq(i); local
13170 get_rr_interval_fair(struct rq *rq, struct task_struct *task) argument
[all...]
/linux-master/drivers/gpu/drm/i915/
i915_trace.h
267 TP_PROTO(struct i915_request *rq, u32 flags),
268 TP_ARGS(rq, flags),
280 __entry->dev = rq->i915->drm.primary->index;
281 __entry->class = rq->engine->uabi_class;
282 __entry->instance = rq->engine->uabi_instance;
283 __entry->ctx = rq->fence.context;
284 __entry->seqno = rq->fence.seqno;
294 TP_PROTO(struct i915_request *rq),
295 TP_ARGS(rq),
307 __entry->dev = rq
500 trace_i915_request_guc_submit(struct i915_request *rq) argument
505 trace_i915_request_submit(struct i915_request *rq) argument
510 trace_i915_request_execute(struct i915_request *rq) argument
515 trace_i915_request_in(struct i915_request *rq, unsigned int port) argument
520 trace_i915_request_out(struct i915_request *rq) argument
[all...]
i915_active.h
81 * @rq: the request to watch
83 * i915_active_fence_set() watches the given @rq for completion. While
84 * that @rq is busy, the @active reports busy. When that @rq is signaled
89 struct i915_request *rq);
167 int i915_active_add_request(struct i915_active *ref, struct i915_request *rq);
181 int i915_request_await_active(struct i915_request *rq,
211 void i915_request_add_active_barriers(struct i915_request *rq);
220 static inline int __i915_request_await_exclusive(struct i915_request *rq, argument
228 err = i915_request_await_dma_fence(rq, fenc
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
channels.c
32 *rqn = c->rq.rqn;
56 *rqn = c->rq.rqn;
65 int err = mlx5e_dim_rx_change(&chs->c[i]->rq, enable);
101 if (chs->c[i]->rq.dim) {
104 mlx5e_dim_rx_change(&chs->c[i]->rq, false);
105 err = mlx5e_dim_rx_change(&chs->c[i]->rq, true);
/linux-master/drivers/net/
virtio_net.c
382 struct receive_queue *rq; member in struct:virtnet_info
578 static void give_pages(struct receive_queue *rq, struct page *page) argument
582 /* Find end of list, sew whole thing into vi->rq.pages. */
584 end->private = (unsigned long)rq->pages;
585 rq->pages = page;
588 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) argument
590 struct page *p = rq->pages;
593 rq->pages = (struct page *)p->private;
602 struct receive_queue *rq, void *buf)
607 give_pages(rq, bu
601 virtnet_rq_free_buf(struct virtnet_info *vi, struct receive_queue *rq, void *buf) argument
716 page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, unsigned int len, unsigned int truesize, unsigned int headroom) argument
815 virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len) argument
844 virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx) argument
855 virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len) argument
875 virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp) argument
947 struct receive_queue *rq; local
1118 struct receive_queue *rq = vi->rq; local
1260 xdp_linearize_page(struct receive_queue *rq, int *num_buf, struct page *p, int offset, int page_off, unsigned int *len) argument
1339 receive_small_xdp(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, struct bpf_prog *xdp_prog, void *buf, unsigned int xdp_headroom, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) argument
1426 receive_small(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) argument
1473 receive_big(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len, struct virtnet_rq_stats *stats) argument
1496 mergeable_buf_free(struct receive_queue *rq, int num_buf, struct net_device *dev, struct virtnet_rq_stats *stats) argument
1568 virtnet_build_xdp_buff_mrg(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, struct xdp_buff *xdp, void *buf, unsigned int len, unsigned int frame_sz, int *num_buf, unsigned int *xdp_frags_truesize, struct virtnet_rq_stats *stats) argument
1657 mergeable_xdp_get_buf(struct virtnet_info *vi, struct receive_queue *rq, struct bpf_prog *xdp_prog, void *ctx, unsigned int *frame_sz, int *num_buf, struct page **page, int offset, unsigned int *len, struct virtio_net_hdr_mrg_rxbuf *hdr) argument
1731 receive_mergeable_xdp(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, struct bpf_prog *xdp_prog, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) argument
1791 receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) argument
1938 receive_buf(struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len, void **ctx, unsigned int *xdp_xmit, struct virtnet_rq_stats *stats) argument
1998 add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) argument
2026 add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) argument
2075 get_mergeable_buf_len(struct receive_queue *rq, struct ewma_pkt_len *avg_pkt_len, unsigned int room) argument
2092 add_recvbuf_mergeable(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) argument
2147 try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) argument
2179 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; local
2230 struct receive_queue *rq = &vi->rq[i]; local
2244 virtnet_receive(struct receive_queue *rq, int budget, unsigned int *xdp_xmit) argument
2298 virtnet_poll_cleantx(struct receive_queue *rq) argument
2332 virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq) argument
2352 struct receive_queue *rq = local
2612 virtnet_rx_resize(struct virtnet_info *vi, struct receive_queue *rq, u32 ring_num) argument
2800 struct receive_queue *rq = &vi->rq[i]; local
3203 struct receive_queue *rq; local
4171 struct receive_queue *rq = &vi->rq[i]; local
4398 struct receive_queue *rq = container_of(dim, local
4741 struct receive_queue *rq = &vi->rq[i]; local
[all...]
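
virtio_net's give_pages()/get_a_page() pair above keeps spare receive pages on an intrusive free list threaded through page->private, so the list needs no allocation of its own. A userspace sketch with a mock page type; note the real give_pages() splices a whole chain of pages, not just one:

/* Intrusive LIFO free list threaded through page->private. Mock types. */
#include <stdio.h>

struct mock_page { unsigned long private; };   /* next pointer lives here */
struct mock_rq   { struct mock_page *pages; }; /* head of the free list */

static void give_page(struct mock_rq *rq, struct mock_page *page)
{
    page->private = (unsigned long)rq->pages;  /* sew into the list */
    rq->pages = page;
}

static struct mock_page *get_a_page(struct mock_rq *rq)
{
    struct mock_page *p = rq->pages;

    if (p)
        rq->pages = (struct mock_page *)p->private;  /* pop the head */
    return p;
}

int main(void)
{
    struct mock_rq rq = { NULL };
    struct mock_page a, b;

    give_page(&rq, &a);
    give_page(&rq, &b);

    struct mock_page *first  = get_a_page(&rq);
    struct mock_page *second = get_a_page(&rq);

    printf("popped %p then %p\n", (void *)first, (void *)second);  /* b, then a: LIFO */
    return 0;
}
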
/linux-master/drivers/gpu/drm/scheduler/
sched_main.c
158 struct drm_sched_rq *rq = entity->rq; local
161 rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
169 * Both locks need to be grabbed, one to protect from entity->rq change
174 spin_lock(&entity->rq->lock);
180 rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
183 spin_unlock(&entity->rq->lock);
191 * @rq: scheduler run queue
196 struct drm_sched_rq *rq)
198 spin_lock_init(&rq
195 drm_sched_rq_init(struct drm_gpu_scheduler *sched, struct drm_sched_rq *rq) argument
213 drm_sched_rq_add_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) argument
235 drm_sched_rq_remove_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) argument
268 drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched, struct drm_sched_rq *rq) argument
333 drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched, struct drm_sched_rq *rq) argument
1345 struct drm_sched_rq *rq = sched->sched_rq[i]; local
1397 struct drm_sched_rq *rq = sched->sched_rq[i]; local
[all...]
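
sched_main.c keeps runnable entities in a cached rb-tree ordered by each entity's oldest pending deadline; when that key changes, the entity is removed and re-inserted, and FIFO selection takes the leftmost node. The sketch below swaps the rb-tree for a sorted singly linked list to stay short; the remove/update/re-add discipline and front-of-queue selection are the point, and all names are invented:

/* Deadline-ordered run queue, rb-tree replaced by a sorted list. */
#include <stdio.h>

struct mock_entity {
    unsigned long deadline;          /* ordering key */
    struct mock_entity *next;
    const char *name;
};

struct mock_rq { struct mock_entity *head; };   /* kept sorted by deadline */

static void rq_remove_entity(struct mock_rq *rq, struct mock_entity *e)
{
    struct mock_entity **pp = &rq->head;

    while (*pp && *pp != e)
        pp = &(*pp)->next;
    if (*pp)
        *pp = e->next;
}

static void rq_add_entity(struct mock_rq *rq, struct mock_entity *e)
{
    struct mock_entity **pp = &rq->head;

    while (*pp && (*pp)->deadline <= e->deadline)
        pp = &(*pp)->next;
    e->next = *pp;                   /* insert in deadline order */
    *pp = e;
}

/* FIFO selection: the front of the list holds the oldest deadline, as the
 * leftmost rb-tree node does in drm_sched_rq_select_entity_fifo(). */
static struct mock_entity *rq_select_fifo(struct mock_rq *rq)
{
    return rq->head;
}

int main(void)
{
    struct mock_rq rq = { NULL };
    struct mock_entity a = { 20, NULL, "a" }, b = { 10, NULL, "b" };

    rq_add_entity(&rq, &a);
    rq_add_entity(&rq, &b);
    printf("runs first: %s\n", rq_select_fifo(&rq)->name);      /* b */

    rq_remove_entity(&rq, &b);       /* key changed: remove, update, re-add */
    b.deadline = 30;
    rq_add_entity(&rq, &b);
    printf("runs first now: %s\n", rq_select_fifo(&rq)->name);  /* a */
    return 0;
}
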
/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.h
123 struct hinic_rq rq; member in struct:hinic_qp
136 struct hinic_rq *rq, u16 global_qid);
144 int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
147 void hinic_clean_rq(struct hinic_rq *rq);
151 int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
206 struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
209 void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
212 struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
216 struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
221 void hinic_rq_put_wqe(struct hinic_rq *rq, u1
[all...]

Completed in 327 milliseconds
