Searched refs:rq (Results 26 - 50 of 634) sorted by relevance

/linux-master/tools/testing/selftests/bpf/progs/
test_ksyms_weak.c
21 extern const struct rq runqueues __ksym __weak; /* typed */
39 struct rq *rq; local
42 rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
43 if (rq && bpf_ksym_exists(&runqueues))
44 out__existing_typed = rq->cpu;
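
The selftest hits above show the typed weak-ksym idiom end to end: declare the kernel symbol __ksym __weak, then gate every dereference behind both a NULL check and bpf_ksym_exists(). A minimal standalone sketch of the same pattern (assuming vmlinux.h and libbpf's bpf_helpers.h; the section name and printout are illustrative, not taken from the selftest):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Weak typed ksym: resolves to NULL if the running kernel lacks it. */
extern const struct rq runqueues __ksym __weak;

SEC("raw_tp/sys_enter")
int probe_runqueues(const void *ctx)
{
	struct rq *rq;

	/* bpf_per_cpu_ptr() may return NULL; the verifier insists on the check. */
	rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
	if (rq && bpf_ksym_exists(&runqueues))
		bpf_printk("cpu0 rq->cpu = %d", rq->cpu);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
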
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
19 int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) argument
21 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
22 struct mlx5e_icosq *icosq = rq->icosq;
30 if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))
35 batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs,
36 rq->mpwqe.pages_per_wqe);
44 for (; batch < rq->mpwqe.pages_per_wqe; batch++) {
45 xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool);
50 pi = mlx5e_icosq_get_next_pi(icosq, rq
160 mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk) argument
198 mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk) argument
225 mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_buff *xdp) argument
247 mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset, u32 page_idx) argument
304 mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, struct mlx5_cqe64 *cqe, u32 cqe_bcnt) argument
[all...]
/linux-master/drivers/gpu/drm/i915/gt/
gen2_engine_cs.c
16 int gen2_emit_flush(struct i915_request *rq, u32 mode) argument
25 cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);
38 intel_ring_advance(rq, cs);
43 int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode) argument
79 if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5)
87 cs = intel_ring_begin(rq, i);
105 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
115 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
124 intel_ring_advance(rq, cs);
129 gen4_emit_flush_vcs(struct i915_request *rq, u32 mode) argument
144 __gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs, int flush, int post) argument
172 gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs) argument
177 gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) argument
186 i830_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) argument
251 gen3_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) argument
271 gen4_emit_bb_start(struct i915_request *rq, u64 offset, u32 length, unsigned int dispatch_flags) argument
[all...]
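
All of these gen2-gen4 emitters follow the same ring-buffer contract visible in the hits: intel_ring_begin() reserves a fixed number of dwords on the request's ring, the returned cursor is filled in, and intel_ring_advance() commits exactly that reservation. A hedged sketch of the contract (kernel-internal i915 API; the MI_NOOP payload is purely illustrative, not from gen2_engine_cs.c):

/* Assumes i915's gt/intel_ring.h and gt/intel_gpu_commands.h. */
static int emit_two_noops(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);	/* reserve 2 dwords; may return ERR_PTR */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);	/* cs must sit exactly at the reservation end */
	return 0;
}
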
intel_breadcrumbs.c
111 check_signal_order(struct intel_context *ce, struct i915_request *rq) argument
113 if (rq->context != ce)
116 if (!list_is_last(&rq->signal_link, &ce->signals) &&
117 i915_seqno_passed(rq->fence.seqno,
118 list_next_entry(rq, signal_link)->fence.seqno))
121 if (!list_is_first(&rq->signal_link, &ce->signals) &&
122 i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
123 rq->fence.seqno))
212 struct i915_request *rq; local
214 list_for_each_entry_rcu(rq,
250 struct i915_request *rq = local
338 irq_signal_request(struct i915_request *rq, struct intel_breadcrumbs *b) argument
349 insert_breadcrumb(struct i915_request *rq) argument
411 i915_request_enable_breadcrumb(struct i915_request *rq) argument
436 i915_request_cancel_breadcrumb(struct i915_request *rq) argument
463 struct i915_request *rq, *rn; local
496 struct i915_request *rq; local
[all...]
intel_engine_heartbeat.c
25 struct i915_request *rq; local
30 rq = engine->heartbeat.systole;
41 if (rq && rq->sched.attr.priority >= I915_PRIORITY_BARRIER &&
70 struct i915_request *rq; local
73 rq = __i915_request_create(ce, gfp);
76 return rq;
79 static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq) argument
82 i915_request_add_active_barriers(rq);
84 engine->heartbeat.systole = i915_request_get(rq);
87 heartbeat_commit(struct i915_request *rq, const struct i915_sched_attr *attr) argument
96 show_heartbeat(const struct i915_request *rq, struct intel_engine_cs *engine) argument
117 reset_engine(struct intel_engine_cs *engine, struct i915_request *rq) argument
142 struct i915_request *rq; local
279 struct i915_request *rq; local
384 struct i915_request *rq; local
[all...]
selftest_execlists.c
28 static bool is_active(struct i915_request *rq) argument
30 if (i915_request_is_active(rq))
33 if (i915_request_on_hold(rq))
36 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
43 struct i915_request *rq,
53 if (i915_request_completed(rq)) /* that was quick! */
58 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
69 struct i915_request *rq,
81 if (i915_request_completed(rq))
42 wait_for_submit(struct intel_engine_cs *engine, struct i915_request *rq, unsigned long timeout) argument
68 wait_for_reset(struct intel_engine_cs *engine, struct i915_request *rq, unsigned long timeout) argument
125 struct i915_request *rq; local
182 struct i915_request *rq[2]; local
344 struct i915_request *rq; local
493 struct i915_request *rq; local
601 struct i915_request *rq; local
720 struct i915_request *rq; local
820 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) argument
860 struct i915_request *rq; local
895 struct i915_request *rq; local
946 struct i915_request *rq; local
1056 struct i915_request *rq; local
1130 struct i915_request *rq[3] = {}; local
1259 struct i915_request *rq; local
1330 struct i915_request *rq, *nop; local
1430 struct i915_request *rq; local
1721 struct i915_request *rq; local
1759 struct i915_request *rq; local
1852 struct i915_request *rq; local
2058 struct i915_request *rq; local
2102 struct i915_request *rq[2] = {}; local
2173 struct i915_request *rq[3] = {}; local
2268 struct i915_request *rq; local
2325 struct i915_request *rq; local
2576 struct i915_request *rq; local
2706 struct i915_request *rq; local
2801 struct i915_request *rq; local
2975 struct i915_request *rq = NULL; local
3153 struct i915_request *rq; local
3214 struct i915_request *rq; local
3299 struct i915_request *rq; local
3398 struct i915_request *rq; local
3490 struct i915_request *rq; local
3754 struct i915_request *rq; local
3771 struct i915_request *rq; local
4027 struct i915_request *rq; local
4094 struct i915_request *rq; local
4226 struct i915_request *rq; local
4337 struct i915_request *rq; local
[all...]
/linux-master/drivers/gpu/drm/i915/pxp/
intel_pxp_cmd.c
83 static void pxp_request_commit(struct i915_request *rq) argument
86 struct intel_timeline * const tl = i915_request_timeline(rq);
88 lockdep_unpin_lock(&tl->mutex, rq->cookie);
90 trace_i915_request_add(rq);
91 __i915_request_commit(rq);
92 __i915_request_queue(rq, &attr);
99 struct i915_request *rq; local
107 rq = i915_request_create(ce);
108 if (IS_ERR(rq))
109 return PTR_ERR(rq);
[all...]
/linux-master/kernel/sched/
deadline.c
68 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
70 return container_of(dl_rq, struct rq, dl);
73 static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
75 struct rq *rq = dl_se->rq; local
78 rq = task_rq(dl_task_of(dl_se));
80 return rq;
154 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
190 struct rq *rq; local
326 struct rq *rq; local
410 struct rq *rq = rq_of_dl_se(dl_se); local
548 dl_overloaded(struct rq *rq) argument
553 dl_set_overload(struct rq *rq) argument
569 dl_clear_overload(struct rq *rq) argument
586 has_pushable_dl_tasks(struct rq *rq) argument
595 enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) argument
613 dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) argument
636 need_pull_dl_task(struct rq *rq, struct task_struct *prev) argument
647 deadline_queue_push_tasks(struct rq *rq) argument
655 deadline_queue_pull_task(struct rq *rq) argument
662 dl_task_offline_migration(struct rq *rq, struct task_struct *p) argument
735 enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) argument
740 dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) argument
754 deadline_queue_push_tasks(struct rq *rq) argument
758 deadline_queue_pull_task(struct rq *rq) argument
769 replenish_dl_new_period(struct sched_dl_entity *dl_se, struct rq *rq) argument
792 struct rq *rq = rq_of_dl_rq(dl_rq); local
834 struct rq *rq = rq_of_dl_rq(dl_rq); local
951 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq) argument
1014 struct rq *rq = rq_of_dl_se(dl_se); local
1049 struct rq *rq = rq_of_dl_rq(dl_rq); local
1091 __push_dl_task(struct rq *rq, struct rq_flags *rf) argument
1130 struct rq *rq; local
1133 struct rq *rq = rq_of_dl_se(dl_se); local
1268 struct rq *rq = rq_of_dl_se(dl_se); local
1303 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) argument
1326 update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec) argument
1430 dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_server_has_tasks_f has_tasks, dl_server_pick_f pick) argument
1443 update_curr_dl(struct rq *rq) argument
1471 struct rq *rq; local
1537 struct rq *rq = rq_of_dl_rq(dl_rq); local
1550 struct rq *rq = rq_of_dl_rq(dl_rq); local
1788 enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) argument
1845 dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) argument
1867 yield_task_dl(struct rq *rq) argument
1889 dl_task_is_earliest_deadline(struct task_struct *p, struct rq *rq) argument
1904 struct rq *rq; local
1951 struct rq *rq; local
1981 check_preempt_equal_dl(struct rq *rq, struct task_struct *p) argument
2002 balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) argument
2024 wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags) argument
2044 start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se) argument
2049 start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se) argument
2054 set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) argument
2085 pick_task_dl(struct rq *rq) argument
2114 pick_next_task_dl(struct rq *rq) argument
2131 put_prev_task_dl(struct rq *rq, struct task_struct *p) argument
2141 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); local
2154 task_tick_dl(struct rq *rq, struct task_struct *p, int queued) argument
2158 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); local
2182 pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) argument
2194 pick_earliest_pushable_dl_task(struct rq *rq, int cpu) argument
2308 find_lock_later_rq(struct task_struct *task, struct rq *rq) argument
2362 pick_next_pushable_dl_task(struct rq *rq) argument
2386 push_dl_task(struct rq *rq) argument
2461 push_dl_tasks(struct rq *rq) argument
2564 task_woken_dl(struct rq *rq, struct task_struct *p) argument
2580 struct rq *rq; local
2610 rq_online_dl(struct rq *rq) argument
2621 rq_offline_dl(struct rq *rq) argument
2642 struct rq *rq; local
2674 switched_from_dl(struct rq *rq, struct task_struct *p) argument
2728 switched_to_dl(struct rq *rq, struct task_struct *p) argument
2756 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); local
2764 prio_changed_dl(struct rq *rq, struct task_struct *p, int oldprio) argument
[all...]
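
rq_of_dl_rq() above (and rq_of_rt_rq() in rt.c below) is the container_of() idiom: struct rq embeds its per-class sub-runqueue, so a pointer to the member can be mapped back to the enclosing runqueue by subtracting the member's offset. A userspace re-illustration of the arithmetic (struct layouts abbreviated, not the kernel definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dl_rq { int dl_nr_running; };
struct rq { int cpu; struct dl_rq dl; };	/* rq embeds its dl_rq */

static struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

int main(void)
{
	struct rq r = { .cpu = 3 };

	printf("cpu = %d\n", rq_of_dl_rq(&r.dl)->cpu);	/* prints 3 */
	return 0;
}
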
core.c
119 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
240 void sched_core_enqueue(struct rq *rq, struct task_struct *p) argument
242 rq->core->core_task_seq++;
247 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
250 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) argument
252 rq->core->core_task_seq++;
255 rb_erase(&p->core_node, &rq->core_tree);
264 if (!(flags & DEQUEUE_SAVE) && rq
300 sched_core_find(struct rq *rq, unsigned long cookie) argument
454 sched_core_enqueue(struct rq *rq, struct task_struct *p) argument
456 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) argument
552 raw_spin_rq_lock_nested(struct rq *rq, int subclass) argument
577 raw_spin_rq_trylock(struct rq *rq) argument
601 raw_spin_rq_unlock(struct rq *rq) argument
631 struct rq *rq; variable in typeref:struct:rq
637 raw_spin_rq_lock(rq); variable
642 raw_spin_rq_unlock(rq); variable
656 struct rq *rq; variable in typeref:struct:rq
661 raw_spin_rq_lock(rq); variable
683 raw_spin_rq_unlock(rq); variable
695 update_rq_clock_task(struct rq *rq, s64 delta) argument
751 update_rq_clock(struct rq *rq) argument
778 hrtick_clear(struct rq *rq) argument
790 struct rq *rq = container_of(timer, struct rq, hrtick_timer); local
805 __hrtick_restart(struct rq *rq) argument
818 struct rq *rq = arg; local
831 hrtick_start(struct rq *rq, u64 delay) argument
855 hrtick_start(struct rq *rq, u64 delay) argument
868 hrtick_rq_init(struct rq *rq) argument
877 hrtick_clear(struct rq *rq) argument
881 hrtick_rq_init(struct rq *rq) argument
1041 resched_curr(struct rq *rq) argument
1067 struct rq *rq = cpu_rq(cpu); local
1130 struct rq *rq = cpu_rq(cpu); local
1196 struct rq *rq = info; local
1216 __need_bw_check(struct rq *rq, struct task_struct *p) argument
1230 sched_can_stop_tick(struct rq *rq) argument
1439 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id, unsigned int clamp_value) argument
1455 uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, unsigned int clamp_value) argument
1466 uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, unsigned int clamp_value) argument
1584 uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, enum uclamp_id clamp_id) argument
1622 uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, enum uclamp_id clamp_id) argument
1688 uclamp_rq_inc(struct rq *rq, struct task_struct *p) argument
1712 uclamp_rq_dec(struct rq *rq, struct task_struct *p) argument
1732 uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, enum uclamp_id clamp_id) argument
1754 struct rq *rq; local
2027 init_uclamp_rq(struct rq *rq) argument
2067 uclamp_rq_inc(struct rq *rq, struct task_struct *p) argument
2068 uclamp_rq_dec(struct rq *rq, struct task_struct *p) argument
2105 enqueue_task(struct rq *rq, struct task_struct *p, int flags) argument
2122 dequeue_task(struct rq *rq, struct task_struct *p, int flags) argument
2139 activate_task(struct rq *rq, struct task_struct *p, int flags) argument
2152 deactivate_task(struct rq *rq, struct task_struct *p, int flags) argument
2224 check_class_changed(struct rq *rq, struct task_struct *p, const struct sched_class *prev_class, int oldprio) argument
2237 wakeup_preempt(struct rq *rq, struct task_struct *p, int flags) argument
2296 struct rq *rq; local
2398 migrate_disable_switch(struct rq *rq, struct task_struct *p) argument
2466 rq_has_pinned_tasks(struct rq *rq) argument
2520 move_queued_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p, int new_cpu) argument
2566 __migrate_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p, int dest_cpu) argument
2588 struct rq *rq = this_rq(); local
2691 struct rq *lowest_rq = NULL, *rq = this_rq(); local
2756 struct rq *rq = task_rq(p); local
3127 update_rq_clock(rq); variable
3206 struct rq *rq; local
3250 struct rq *rq; local
3710 migrate_disable_switch(struct rq *rq, struct task_struct *p) argument
3712 rq_has_pinned_tasks(struct rq *rq) argument
3727 struct rq *rq; local
3773 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, struct rq_flags *rf) argument
3853 struct rq *rq; local
3878 struct rq *rq = this_rq(); local
3936 struct rq *rq = cpu_rq(cpu); local
3946 struct rq *rq = cpu_rq(cpu); local
4049 struct rq *rq = cpu_rq(cpu); local
4437 struct rq *rq = NULL; local
4880 struct rq *rq; local
5039 do_balance_callbacks(struct rq *rq, struct balance_callback *head) argument
5075 __splice_balance_callbacks(struct rq *rq, bool split) argument
5099 splice_balance_callbacks(struct rq *rq) argument
5104 __balance_callbacks(struct rq *rq) argument
5109 balance_callbacks(struct rq *rq, struct balance_callback *head) argument
5122 __balance_callbacks(struct rq *rq) argument
5126 splice_balance_callbacks(struct rq *rq) argument
5131 balance_callbacks(struct rq *rq, struct balance_callback *head) argument
5138 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) argument
5154 finish_lock_switch(struct rq *rq) argument
5208 prepare_task_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) argument
5243 struct rq *rq = this_rq(); variable in typeref:struct:rq
5281 finish_lock_switch(rq); variable
5353 context_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next, struct rq_flags *rf) argument
5576 struct rq *rq; local
5613 cpu_resched_latency(struct rq *rq) argument
5658 cpu_resched_latency(struct rq *rq) argument
5668 struct rq *rq = cpu_rq(cpu); local
5683 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); local
5749 struct rq *rq = cpu_rq(cpu); local
5990 put_prev_task_balance(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
6016 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
6089 pick_task(struct rq *rq) argument
6108 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
6407 sched_core_balance(struct rq *rq) argument
6428 queue_core_balance(struct rq *rq) argument
6450 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; local
6489 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; local
6539 struct rq *rq = cpu_rq(cpu); local
6552 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
6622 struct rq *rq; local
7156 struct rq *rq; local
7277 struct rq *rq; local
7413 struct rq *rq = cpu_rq(cpu); local
7460 struct rq *rq = cpu_rq(cpu); local
7496 struct rq *rq = cpu_rq(cpu); local
7715 struct rq *rq; local
8558 struct rq *rq; local
8961 struct rq *rq, *p_rq; local
9120 struct rq *rq = scope.rq; local
9268 struct rq *rq = cpu_rq(cpu); local
9394 struct rq *rq; local
9438 struct rq *rq = this_rq(); local
9468 balance_push(struct rq *rq) argument
9533 struct rq *rq = cpu_rq(cpu); local
9554 struct rq *rq = this_rq(); local
9563 balance_push(struct rq *rq) argument
9577 set_rq_online(struct rq *rq) argument
9592 set_rq_offline(struct rq *rq) argument
9660 struct rq *rq = cpu_rq(cpu); local
9705 struct rq *rq = cpu_rq(cpu); local
9771 struct rq *rq = cpu_rq(cpu); local
9813 calc_load_migrate(struct rq *rq) argument
9821 dump_rq_tasks(struct rq *rq, const char *loglvl) argument
9842 struct rq *rq = cpu_rq(cpu); local
9986 struct rq *rq; local
10488 struct rq *rq; local
10881 struct rq *rq = cfs_rq->rq; local
11547 call_trace_sched_update_nr_running(struct rq *rq, int count) argument
11826 struct rq *rq = cpu_rq(cpu); local
11881 struct rq *rq = cpu_rq(cpu); local
11982 task_tick_mm_cid(struct rq *rq, struct task_struct *curr) argument
11998 struct rq *rq; local
12020 struct rq *rq; local
12042 struct rq *rq; local
[all...]
rt.c
178 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180 return rt_rq->rq;
188 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
192 return rt_rq->rq;
221 struct rq *rq = cpu_rq(cpu); local
225 rt_rq->rq = rq;
235 rt_se->rt_rq = &rq->rt;
293 static inline struct rq *rq_of_rt_r
307 struct rq *rq = rq_of_rt_se(rt_se); local
324 need_pull_rt_task(struct rq *rq, struct task_struct *prev) argument
330 rt_overloaded(struct rq *rq) argument
335 rt_set_overload(struct rq *rq) argument
354 rt_clear_overload(struct rq *rq) argument
364 has_pushable_tasks(struct rq *rq) argument
375 rt_queue_push_tasks(struct rq *rq) argument
383 rt_queue_pull_task(struct rq *rq) argument
388 enqueue_pushable_task(struct rq *rq, struct task_struct *p) argument
404 dequeue_pushable_task(struct rq *rq, struct task_struct *p) argument
425 enqueue_pushable_task(struct rq *rq, struct task_struct *p) argument
429 dequeue_pushable_task(struct rq *rq, struct task_struct *p) argument
433 rt_queue_push_tasks(struct rq *rq) argument
534 struct rq *rq = rq_of_rt_rq(rt_rq); local
635 struct rq *rq = rq_of_rt_rq(rt_rq); local
736 __disable_runtime(struct rq *rq) argument
818 __enable_runtime(struct rq *rq) argument
879 struct rq *rq = rq_of_rt_rq(rt_rq); local
1001 update_curr_rt(struct rq *rq) argument
1037 struct rq *rq = rq_of_rt_rq(rt_rq); local
1054 struct rq *rq = rq_of_rt_rq(rt_rq); local
1078 struct rq *rq = rq_of_rt_rq(rt_rq); local
1094 struct rq *rq = rq_of_rt_rq(rt_rq); local
1449 struct rq *rq = rq_of_rt_se(rt_se); local
1461 struct rq *rq = rq_of_rt_se(rt_se); local
1480 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) argument
1496 dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) argument
1524 requeue_task_rt(struct rq *rq, struct task_struct *p, int head) argument
1535 yield_task_rt(struct rq *rq) argument
1547 struct rq *rq; local
1615 check_preempt_equal_prio(struct rq *rq, struct task_struct *p) argument
1642 balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) argument
1663 wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags) argument
1688 set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) argument
1732 _pick_next_task_rt(struct rq *rq) argument
1747 pick_task_rt(struct rq *rq) argument
1759 pick_next_task_rt(struct rq *rq) argument
1769 put_prev_task_rt(struct rq *rq, struct task_struct *p) argument
1779 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); local
1794 pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) argument
1807 pick_highest_pushable_task(struct rq *rq, int cpu) argument
1917 find_lock_lowest_rq(struct task_struct *task, struct rq *rq) argument
1977 pick_next_pushable_task(struct rq *rq) argument
2002 push_rt_task(struct rq *rq, bool pull) argument
2122 push_rt_tasks(struct rq *rq) argument
2229 tell_cpu_to_push(struct rq *rq) argument
2267 struct rq *rq; local
2415 task_woken_rt(struct rq *rq, struct task_struct *p) argument
2429 rq_online_rt(struct rq *rq) argument
2440 rq_offline_rt(struct rq *rq) argument
2454 switched_from_rt(struct rq *rq, struct task_struct *p) argument
2485 switched_to_rt(struct rq *rq, struct task_struct *p) argument
2492 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); local
2516 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) argument
2553 watchdog(struct rq *rq, struct task_struct *p) argument
2577 watchdog(struct rq *rq, struct task_struct *p) argument
2588 task_tick_rt(struct rq *rq, struct task_struct *p, int queued) argument
2593 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); local
2622 get_rr_interval_rt(struct rq *rq, struct task_struct *task) argument
[all...]
/linux-master/include/linux/
t10-pi.h
40 static inline u32 t10_pi_ref_tag(struct request *rq) argument
42 unsigned int shift = ilog2(queue_logical_block_size(rq->q));
45 if (rq->q->integrity.interval_exp)
46 shift = rq->q->integrity.interval_exp;
48 return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
71 static inline u64 ext_pi_ref_tag(struct request *rq) argument
73 unsigned int shift = ilog2(queue_logical_block_size(rq->q));
76 if (rq->q->integrity.interval_exp)
77 shift = rq->q->integrity.interval_exp;
79 return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT));
[all...]
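
Both helpers turn the request's starting sector into a protection-information reference tag: shift blk_rq_pos() right by ilog2(logical block size) - SECTOR_SHIFT, unless the integrity profile supplies an explicit interval exponent. A worked userspace example of that arithmetic (values assumed: 4096-byte logical blocks, no interval override):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	uint64_t sector = 24;		/* blk_rq_pos(rq): start in 512-byte sectors */
	unsigned int shift = 12;	/* ilog2(4096-byte logical block) */

	/* Each block spans 2^(12-9) = 8 sectors, so sector 24 is block 3. */
	uint32_t ref_tag = (uint32_t)(sector >> (shift - SECTOR_SHIFT));

	printf("ref tag = %u\n", ref_tag);	/* prints 3 */
	return 0;
}
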
/linux-master/drivers/gpu/drm/i915/selftests/
i915_perf.c
161 static int write_timestamp(struct i915_request *rq, int slot) argument
166 cs = intel_ring_begin(rq, 6);
171 if (GRAPHICS_VER(rq->i915) >= 8)
183 intel_ring_advance(rq, cs);
188 static ktime_t poll_status(struct i915_request *rq, int slot) argument
190 while (!intel_read_status_page(rq->engine, slot) &&
191 !i915_request_completed(rq))
201 struct i915_request *rq; local
224 rq = intel_engine_create_kernel_request(stream->engine);
225 if (IS_ERR(rq)) {
294 struct i915_request *rq; local
[all...]
igt_spinner.c
117 const struct i915_request *rq)
119 return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
128 struct i915_request *rq = NULL; local
148 rq = intel_context_create_request(ce);
149 if (IS_ERR(rq))
150 return ERR_CAST(rq);
152 err = igt_vma_move_to_active_unlocked(vma, rq, 0);
156 err = igt_vma_move_to_active_unlocked(hws, rq, 0);
162 if (GRAPHICS_VER(rq->i915) >= 8) {
164 *batch++ = lower_32_bits(hws_address(hws, rq));
116 hws_address(const struct i915_vma *hws, const struct i915_request *rq) argument
220 hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq) argument
253 igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq) argument
[all...]
/linux-master/drivers/s390/char/
raw3270.h
30 void (*callback)(struct raw3270_request *rq, void *data);
35 void raw3270_request_free(struct raw3270_request *rq);
36 int raw3270_request_reset(struct raw3270_request *rq);
37 void raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd);
38 int raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size);
39 void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size);
40 void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib);
43 raw3270_request_final(struct raw3270_request *rq) argument
45 return list_empty(&rq->list);
54 int (*activate)(struct raw3270_view *rq);
[all...]
/linux-master/block/
blk-mq-sched.h
14 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
37 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq, argument
40 if (rq->rq_flags & RQF_USE_SCHED) {
44 return e->type->ops.allow_merge(q, rq, bio);
49 static inline void blk_mq_sched_completed_request(struct request *rq, u64 now) argument
51 if (rq->rq_flags & RQF_USE_SCHED) {
52 struct elevator_queue *e = rq->q->elevator;
55 e->type->ops.completed_request(rq, now);
59 static inline void blk_mq_sched_requeue_request(struct request *rq) argument
61 if (rq->rq_flags & RQF_USE_SCHED) {
[all...]
blk-mq.c
41 #include "blk-rq-qos.h"
46 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
47 static void blk_mq_request_bypass_insert(struct request *rq,
90 static bool blk_mq_check_inflight(struct request *rq, void *priv) argument
94 if (rq->part && blk_do_io_stat(rq) &&
95 (!mi->part->bd_partno || rq->part == mi->part) &&
96 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
97 mi->inflight[rq_data_dir(rq)]++;
313 void blk_rq_init(struct request_queue *q, struct request *rq) argument
331 blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns) argument
352 struct request *rq = tags->static_rqs[tag]; local
408 struct request *rq; local
440 struct request *rq; local
532 struct request *rq; local
550 struct request *rq; local
583 struct request *rq; local
623 struct request *rq; local
689 blk_mq_finish_request(struct request *rq) argument
704 __blk_mq_free_request(struct request *rq) argument
725 blk_mq_free_request(struct request *rq) argument
744 struct request *rq; local
750 blk_dump_rq_flags(struct request *rq, char *msg) argument
764 req_bio_endio(struct request *rq, struct bio *bio, unsigned int nbytes, blk_status_t error) argument
1026 __blk_mq_end_request_acct(struct request *rq, u64 now) argument
1035 __blk_mq_end_request(struct request *rq, blk_status_t error) argument
1052 blk_mq_end_request(struct request *rq, blk_status_t error) argument
1077 struct request *rq; local
1126 struct request *rq, *next; local
1148 blk_mq_complete_need_ipi(struct request *rq) argument
1175 blk_mq_complete_send_ipi(struct request *rq) argument
1184 blk_mq_raise_softirq(struct request *rq) argument
1195 blk_mq_complete_request_remote(struct request *rq) argument
1229 blk_mq_complete_request(struct request *rq) argument
1244 blk_mq_start_request(struct request *rq) argument
1285 blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) argument
1324 blk_execute_rq_nowait(struct request *rq, bool at_head) argument
1353 blk_end_sync_rq(struct request *rq, blk_status_t ret) argument
1362 blk_rq_is_poll(struct request *rq) argument
1372 blk_rq_poll_completion(struct request *rq, struct completion *wait) argument
1390 blk_execute_rq(struct request *rq, bool at_head) argument
1416 __blk_mq_requeue_request(struct request *rq) argument
1431 blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) argument
1456 struct request *rq; local
1503 blk_is_flush_data_rq(struct request *rq) argument
1508 blk_mq_rq_inflight(struct request *rq, void *priv) argument
1562 blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired) argument
1582 blk_mq_put_rq_ref(struct request *rq) argument
1592 blk_mq_check_expired(struct request *rq, void *priv) argument
1610 blk_mq_handle_expired(struct request *rq, void *priv) argument
1714 struct request *rq; member in struct:dispatch_rq_data
1752 __blk_mq_alloc_driver_tag(struct request *rq) argument
1804 blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq) argument
1917 blk_mq_handle_dev_resource(struct request *rq, struct list_head *list) argument
1924 blk_mq_handle_zone_resource(struct request *rq, struct list_head *zone_list) argument
1943 blk_mq_prep_dispatch_rq(struct request *rq, bool need_budget) argument
1984 struct request *rq; local
2019 struct request *rq; local
2450 blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags) argument
2466 struct request *rq; local
2499 blk_mq_insert_request(struct request *rq, blk_insert_t flags) argument
2561 blk_mq_bio_to_request(struct request *rq, struct bio *bio, unsigned int nr_segs) argument
2580 __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, bool last) argument
2613 blk_mq_get_budget_and_tag(struct request *rq) argument
2638 blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq) argument
2669 blk_mq_request_issue_directly(struct request *rq, bool last) argument
2686 struct request *rq; local
2741 struct request *rq = rq_list_pop(&plug->mq_list); local
2778 struct request *rq; local
2828 struct request *rq = list_first_entry(list, struct request, local
2876 struct request *rq; local
2902 struct request *rq; local
2917 blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug, struct bio *bio) argument
2955 struct request *rq; local
3037 blk_insert_cloned_request(struct request *rq) argument
3103 blk_rq_unprep_clone(struct request *rq) argument
3132 blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, int (*bio_ctr)(struct bio *, struct bio *, void *), void *data) argument
3190 blk_steal_bios(struct bio_list *list, struct request *rq) argument
3232 struct request *rq = drv_tags->rqs[i]; local
3270 struct request *rq = tags->static_rqs[i]; local
3367 blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, int node) argument
3440 struct request *rq = p; local
3464 blk_mq_has_request(struct request *rq, void *data) argument
4832 blk_rq_poll(struct request *rq, struct io_comp_batch *iob, unsigned int poll_flags) argument
4850 blk_mq_rq_cpu(struct request *rq) argument
[all...]
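
Among the hits, blk_execute_rq() is the synchronous passthrough path: allocate a request carrying no bio, let the driver interpret it, and sleep until completion. A hedged sketch of that calling pattern (kernel-internal API; the REQ_OP_DRV_IN payload and timeout here are assumptions, not code from blk-mq.c):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static int issue_driver_private_cmd(struct request_queue *q)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = 30 * HZ;	/* per-command timeout in jiffies */

	/* Queues rq and blocks until the driver completes it. */
	status = blk_execute_rq(rq, false);

	blk_mq_free_request(rq);
	return blk_status_to_errno(status);
}
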
/linux-master/drivers/scsi/esas2r/
esas2r_disc.c
49 struct esas2r_request *rq);
51 struct esas2r_request *rq);
55 struct esas2r_request *rq);
59 struct esas2r_request *rq);
61 struct esas2r_request *rq);
63 struct esas2r_request *rq);
65 struct esas2r_request *rq);
67 struct esas2r_request *rq);
69 struct esas2r_request *rq);
71 struct esas2r_request *rq);
160 struct esas2r_request *rq = &a->general_req; local
313 struct esas2r_request *rq = &a->general_req; local
386 esas2r_disc_continue(struct esas2r_adapter *a, struct esas2r_request *rq) argument
459 esas2r_disc_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
488 esas2r_disc_local_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
502 esas2r_disc_abort(struct esas2r_adapter *a, struct esas2r_request *rq) argument
517 esas2r_disc_block_dev_scan(struct esas2r_adapter *a, struct esas2r_request *rq) argument
548 esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
577 esas2r_disc_raid_grp_info(struct esas2r_adapter *a, struct esas2r_request *rq) argument
624 esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
687 esas2r_disc_part_info(struct esas2r_adapter *a, struct esas2r_request *rq) argument
739 esas2r_disc_part_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
788 esas2r_disc_passthru_dev_info(struct esas2r_adapter *a, struct esas2r_request *rq) argument
825 esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
881 esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a, struct esas2r_request *rq) argument
939 esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1044 esas2r_disc_dev_remove(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1083 esas2r_disc_dev_add(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1163 struct esas2r_request *rq; local
[all...]
esas2r_vda.c
59 static void clear_vda_request(struct esas2r_request *rq);
62 struct esas2r_request *rq);
67 struct esas2r_request *rq,
93 clear_vda_request(rq);
95 rq->vrq->scsi.function = vi->function;
96 rq->interrupt_cb = esas2r_complete_vda_ioctl;
97 rq->interrupt_cx = vi;
112 rq->vrq->flash.length = cpu_to_le32(datalen);
113 rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
115 memcpy(rq
65 esas2r_process_vda_ioctl(struct esas2r_adapter *a, struct atto_ioctl_vda *vi, struct esas2r_request *rq, struct esas2r_sg_context *sgc) argument
269 esas2r_complete_vda_ioctl(struct esas2r_adapter *a, struct esas2r_request *rq) argument
346 esas2r_build_flash_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 sub_func, u8 cksum, u32 addr, u32 length) argument
372 esas2r_build_mgt_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 sub_func, u8 scan_gen, u16 dev_index, u32 length, void *data) argument
420 esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
448 esas2r_build_cli_req(struct esas2r_adapter *a, struct esas2r_request *rq, u32 length, u32 cmd_rsp_len) argument
465 esas2r_build_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq, u32 length, u8 sub_func) argument
482 esas2r_build_cfg_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 sub_func, u32 length, void *data) argument
504 clear_vda_request(struct esas2r_request *rq) argument
[all...]
/linux-master/drivers/gpu/drm/i915/gt/uc/
selftest_guc_hangcheck.c
17 struct i915_request *rq; local
19 rq = intel_engine_create_kernel_request(engine);
20 if (IS_ERR(rq))
21 return rq;
23 i915_request_get(rq);
24 i915_request_add(rq);
26 return rq;
36 struct i915_request *rq; local
77 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
79 if (IS_ERR(rq)) {
[all...]
intel_gsc_uc_heci_cmd_submit.c
20 static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt) argument
24 cs = intel_ring_begin(rq, 8);
37 intel_ring_advance(rq, cs);
47 struct i915_request *rq; local
59 rq = i915_request_create(ce);
60 if (IS_ERR(rq))
61 return PTR_ERR(rq);
64 err = ce->engine->emit_init_breadcrumb(rq);
69 err = emit_gsc_heci_pkt(rq, &pkt);
74 err = ce->engine->emit_flush(rq,
143 struct i915_request *rq; local
[all...]
selftest_guc.c
11 static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin) argument
15 i915_request_get(rq);
16 i915_request_add(rq);
17 if (spin && !igt_wait_for_spinner(spin, rq))
26 struct i915_request *rq; local
29 rq = intel_context_create_request(ce);
30 if (IS_ERR(rq))
31 return rq;
34 ret = i915_sw_fence_await_dma_fence(&rq->submit,
38 i915_request_put(rq);
54 struct i915_request *last[3] = {NULL, NULL, NULL}, *rq; local
153 struct i915_request *spin_rq = NULL, *rq, *last = NULL; local
315 struct i915_request *rq; local
[all...]
/linux-master/fs/erofs/
decompressor_deflate.c
97 int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, argument
101 PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
103 PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
104 struct super_block *sb = rq->sb;
112 kin = kmap_local_page(*rq->in);
113 err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
114 min_t(unsigned int, rq->inputsize,
115 sb->s_blocksize - rq->pageofs_in));
[all...]
decompressor_lzma.c
150 int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, argument
154 PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
156 PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
164 kin = kmap(*rq->in);
165 err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
166 min_t(unsigned int, rq->inputsize,
167 rq->sb->s_blocksize - rq->pageofs_in));
[all...]
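
Both decompressors map the first input page before fixing up the compressed stream size; the deflate path uses kmap_local_page() where the LZMA path still uses kmap(). A minimal sketch of the local-mapping idiom (a generic highmem pattern, not erofs code):

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_page(struct page *pg, void *dst, size_t len)
{
	void *kin = kmap_local_page(pg);	/* cheap, CPU-local, non-sleeping */

	memcpy(dst, kin, len);
	kunmap_local(kin);	/* unmap in LIFO order before returning */
}
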
/linux-master/drivers/platform/chrome/wilco_ec/
mailbox.c
89 * @rq: EC request structure to fill.
92 struct wilco_ec_request *rq)
94 memset(rq, 0, sizeof(*rq));
95 rq->struct_version = EC_MAILBOX_PROTO_VERSION;
96 rq->mailbox_id = msg->type;
97 rq->mailbox_version = EC_MAILBOX_VERSION;
98 rq->data_size = msg->request_size;
101 rq->checksum = wilco_ec_checksum(rq, sizeo
91 wilco_ec_prepare(struct wilco_ec_message *msg, struct wilco_ec_request *rq) argument
115 wilco_ec_transfer(struct wilco_ec_device *ec, struct wilco_ec_message *msg, struct wilco_ec_request *rq) argument
198 struct wilco_ec_request *rq; local
[all...]
/linux-master/drivers/scsi/
scsi_bsg.c
16 struct request *rq; local
28 rq = scsi_alloc_request(q, hdr->dout_xfer_len ?
30 if (IS_ERR(rq))
31 return PTR_ERR(rq);
32 rq->timeout = timeout;
34 scmd = blk_mq_rq_to_pdu(rq);
50 ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
53 ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
[all...]
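
scsi_bsg.c services the sg_io_v4 ioctl: it allocates a request, copies in the CDB, maps the user buffer, and executes. A hypothetical userspace counterpart issuing TEST UNIT READY through that handler (the device node path is an assumption):

#include <fcntl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char cdb[6] = { 0x00 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';	/* required v4 magic */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (uintptr_t)cdb;
	hdr.request_len = sizeof(cdb);
	hdr.response = (uintptr_t)sense;
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 5000;	/* ms; becomes rq->timeout in the handler */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("device_status=%u\n", hdr.device_status);
	close(fd);
	return 0;
}
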
