Searched refs:rq (Results 1 - 25 of 634) sorted by relevance


/linux-master/kernel/sched/
stop_task.c
19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
21 return sched_stop_runnable(rq);
26 wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags) argument
31 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) argument
33 stop->se.exec_start = rq_clock_task(rq);
36 static struct task_struct *pick_task_stop(struct rq *rq) argument
44 pick_next_task_stop(struct rq *rq) argument
55 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) argument
61 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) argument
66 yield_task_stop(struct rq *rq) argument
71 put_prev_task_stop(struct rq *rq, struct task_struct *prev) argument
84 task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) argument
88 switched_to_stop(struct rq *rq, struct task_struct *p) argument
94 prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio) argument
99 update_curr_stop(struct rq *rq) argument
[all...]
pelt.h
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
11 int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
13 static inline u64 thermal_load_avg(struct rq *rq) argument
15 return READ_ONCE(rq->avg_thermal.load_avg);
19 update_thermal_load_avg(u64 now, struct rq *r argument
24 thermal_load_avg(struct rq *rq) argument
34 update_irq_load_avg(struct rq *rq, u64 running) argument
64 rq_clock_pelt(struct rq *rq) argument
73 _update_idle_rq_clock_pelt(struct rq *rq) argument
95 update_rq_clock_pelt(struct rq *rq, s64 delta) argument
133 update_idle_rq_clock_pelt(struct rq *rq) argument
193 update_rt_rq_load_avg(u64 now, struct rq *rq, int running) argument
199 update_dl_rq_load_avg(u64 now, struct rq *rq, int running) argument
205 update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) argument
210 thermal_load_avg(struct rq *rq) argument
216 update_irq_load_avg(struct rq *rq, u64 running) argument
221 rq_clock_pelt(struct rq *rq) argument
227 update_rq_clock_pelt(struct rq *rq, s64 delta) argument
230 update_idle_rq_clock_pelt(struct rq *rq) argument
[all...]
stats.h
13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) argument
15 if (rq) {
16 rq->rq_sched_info.run_delay += delta;
17 rq->rq_sched_info.pcount++;
25 rq_sched_info_depart(struct rq *rq, unsigned long long delta) argument
27 if (rq)
28 rq->rq_cpu_time += delta;
32 rq_sched_info_dequeue(struct rq *r argument
72 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) argument
73 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) argument
74 rq_sched_info_depart(struct rq *rq, unsigned long long delta) argument
170 struct rq *rq; local
205 sched_info_dequeue(struct rq *rq, struct task_struct *t) argument
224 sched_info_arrive(struct rq *rq, struct task_struct *t) argument
246 sched_info_enqueue(struct rq *rq, struct task_struct *t) argument
260 sched_info_depart(struct rq *rq, struct task_struct *t) argument
276 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) argument
[all...]
sched.h
93 struct rq;
105 extern void calc_global_load_tick(struct rq *this_rq);
106 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
108 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
270 /* nests inside the rq lock: */
322 * dl_se::rq -- runqueue we belong to.
341 extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
642 struct rq *r member in struct:cfs_rq
719 struct rq *rq; member in struct:rt_rq
985 struct rq { struct
1202 cpu_of(struct rq *rq) argument
1236 sched_core_enabled(struct rq *rq) argument
1250 rq_lockp(struct rq *rq) argument
1258 __rq_lockp(struct rq *rq) argument
1276 sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) argument
1285 sched_core_cookie_match(struct rq *rq, struct task_struct *p) argument
1308 sched_group_cookie_match(struct rq *rq, struct task_struct *p, struct sched_group *group) argument
1338 sched_core_enabled(struct rq *rq) argument
1348 rq_lockp(struct rq *rq) argument
1353 __rq_lockp(struct rq *rq) argument
1358 sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) argument
1363 sched_core_cookie_match(struct rq *rq, struct task_struct *p) argument
1368 sched_group_cookie_match(struct rq *rq, struct task_struct *p, struct sched_group *group) argument
1376 lockdep_assert_rq_held(struct rq *rq) argument
1385 raw_spin_rq_lock(struct rq *rq) argument
1390 raw_spin_rq_lock_irq(struct rq *rq) argument
1396 raw_spin_rq_unlock_irq(struct rq *rq) argument
1402 _raw_spin_rq_lock_irqsave(struct rq *rq) argument
1410 raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) argument
1424 update_idle_core(struct rq *rq) argument
1431 update_idle_core(struct rq *rq) argument
1470 struct rq *rq = task_rq(p); local
1511 assert_clock_updated(struct rq *rq) argument
1520 rq_clock(struct rq *rq) argument
1528 rq_clock_task(struct rq *rq) argument
1549 rq_clock_thermal(struct rq *rq) argument
1554 rq_clock_skip_update(struct rq *rq) argument
1564 rq_clock_cancel_skipupdate(struct rq *rq) argument
1579 rq_clock_start_loop_update(struct rq *rq) argument
1586 rq_clock_stop_loop_update(struct rq *rq) argument
1617 rq_pin_lock(struct rq *rq, struct rq_flags *rf) argument
1630 rq_unpin_lock(struct rq *rq, struct rq_flags *rf) argument
1640 rq_repin_lock(struct rq *rq, struct rq_flags *rf) argument
1663 raw_spin_rq_unlock(rq); variable
1672 raw_spin_rq_unlock(rq); variable
1693 raw_spin_rq_lock_irq(rq); variable
1701 raw_spin_rq_lock(rq); variable
1718 raw_spin_rq_unlock_irq(rq); variable
1726 raw_spin_rq_unlock(rq); variable
1748 struct rq *rq; variable in typeref:struct:rq
1804 queue_balance_callback(struct rq *rq, struct balance_callback *head, void (*func)(struct rq *rq)) argument
1986 sched_core_account_forceidle(struct rq *rq) argument
1994 sched_core_tick(struct rq *rq) argument
2002 sched_core_account_forceidle(struct rq *rq) argument
2004 sched_core_tick(struct rq *rq) argument
2153 task_current(struct rq *rq, struct task_struct *p) argument
2158 task_on_cpu(struct rq *rq, struct task_struct *p) argument
2325 put_prev_task(struct rq *rq, struct task_struct *prev) argument
2331 set_next_task(struct rq *rq, struct task_struct *next) argument
2370 sched_stop_runnable(struct rq *rq) argument
2375 sched_dl_runnable(struct rq *rq) argument
2380 sched_rt_runnable(struct rq *rq) argument
2385 sched_fair_runnable(struct rq *rq) argument
2406 get_push_task(struct rq *rq) argument
2430 idle_set_state(struct rq *rq, struct cpuidle_state *idle_state) argument
2436 idle_get_state(struct rq *rq) argument
2443 idle_set_state(struct rq *rq, struct cpuidle_state *idle_state) argument
2448 idle_get_state(struct rq *rq) argument
2495 sched_update_tick_dependency(struct rq *rq) argument
2509 sched_update_tick_dependency(struct rq *rq) argument
2512 add_nr_running(struct rq *rq, unsigned count) argument
2531 sub_nr_running(struct rq *rq, unsigned count) argument
2578 hrtick_enabled(struct rq *rq) argument
2585 hrtick_enabled_fair(struct rq *rq) argument
2592 hrtick_enabled_dl(struct rq *rq) argument
2603 hrtick_enabled_fair(struct rq *rq) argument
2608 hrtick_enabled_dl(struct rq *rq) argument
2613 hrtick_enabled(struct rq *rq) argument
2924 nohz_balance_exit_idle(struct rq *rq) argument
2988 cpufreq_update_util(struct rq *rq, unsigned int flags) argument
2998 cpufreq_update_util(struct rq *rq, unsigned int flags) argument
3034 cpu_bw_dl(struct rq *rq) argument
3039 cpu_util_dl(struct rq *rq) argument
3048 cpu_util_rt(struct rq *rq) argument
3057 uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id) argument
3063 uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value) argument
3069 uclamp_rq_is_idle(struct rq *rq) argument
3075 uclamp_rq_is_capped(struct rq *rq) argument
3111 uclamp_rq_is_capped(struct rq *rq) argument
3118 uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id) argument
3127 uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value) argument
3132 uclamp_rq_is_idle(struct rq *rq) argument
3139 cpu_util_irq(struct rq *rq) argument
3154 cpu_util_irq(struct rq *rq) argument
3193 membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) argument
3209 membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) argument
3340 mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm) argument
3348 __mm_cid_get(struct rq *rq, struct mm_struct *mm) argument
3400 mm_cid_get(struct rq *rq, struct mm_struct *mm) argument
3422 switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) argument
3474 switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) argument
3477 task_tick_mm_cid(struct rq *rq, struct task_struct *curr) argument
[all...]
/linux-master/tools/testing/selftests/bpf/progs/
test_ksyms_btf.c
19 extern const struct rq runqueues __ksym; /* struct type global var. */
25 struct rq *rq; local
35 rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
36 if (rq)
37 out__rq_cpu = rq->cpu;
42 rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
43 if (rq) /* shoul
[all...]
test_ksyms_btf_null_check.c
8 extern const struct rq runqueues __ksym; /* struct type global var. */
14 struct rq *rq; local
19 rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
24 /* !rq has not been tested, so verifier should reject. */
25 *(volatile int *)(&rq->cpu);
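
The two BPF selftest hits above show the pattern for reading a per-CPU kernel variable such as runqueues from a BPF program: declare a typed __ksym, resolve the per-CPU copy with bpf_per_cpu_ptr(), and NULL-check the result before dereferencing (the null-check variant exists because the verifier rejects an unchecked dereference). Below is a minimal sketch of that pattern, assuming vmlinux.h and libbpf's bpf_helpers.h are available; the program and variable names (read_rq_cpu, out_rq_cpu) are illustrative and not from the tree.

/* Sketch: read rq->cpu of the current CPU's runqueue via a typed __ksym. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern const struct rq runqueues __ksym;   /* per-CPU kernel variable */

int out_rq_cpu = -1;

SEC("raw_tp/sys_enter")
int read_rq_cpu(const void *ctx)
{
	u32 cpu = bpf_get_smp_processor_id();
	struct rq *rq;

	rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
	if (!rq)                  /* required: bpf_per_cpu_ptr() may return NULL */
		return 0;

	out_rq_cpu = rq->cpu;     /* field access is allowed once NULL-checked */
	return 0;
}

char _license[] SEC("license") = "GPL";
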
/linux-master/drivers/scsi/
scsi_debugfs.h
5 void scsi_show_rq(struct seq_file *m, struct request *rq);
/linux-master/drivers/net/ethernet/cisco/enic/
vnic_rq.c
18 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) argument
21 unsigned int i, j, count = rq->ring.desc_count;
25 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);
26 if (!rq->bufs[i])
31 buf = rq->bufs[i];
34 buf->desc = (u8 *)rq->ring.descs +
35 rq->ring.desc_size * buf->index;
37 buf->next = rq->bufs[0];
40 buf->next = rq->bufs[i + 1];
48 rq
53 vnic_rq_free(struct vnic_rq *rq) argument
72 vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, unsigned int desc_count, unsigned int desc_size) argument
101 vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
125 vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
133 vnic_rq_error_status(struct vnic_rq *rq) argument
138 vnic_rq_enable(struct vnic_rq *rq) argument
143 vnic_rq_disable(struct vnic_rq *rq) argument
171 vnic_rq_clean(struct vnic_rq *rq, void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) argument
[all...]
vnic_rq.h
84 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) argument
87 return rq->ring.desc_avail;
90 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) argument
93 return rq->ring.desc_count - rq->ring.desc_avail - 1;
96 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) argument
98 return rq->to_use->desc;
101 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) argument
103 return rq->to_use->index;
106 static inline void vnic_rq_post(struct vnic_rq *rq, argument
141 vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) argument
151 vnic_rq_service(struct vnic_rq *rq, struct cq_desc *cq_desc, u16 completed_index, int desc_return, void (*buf_service)(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped, void *opaque), void *opaque) argument
179 vnic_rq_fill(struct vnic_rq *rq, int (*buf_fill)(struct vnic_rq *rq)) argument
[all...]
/linux-master/drivers/scsi/fnic/
vnic_rq.c
15 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) argument
18 unsigned int i, j, count = rq->ring.desc_count;
22 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
23 if (!rq->bufs[i]) {
30 buf = rq->bufs[i];
33 buf->desc = (u8 *)rq->ring.descs +
34 rq->ring.desc_size * buf->index;
36 buf->next = rq->bufs[0];
39 buf->next = rq->bufs[i + 1];
47 rq
53 vnic_rq_free(struct vnic_rq *rq) argument
70 vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, unsigned int desc_count, unsigned int desc_size) argument
99 vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
125 vnic_rq_error_status(struct vnic_rq *rq) argument
130 vnic_rq_enable(struct vnic_rq *rq) argument
135 vnic_rq_disable(struct vnic_rq *rq) argument
153 vnic_rq_clean(struct vnic_rq *rq, void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) argument
[all...]
vnic_rq.h
93 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) argument
96 return rq->ring.desc_avail;
99 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) argument
102 return rq->ring.desc_count - rq->ring.desc_avail - 1;
105 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) argument
107 return rq->to_use->desc;
110 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) argument
112 return rq->to_use->index;
115 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) argument
120 vnic_rq_post(struct vnic_rq *rq, void *os_buf, unsigned int os_buf_index, dma_addr_t dma_addr, unsigned int len) argument
153 vnic_rq_posting_soon(struct vnic_rq *rq) argument
158 vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) argument
168 vnic_rq_service(struct vnic_rq *rq, struct cq_desc *cq_desc, u16 completed_index, int desc_return, void (*buf_service)(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped, void *opaque), void *opaque) argument
196 vnic_rq_fill(struct vnic_rq *rq, int (*buf_fill)(struct vnic_rq *rq)) argument
[all...]
/linux-master/drivers/gpu/drm/i915/
i915_request.h
64 #define RQ_TRACE(rq, fmt, ...) do { \
65 const struct i915_request *rq__ = (rq); \
276 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
378 void __i915_request_skip(struct i915_request *rq);
379 bool i915_request_set_error_once(struct i915_request *rq, int error);
380 struct i915_request *i915_request_mark_eio(struct i915_request *rq);
383 void __i915_request_queue(struct i915_request *rq,
385 void __i915_request_queue_bh(struct i915_request *rq);
387 bool i915_request_retire(struct i915_request *rq);
388 void i915_request_retire_upto(struct i915_request *rq);
400 i915_request_get(struct i915_request *rq) argument
406 i915_request_get_rcu(struct i915_request *rq) argument
412 i915_request_put(struct i915_request *rq) argument
454 i915_request_signaled(const struct i915_request *rq) argument
460 i915_request_is_active(const struct i915_request *rq) argument
465 i915_request_in_priority_queue(const struct i915_request *rq) argument
471 i915_request_has_initial_breadcrumb(const struct i915_request *rq) argument
484 __hwsp_seqno(const struct i915_request *rq) argument
504 hwsp_seqno(const struct i915_request *rq) argument
515 __i915_request_has_started(const struct i915_request *rq) argument
546 i915_request_started(const struct i915_request *rq) argument
571 i915_request_is_running(const struct i915_request *rq) argument
601 i915_request_is_ready(const struct i915_request *rq) argument
606 __i915_request_is_complete(const struct i915_request *rq) argument
611 i915_request_completed(const struct i915_request *rq) argument
627 i915_request_mark_complete(struct i915_request *rq) argument
633 i915_request_has_waitboost(const struct i915_request *rq) argument
638 i915_request_has_nopreempt(const struct i915_request *rq) argument
644 i915_request_has_sentinel(const struct i915_request *rq) argument
649 i915_request_on_hold(const struct i915_request *rq) argument
654 i915_request_set_hold(struct i915_request *rq) argument
659 i915_request_clear_hold(struct i915_request *rq) argument
665 i915_request_timeline(const struct i915_request *rq) argument
674 i915_request_gem_context(const struct i915_request *rq) argument
681 i915_request_active_timeline(const struct i915_request *rq) argument
693 i915_request_active_seqno(const struct i915_request *rq) argument
[all...]
i915_request.c
114 struct i915_request *rq = to_request(fence); local
116 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
117 rq->guc_prio != GUC_PRIO_FINI);
119 i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
120 if (rq->batch_res) {
121 i915_vma_resource_put(rq->batch_res);
122 rq->batch_res = NULL;
132 i915_sw_fence_fini(&rq->submit);
133 i915_sw_fence_fini(&rq->semaphore);
139 * very careful in what rq
191 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk)) argument
204 __notify_execute_cb_irq(struct i915_request *rq) argument
215 i915_request_notify_execute_cb_imm(struct i915_request *rq) argument
220 __i915_request_fill(struct i915_request *rq, u8 val) argument
244 i915_request_active_engine(struct i915_request *rq, struct intel_engine_cs **active) argument
276 __rq_init_watchdog(struct i915_request *rq) argument
283 struct i915_request *rq = local
297 __rq_arm_watchdog(struct i915_request *rq) argument
316 __rq_cancel_watchdog(struct i915_request *rq) argument
356 i915_request_retire(struct i915_request *rq) argument
418 i915_request_retire_upto(struct i915_request *rq) argument
440 struct i915_request * const *port, *rq; local
501 __await_execution(struct i915_request *rq, struct i915_request *signal, gfp_t gfp) argument
552 __i915_request_skip(struct i915_request *rq) argument
570 i915_request_set_error_once(struct i915_request *rq, int error) argument
588 i915_request_mark_eio(struct i915_request *rq) argument
761 i915_request_cancel(struct i915_request *rq, int error) argument
810 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); local
826 struct i915_request *rq, *rn; local
838 struct i915_request *rq; local
874 struct i915_request *rq = arg; local
897 struct i915_request *rq; local
1030 struct i915_request *rq; local
1059 i915_request_await_start(struct i915_request *rq, struct i915_request *signal) argument
1125 already_busywaiting(struct i915_request *rq) argument
1334 mark_external(struct i915_request *rq) argument
1348 __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) argument
1358 i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) argument
1383 is_parallel_rq(struct i915_request *rq) argument
1388 request_to_parent(struct i915_request *rq) argument
1403 i915_request_await_execution(struct i915_request *rq, struct dma_fence *fence) argument
1500 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) argument
1568 i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps) argument
1620 i915_request_await_huc(struct i915_request *rq) argument
1635 __i915_request_ensure_parallel_ordering(struct i915_request *rq, struct intel_timeline *timeline) argument
1670 __i915_request_ensure_ordering(struct i915_request *rq, struct intel_timeline *timeline) argument
1719 __i915_request_add_to_timeline(struct i915_request *rq) argument
1786 __i915_request_commit(struct i915_request *rq) argument
1816 __i915_request_queue_bh(struct i915_request *rq) argument
1822 __i915_request_queue(struct i915_request *rq, const struct i915_sched_attr *attr) argument
1844 i915_request_add(struct i915_request *rq) argument
1901 __i915_spin_request(struct i915_request * const rq, int state) argument
1980 i915_request_wait_timeout(struct i915_request *rq, unsigned int flags, long timeout) argument
2119 i915_request_wait(struct i915_request *rq, unsigned int flags, long timeout) argument
2146 queue_status(const struct i915_request *rq) argument
2157 run_status(const struct i915_request *rq) argument
2171 fence_status(const struct i915_request *rq) argument
2182 i915_request_show(struct drm_printer *m, const struct i915_request *rq, const char *prefix, int indent) argument
2234 engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq) argument
2241 match_ring(struct i915_request *rq) argument
2261 i915_test_request_state(struct i915_request *rq) argument
[all...]
/linux-master/include/linux/sched/
nohz.h
18 void calc_load_nohz_remote(struct rq *rq);
22 static inline void calc_load_nohz_remote(struct rq *rq) { } argument
/linux-master/drivers/gpu/drm/i915/gt/
gen6_engine_cs.h
16 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode);
17 int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode);
18 int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode);
19 u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
20 u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
22 int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode);
23 u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
24 u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
26 int gen6_emit_bb_start(struct i915_request *rq,
29 int hsw_emit_bb_start(struct i915_request *rq,
[all...]
gen6_engine_cs.c
55 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) argument
58 intel_gt_scratch_offset(rq->engine->gt,
62 cs = intel_ring_begin(rq, 6);
72 intel_ring_advance(rq, cs);
74 cs = intel_ring_begin(rq, 6);
84 intel_ring_advance(rq, cs);
89 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode) argument
92 intel_gt_scratch_offset(rq->engine->gt,
98 ret = gen6_emit_post_sync_nonzero_flush(rq);
130 cs = intel_ring_begin(rq,
143 gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs) argument
178 mi_flush_dw(struct i915_request *rq, u32 flags) argument
214 gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags) argument
219 gen6_emit_flush_xcs(struct i915_request *rq, u32 mode) argument
224 gen6_emit_flush_vcs(struct i915_request *rq, u32 mode) argument
229 gen6_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) argument
251 hsw_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) argument
272 gen7_stall_cs(struct i915_request *rq) argument
289 gen7_emit_flush_rcs(struct i915_request *rq, u32 mode) argument
353 gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs) argument
375 gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs) argument
393 gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs) argument
[all...]
gen8_engine_cs.c
13 int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) argument
42 if (GRAPHICS_VER(rq->i915) == 9)
46 if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
58 cs = intel_ring_begin(rq, len);
74 intel_ring_advance(rq, cs);
79 int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode) argument
83 cs = intel_ring_begin(rq, 4);
99 if (rq->engine->class == VIDEO_DECODE_CLASS)
107 intel_ring_advance(rq, c
112 gen11_emit_flush_rcs(struct i915_request *rq, u32 mode) argument
226 mtl_dummy_pipe_control(struct i915_request *rq) argument
247 gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) argument
365 gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) argument
425 hwsp_offset(const struct i915_request *rq) argument
437 gen8_emit_init_breadcrumb(struct i915_request *rq) argument
484 __xehp_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags, u32 arb) argument
525 xehp_emit_bb_start_noarb(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) argument
532 xehp_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) argument
539 gen8_emit_bb_start_noarb(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) argument
575 gen8_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) argument
603 assert_request_valid(struct i915_request *rq) argument
616 gen8_emit_wa_tail(struct i915_request *rq, u32 *cs) argument
629 emit_preempt_busywait(struct i915_request *rq, u32 *cs) argument
645 gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs) argument
660 emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs) argument
665 gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs) argument
670 gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) argument
690 gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) argument
730 gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs) argument
747 ccs_semaphore_offset(struct i915_request *rq) argument
754 ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs) argument
783 gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs) argument
802 gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs) argument
809 gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) argument
[all...]
gen2_engine_cs.h
14 int gen2_emit_flush(struct i915_request *rq, u32 mode);
15 int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode);
16 int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode);
18 u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs);
19 u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs);
21 int i830_emit_bb_start(struct i915_request *rq,
24 int gen3_emit_bb_start(struct i915_request *rq,
27 int gen4_emit_bb_start(struct i915_request *rq,
intel_context_sseu.c
16 static int gen8_emit_rpcs_config(struct i915_request *rq, argument
23 cs = intel_ring_begin(rq, 4);
33 *cs++ = intel_sseu_make_rpcs(rq->engine->gt, &sseu);
35 intel_ring_advance(rq, cs);
43 struct i915_request *rq; local
57 rq = intel_engine_create_kernel_request(ce->engine);
58 if (IS_ERR(rq)) {
59 ret = PTR_ERR(rq);
64 ret = intel_context_prepare_remote_request(ce, rq);
66 ret = gen8_emit_rpcs_config(rq, c
[all...]
/linux-master/block/
blk-pm.h
19 static inline void blk_pm_mark_last_busy(struct request *rq) argument
21 if (rq->q->dev && !(rq->rq_flags & RQF_PM))
22 pm_runtime_mark_last_busy(rq->q->dev);
30 static inline void blk_pm_mark_last_busy(struct request *rq) argument
blk-crypto-internal.h
31 bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
57 static inline void blk_crypto_rq_set_defaults(struct request *rq) argument
59 rq->crypt_ctx = NULL;
60 rq->crypt_keyslot = NULL;
63 static inline bool blk_crypto_rq_is_encrypted(struct request *rq) argument
65 return rq->crypt_ctx;
68 static inline bool blk_crypto_rq_has_keyslot(struct request *rq) argument
70 return rq->crypt_keyslot;
96 static inline bool bio_crypt_rq_ctx_compatible(struct request *rq, argument
120 static inline void blk_crypto_rq_set_defaults(struct request *rq) { } argument
122 blk_crypto_rq_is_encrypted(struct request *rq) argument
127 blk_crypto_rq_has_keyslot(struct request *rq) argument
148 bio_crypt_do_front_merge(struct request *rq, struct bio *bio) argument
167 blk_crypto_rq_get_keyslot(struct request *rq) argument
175 blk_crypto_rq_put_keyslot(struct request *rq) argument
182 blk_crypto_free_request(struct request *rq) argument
200 blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio, gfp_t gfp_mask) argument
[all...]
blk-flush.c
103 static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq) argument
107 if (blk_rq_sectors(rq))
111 if (rq->cmd_flags & REQ_PREFLUSH)
114 (rq->cmd_flags & REQ_FUA))
120 static unsigned int blk_flush_cur_seq(struct request *rq) argument
122 return 1 << ffz(rq->flush.seq);
125 static void blk_flush_restore_request(struct request *rq) argument
128 * After flush data completion, @rq->bio is %NULL but we need to
129 * complete the bio again. @rq->biotail is guaranteed to equal the
130 * original @rq
139 blk_account_io_flush(struct request *rq) argument
163 blk_flush_complete_seq(struct request *rq, struct blk_flush_queue *fq, unsigned int seq, blk_status_t error) argument
221 struct request *rq, *n; local
271 is_flush_rq(struct request *rq) argument
351 mq_flush_data_end_io(struct request *rq, blk_status_t error) argument
383 blk_rq_init_flush(struct request *rq) argument
396 blk_insert_flush(struct request *rq) argument
[all...]
/linux-master/include/linux/
blk-mq.h
118 * rq sectors used for blk stats. It has the same value
119 * with blk_rq_sectors(rq), except that it never be zeroed
198 static inline bool blk_rq_is_passthrough(struct request *rq) argument
200 return blk_op_is_passthrough(rq->cmd_flags);
208 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
210 #define rq_dma_dir(rq) \
211 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
213 #define rq_list_add(listptr, rq) do { \
214 (rq)
259 rq_list_move(struct request **src, struct request **dst, struct request *rq, struct request *prev) argument
524 struct request *rq; member in struct:blk_mq_queue_data
785 blk_mq_rq_state(struct request *rq) argument
790 blk_mq_request_started(struct request *rq) argument
795 blk_mq_request_completed(struct request *rq) argument
807 blk_mq_set_request_complete(struct request *rq) argument
816 blk_mq_complete_request_direct(struct request *rq, void (*complete)(struct request *rq)) argument
832 blk_mq_need_time_stamp(struct request *rq) argument
843 blk_mq_is_reserved_rq(struct request *rq) argument
942 blk_mq_rq_to_pdu(struct request *rq) argument
954 blk_mq_cleanup_rq(struct request *rq) argument
960 blk_rq_bio_prep(struct request *rq, struct bio *bio, unsigned int nr_segs) argument
972 rq_is_sync(struct request *rq) argument
1036 blk_rq_pos(const struct request *rq) argument
1041 blk_rq_bytes(const struct request *rq) argument
1046 blk_rq_cur_bytes(const struct request *rq) argument
1055 blk_rq_sectors(const struct request *rq) argument
1060 blk_rq_cur_sectors(const struct request *rq) argument
1065 blk_rq_stats_sectors(const struct request *rq) argument
1076 blk_rq_payload_bytes(struct request *rq) argument
1087 req_bvec(struct request *rq) argument
1094 blk_rq_count_bios(struct request *rq) argument
1126 blk_rq_nr_phys_segments(struct request *rq) argument
1137 blk_rq_nr_discard_segments(struct request *rq) argument
1144 blk_rq_map_sg(struct request_queue *q, struct request *rq, struct scatterlist *sglist) argument
1154 blk_rq_zone_no(struct request *rq) argument
1159 blk_rq_zone_is_seq(struct request *rq) argument
1170 blk_rq_is_seq_zoned_write(struct request *rq) argument
1181 blk_req_zone_write_lock(struct request *rq) argument
1187 blk_req_zone_write_unlock(struct request *rq) argument
1193 blk_req_zone_is_write_locked(struct request *rq) argument
1199 blk_req_can_dispatch_to_zone(struct request *rq) argument
1206 blk_rq_is_seq_zoned_write(struct request *rq) argument
1211 blk_req_needs_zone_write_lock(struct request *rq) argument
1216 blk_req_zone_write_lock(struct request *rq) argument
1220 blk_req_zone_write_unlock(struct request *rq) argument
1223 blk_req_zone_is_write_locked(struct request *rq) argument
1228 blk_req_can_dispatch_to_zone(struct request *rq) argument
[all...]
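
The blk-mq.h hits above are the accessor helpers drivers use on a struct request: blk_rq_pos()/blk_rq_sectors() give the starting sector and length, rq_data_dir() gives the transfer direction, blk_mq_rq_to_pdu() returns the per-request driver data, and bd->rq is the request handed to the driver in struct blk_mq_queue_data. Below is a minimal sketch of how these are typically combined in a driver's ->queue_rq() callback; my_dev, my_transfer() and my_queue_rq() are hypothetical names for illustration, not code from the tree.

/* Sketch: a trivial ->queue_rq() using the accessors listed above. */
#include <linux/blk-mq.h>

struct my_dev;					/* hypothetical device state */

/* Stand-in for the real hardware I/O; always succeeds in this sketch. */
static int my_transfer(struct my_dev *dev, sector_t pos,
		       unsigned int nr_sects, bool is_write)
{
	return 0;
}

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;		/* member shown in the hits above */
	struct my_dev *dev = hctx->queue->queuedata;

	blk_mq_start_request(rq);

	/* Starting sector, length in sectors, and direction of the request. */
	if (my_transfer(dev, blk_rq_pos(rq), blk_rq_sectors(rq),
			rq_data_dir(rq) == WRITE))
		return BLK_STS_IOERR;		/* blk-mq completes the request with this error */

	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}
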
/linux-master/fs/erofs/
decompressor.c
19 struct z_erofs_decompress_req *rq; member in struct:z_erofs_lz4_decompress_ctx
67 struct z_erofs_decompress_req *rq = ctx->rq; local
72 EROFS_SB(rq->sb)->lz4.max_distance_pages;
78 struct page *const page = rq->out[i];
85 if (!rq->fillgaps && test_bit(j, bounced)) {
88 availables[top++] = rq->out[i - lz4_max_distance_pages];
114 victim = erofs_allocpage(pagepool, rq->gfp);
119 rq->out[i] = victim;
128 struct z_erofs_decompress_req *rq local
194 z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, unsigned int padbufsize) argument
210 struct z_erofs_decompress_req *rq = ctx->rq; local
271 z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, struct page **pagepool) argument
315 z_erofs_transform_plain(struct z_erofs_decompress_req *rq, struct page **pagepool) argument
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.h
11 int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
12 int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
13 int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
14 struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
20 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
