/linux-master/include/linux/
blk-mq.h:
    118: * rq sectors used for blk stats. It has the same value
    119: * as blk_rq_sectors(rq), except that it is never zeroed
    198: static inline bool blk_rq_is_passthrough(struct request *rq)
    200: return blk_op_is_passthrough(rq->cmd_flags);
    208: #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
    210: #define rq_dma_dir(rq) \
    211:     (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
    213: #define rq_list_add(listptr, rq) do { \
    214:     (rq)…
    259: rq_list_move(struct request **src, struct request **dst, struct request *rq, struct request *prev)
    524: struct request *rq;  (member of struct blk_mq_queue_data)
    785: blk_mq_rq_state(struct request *rq)
    790: blk_mq_request_started(struct request *rq)
    795: blk_mq_request_completed(struct request *rq)
    807: blk_mq_set_request_complete(struct request *rq)
    816: blk_mq_complete_request_direct(struct request *rq, void (*complete)(struct request *rq))
    832: blk_mq_need_time_stamp(struct request *rq)
    843: blk_mq_is_reserved_rq(struct request *rq)
    942: blk_mq_rq_to_pdu(struct request *rq)
    954: blk_mq_cleanup_rq(struct request *rq)
    960: blk_rq_bio_prep(struct request *rq, struct bio *bio, unsigned int nr_segs)
    972: rq_is_sync(struct request *rq)
    1036: blk_rq_pos(const struct request *rq)
    1041: blk_rq_bytes(const struct request *rq)
    1046: blk_rq_cur_bytes(const struct request *rq)
    1055: blk_rq_sectors(const struct request *rq)
    1060: blk_rq_cur_sectors(const struct request *rq)
    1065: blk_rq_stats_sectors(const struct request *rq)
    1076: blk_rq_payload_bytes(struct request *rq)
    1087: req_bvec(struct request *rq)
    1094: blk_rq_count_bios(struct request *rq)
    1126: blk_rq_nr_phys_segments(struct request *rq)
    1137: blk_rq_nr_discard_segments(struct request *rq)
    1144: blk_rq_map_sg(struct request_queue *q, struct request *rq, struct scatterlist *sglist)
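Taken together, the inline helpers above form the accessor surface a block driver uses on a request it has been handed. A minimal sketch of a queue_rq handler consuming them; the driver name, the my_cmd pdu layout, and the assumption that cmd_size was set at tag-set allocation are hypothetical, not from the indexed file:

#include <linux/blk-mq.h>

struct my_cmd {                 /* per-request driver data; its size is */
        sector_t lba;           /* declared via tag_set.cmd_size */
        unsigned int nr_sectors;
        bool is_write;
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;                    /* member at 524 */
        struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);      /* 942 */

        cmd->lba = blk_rq_pos(rq);                      /* start sector, 1036 */
        cmd->nr_sectors = blk_rq_sectors(rq);           /* total sectors, 1055 */
        cmd->is_write = rq_data_dir(rq) == WRITE;       /* macro at 208 */

        blk_mq_start_request(rq);
        /* ...hand off to hardware; on completion the driver calls: */
        blk_mq_end_request(rq, BLK_STS_OK);
        return BLK_STS_OK;
}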
t10-pi.h:
    40: static inline u32 t10_pi_ref_tag(struct request *rq)
    42: unsigned int shift = ilog2(queue_logical_block_size(rq->q));
    45: if (rq->q->integrity.interval_exp)
    46:     shift = rq->q->integrity.interval_exp;
    48: return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
    71: static inline u64 ext_pi_ref_tag(struct request *rq)
    73: unsigned int shift = ilog2(queue_logical_block_size(rq->q));
    76: if (rq->q->integrity.interval_exp)
    77:     shift = rq->q->integrity.interval_exp;
    79: return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT));
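Both helpers rescale the request's start (in 512-byte sectors, from blk_rq_pos()) into protection-interval units, then truncate to the tag width: 32 bits for T10 PI, 48 bits for the extended format. A standalone userspace check of the shift arithmetic, using assumed example values (4096-byte logical blocks, no interval_exp override):

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9          /* 512-byte sectors, as in the kernel */

int main(void)
{
        unsigned int shift = 12;        /* ilog2(4096) for 4K blocks */
        uint64_t pos = 800;             /* request start in 512B sectors */

        /* 800 >> (12 - 9) == 100: the 100th 4K protection interval */
        printf("ref tag = %u\n",
               (uint32_t)(pos >> (shift - SECTOR_SHIFT)));
        return 0;
}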
/linux-master/drivers/gpu/drm/i915/gt/ |
gen2_engine_cs.c:
    16: int gen2_emit_flush(struct i915_request *rq, u32 mode)
    25: cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);
    38: intel_ring_advance(rq, cs);
    43: int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
    79: if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5)
    87: cs = intel_ring_begin(rq, i);
    105: *cs++ = intel_gt_scratch_offset(rq->engine->gt,
    115: *cs++ = intel_gt_scratch_offset(rq->engine->gt,
    124: intel_ring_advance(rq, cs);
    129: gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
    144: __gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs, int flush, int post)
    172: gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
    177: gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
    186: i830_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags)
    251: gen3_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags)
    271: gen4_emit_bb_start(struct i915_request *rq, u64 offset, u32 length, unsigned int dispatch_flags)
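All of these emitters share one idiom: reserve dwords on the request's ring, write them, then commit. A hedged sketch of that shape (the two-dword payload and function name are invented for illustration):

#include <linux/err.h>
#include "intel_ring.h"

static int emit_two_dwords(struct i915_request *rq, u32 a, u32 b)
{
        u32 *cs;

        cs = intel_ring_begin(rq, 2);   /* reserve space; may return ERR_PTR */
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = a;
        *cs++ = b;

        intel_ring_advance(rq, cs);     /* commit exactly what was reserved */
        return 0;
}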
intel_breadcrumbs.c:
    111: check_signal_order(struct intel_context *ce, struct i915_request *rq)
    113: if (rq->context != ce)
    116: if (!list_is_last(&rq->signal_link, &ce->signals) &&
    117:     i915_seqno_passed(rq->fence.seqno,
    118:                       list_next_entry(rq, signal_link)->fence.seqno))
    121: if (!list_is_first(&rq->signal_link, &ce->signals) &&
    122:     i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
    123:                       rq->fence.seqno))
    212: struct i915_request *rq;
    214: list_for_each_entry_rcu(rq, …
    250: struct i915_request *rq = …
    338: irq_signal_request(struct i915_request *rq, struct intel_breadcrumbs *b)
    349: insert_breadcrumb(struct i915_request *rq)
    411: i915_request_enable_breadcrumb(struct i915_request *rq)
    436: i915_request_cancel_breadcrumb(struct i915_request *rq)
    463: struct i915_request *rq, *rn;
    496: struct i915_request *rq;
intel_engine_heartbeat.c:
    25: struct i915_request *rq;
    30: rq = engine->heartbeat.systole;
    41: if (rq && rq->sched.attr.priority >= I915_PRIORITY_BARRIER &&
    70: struct i915_request *rq;
    73: rq = __i915_request_create(ce, gfp);
    76: return rq;
    79: static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
    82: i915_request_add_active_barriers(rq);
    84: engine->heartbeat.systole = i915_request_get(rq);
    87: heartbeat_commit(struct i915_request *rq, const struct i915_sched_attr *attr)
    96: show_heartbeat(const struct i915_request *rq, struct intel_engine_cs *engine)
    117: reset_engine(struct intel_engine_cs *engine, struct i915_request *rq)
    142: struct i915_request *rq;
    279: struct i915_request *rq;
    384: struct i915_request *rq;
selftest_execlists.c:
    28: static bool is_active(struct i915_request *rq)
    30: if (i915_request_is_active(rq))
    33: if (i915_request_on_hold(rq))
    36: if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
    42: wait_for_submit(struct intel_engine_cs *engine, struct i915_request *rq, unsigned long timeout)
    53: if (i915_request_completed(rq)) /* that was quick! */
    58: if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
    68: wait_for_reset(struct intel_engine_cs *engine, struct i915_request *rq, unsigned long timeout)
    81: if (i915_request_completed(rq))
    182: struct i915_request *rq[2];
    820: emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
    1130, 2173: struct i915_request *rq[3] = {};
    1330: struct i915_request *rq, *nop;
    2102: struct i915_request *rq[2] = {};
    2975: struct i915_request *rq = NULL;
    local declarations of struct i915_request *rq at 125, 344, 493, 601, 720, 860, 895, 946, 1056, 1259, 1430, 1721, 1759, 1852, 2058, 2268, 2325, 2576, 2706, 2801, 3153, 3214, 3299, 3398, 3490, 3754, 3771, 4027, 4094, 4226 and 4337
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
rx.c:
    19: int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
    21: struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
    22: struct mlx5e_icosq *icosq = rq->icosq;
    30: if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))
    35: batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs,
    36:                              rq->mpwqe.pages_per_wqe);
    44: for (; batch < rq->mpwqe.pages_per_wqe; batch++) {
    45:     xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool);
    50: pi = mlx5e_icosq_get_next_pi(icosq, rq…
    160: mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
    198: mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
    225: mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_buff *xdp)
    247: mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset, u32 page_idx)
    304: mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
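Lines 35-45 show the usual AF_XDP allocation idiom: try a bulk grab from the pool, then top up one frame at a time. A simplified sketch of that pattern (the helper name and error convention are assumptions, not the driver's code):

#include <net/xdp_sock_drv.h>

static int alloc_rx_frames(struct xsk_buff_pool *pool,
                           struct xdp_buff **bufs, u32 want)
{
        u32 got = xsk_buff_alloc_batch(pool, bufs, want);  /* fast path */

        for (; got < want; got++) {             /* slow path, one by one */
                bufs[got] = xsk_buff_alloc(pool);
                if (!bufs[got])
                        return -ENOMEM;         /* caller frees bufs[0..got) */
        }
        return 0;
}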
/linux-master/drivers/gpu/drm/i915/pxp/ |
intel_pxp_cmd.c:
    83: static void pxp_request_commit(struct i915_request *rq)
    86: struct intel_timeline * const tl = i915_request_timeline(rq);
    88: lockdep_unpin_lock(&tl->mutex, rq->cookie);
    90: trace_i915_request_add(rq);
    91: __i915_request_commit(rq);
    92: __i915_request_queue(rq, &attr);
    99: struct i915_request *rq;
    107: rq = i915_request_create(ce);
    108: if (IS_ERR(rq))
    109:     return PTR_ERR(rq);
/linux-master/kernel/sched/ |
deadline.c:
    67: static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
    69: return container_of(dl_rq, struct rq, dl);
    72: static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
    74: struct rq *rq = dl_se->rq;
    77: rq = task_rq(dl_task_of(dl_se));
    79: return rq;
    153: * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
    189: struct rq *r…
    409: struct rq *rq = rq_of_dl_se(dl_se);   (also at 1013, 1132, 1267)
    547: dl_overloaded(struct rq *rq)
    552: dl_set_overload(struct rq *rq)
    568: dl_clear_overload(struct rq *rq)
    585: has_pushable_dl_tasks(struct rq *rq)
    594, 734: enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
    612, 739: dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
    635: need_pull_dl_task(struct rq *rq, struct task_struct *prev)
    646, 753: deadline_queue_push_tasks(struct rq *rq)
    654, 757: deadline_queue_pull_task(struct rq *rq)
    661: dl_task_offline_migration(struct rq *rq, struct task_struct *p)
    768: replenish_dl_new_period(struct sched_dl_entity *dl_se, struct rq *rq)
    791: struct rq *rq = rq_of_dl_rq(dl_rq);   (also at 833, 1048, 1536, 1549)
    950: update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
    1090: __push_dl_task(struct rq *rq, struct rq_flags *rf)
    1302: grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
    1325: update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
    1429: dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_server_has_tasks_f has_tasks, dl_server_pick_f pick)
    1442: update_curr_dl(struct rq *rq)
    1787: enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
    1844: dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
    1866: yield_task_dl(struct rq *rq)
    1888: dl_task_is_earliest_deadline(struct task_struct *p, struct rq *rq)
    1980: check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
    2001: balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
    2023: wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags)
    2043, 2048: start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
    2053: set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
    2084: pick_task_dl(struct rq *rq)
    2113: pick_next_task_dl(struct rq *rq)
    2130: put_prev_task_dl(struct rq *rq, struct task_struct *p)
    2140, 2157: update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
    2153: task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
    2181: pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
    2193: pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
    2307: find_lock_later_rq(struct task_struct *task, struct rq *rq)
    2361: pick_next_pushable_dl_task(struct rq *rq)
    2385: push_dl_task(struct rq *rq)
    2460: push_dl_tasks(struct rq *rq)
    2563: task_woken_dl(struct rq *rq, struct task_struct *p)
    2609: rq_online_dl(struct rq *rq)
    2620: rq_offline_dl(struct rq *rq)
    2673: switched_from_dl(struct rq *rq, struct task_struct *p)
    2727: switched_to_dl(struct rq *rq, struct task_struct *p)
    2755: update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
    2763: prio_changed_dl(struct rq *rq, struct task_struct *p, int oldprio)
    local declarations of struct rq *rq at 325, 1129, 1470, 1903, 1950, 2579 and 2641
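The rq_of_dl_rq() helper at line 67 is the standard container_of() trick: the deadline runqueue is embedded in struct rq, so a pointer to the member recovers the enclosing runqueue. A standalone illustration with simplified stand-in types (not the scheduler's real definitions):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dl_rq { int nr_running; };

struct rq {
        int cpu;
        struct dl_rq dl;        /* embedded member, as in struct rq */
};

static struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        /* step back from &rq->dl to the enclosing struct rq */
        return container_of(dl_rq, struct rq, dl);
}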
core.c:
    119: DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
    240: void sched_core_enqueue(struct rq *rq, struct task_struct *p)   (stub at 454)
    242, 252: rq->core->core_task_seq++;
    247: rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
    250: void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)   (stub at 456)
    255: rb_erase(&p->core_node, &rq->core_tree);
    264: if (!(flags & DEQUEUE_SAVE) && rq…
    300: sched_core_find(struct rq *rq, unsigned long cookie)
    552: raw_spin_rq_lock_nested(struct rq *rq, int subclass)
    577: raw_spin_rq_trylock(struct rq *rq)
    601: raw_spin_rq_unlock(struct rq *rq)
    637, 661: raw_spin_rq_lock(rq);
    642, 683: raw_spin_rq_unlock(rq);
    695: update_rq_clock_task(struct rq *rq, s64 delta)
    751: update_rq_clock(struct rq *rq)   (call at 3127)
    778, 877: hrtick_clear(struct rq *rq)
    790: struct rq *rq = container_of(timer, struct rq, hrtick_timer);
    805: __hrtick_restart(struct rq *rq)
    818: struct rq *rq = arg;
    831, 855: hrtick_start(struct rq *rq, u64 delay)
    868, 881: hrtick_rq_init(struct rq *rq)
    1041: resched_curr(struct rq *rq)
    1196: struct rq *rq = info;
    1216: __need_bw_check(struct rq *rq, struct task_struct *p)
    1230: sched_can_stop_tick(struct rq *rq)
    1439: uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id, unsigned int clamp_value)
    1455: uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, unsigned int clamp_value)
    1466: uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, unsigned int clamp_value)
    1584: uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, enum uclamp_id clamp_id)
    1622: uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, enum uclamp_id clamp_id)
    1688, 2067: uclamp_rq_inc(struct rq *rq, struct task_struct *p)
    1712, 2068: uclamp_rq_dec(struct rq *rq, struct task_struct *p)
    1732: uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, enum uclamp_id clamp_id)
    2027: init_uclamp_rq(struct rq *rq)
    2105: enqueue_task(struct rq *rq, struct task_struct *p, int flags)
    2122: dequeue_task(struct rq *rq, struct task_struct *p, int flags)
    2139: activate_task(struct rq *rq, struct task_struct *p, int flags)
    2152: deactivate_task(struct rq *rq, struct task_struct *p, int flags)
    2224: check_class_changed(struct rq *rq, struct task_struct *p, const struct sched_class *prev_class, int oldprio)
    2237: wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
    2398, 3710: migrate_disable_switch(struct rq *rq, struct task_struct *p)
    2466, 3712: rq_has_pinned_tasks(struct rq *rq)
    2520: move_queued_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p, int new_cpu)
    2566: __migrate_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p, int dest_cpu)
    2588, 3878, 5242, 9437, 9553: struct rq *rq = this_rq();
    2691: struct rq *lowest_rq = NULL, *rq = this_rq();
    2756: struct rq *rq = task_rq(p);
    3773: ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, struct rq_flags *rf)
    4437: struct rq *rq = NULL;
    5038: do_balance_callbacks(struct rq *rq, struct balance_callback *head)
    5074: __splice_balance_callbacks(struct rq *rq, bool split)
    5098, 5125: splice_balance_callbacks(struct rq *rq)
    5103, 5121: __balance_callbacks(struct rq *rq)
    5108, 5130: balance_callbacks(struct rq *rq, struct balance_callback *head)
    5137: prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
    5153: finish_lock_switch(struct rq *rq)   (call at 5280)
    5207: prepare_task_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
    5352: context_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next, struct rq_flags *rf)
    5612, 5657: cpu_resched_latency(struct rq *rq)
    5682: update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
    5989: put_prev_task_balance(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
    6015: __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
    6088: pick_task(struct rq *rq)
    6107, 6551: pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
    6406: sched_core_balance(struct rq *rq)
    6427: queue_core_balance(struct rq *rq)
    6449, 6488: struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
    8960: struct rq *rq, *p_rq;
    9119: struct rq *rq = scope.rq;
    9467, 9562: balance_push(struct rq *rq)
    9576: set_rq_online(struct rq *rq)
    9591: set_rq_offline(struct rq *rq)
    9812: calc_load_migrate(struct rq *rq)
    9820: dump_rq_tasks(struct rq *rq, const char *loglvl)
    10880: struct rq *rq = cfs_rq->rq;
    11546: call_trace_sched_update_nr_running(struct rq *rq, int count)
    11981: task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
    struct rq *rq = cpu_rq(cpu); at 1067, 1130, 3936, 3946, 4049, 5667, 5748, 6538, 7412, 7459, 7495, 9267, 9532, 9659, 9704, 9770, 9841, 11825 and 11880
    local declarations of struct rq *rq at 631, 656, 1754, 2296, 3206, 3250, 3727, 3853, 4879, 5575, 6621, 7155, 7276, 7714, 8557, 9393, 9985, 10487, 11997, 12019 and 12041
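The DEFINE_PER_CPU_SHARED_ALIGNED declaration at line 119 is what the cpu_rq(cpu) and this_rq() expressions seen throughout this file resolve to. A sketch of the accessor macros as defined in kernel/sched/sched.h (quoted from memory, so treat as approximate):

#include <linux/percpu.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)     (&per_cpu(runqueues, (cpu)))    /* any CPU's rq */
#define this_rq()       this_cpu_ptr(&runqueues)        /* current CPU's rq */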
rt.c:
    177: static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
    179: return rt_rq->rq;
    187: static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
    191: return rt_rq->rq;
    220: struct rq *rq = cpu_rq(cpu);
    224: rt_rq->rq = rq;
    234: rt_se->rt_rq = &rq->rt;
    292: static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
    306: struct rq *rq = rq_of_rt_se(rt_se);   (also at 1448, 1460)
    323: need_pull_rt_task(struct rq *rq, struct task_struct *prev)
    329: rt_overloaded(struct rq *rq)
    334: rt_set_overload(struct rq *rq)
    353: rt_clear_overload(struct rq *rq)
    363: has_pushable_tasks(struct rq *rq)
    374, 432: rt_queue_push_tasks(struct rq *rq)
    382: rt_queue_pull_task(struct rq *rq)
    387, 424: enqueue_pushable_task(struct rq *rq, struct task_struct *p)
    403, 428: dequeue_pushable_task(struct rq *rq, struct task_struct *p)
    533: struct rq *rq = rq_of_rt_rq(rt_rq);   (also at 634, 878, 1036, 1053, 1077, 1093)
    735: __disable_runtime(struct rq *rq)
    817: __enable_runtime(struct rq *rq)
    1000: update_curr_rt(struct rq *rq)
    1479: enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
    1495: dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
    1523: requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
    1534: yield_task_rt(struct rq *rq)
    1546, 2266: struct rq *rq;
    1614: check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
    1641: balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
    1662: wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
    1687: set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
    1731: _pick_next_task_rt(struct rq *rq)
    1746: pick_task_rt(struct rq *rq)
    1758: pick_next_task_rt(struct rq *rq)
    1768: put_prev_task_rt(struct rq *rq, struct task_struct *p)
    1778, 2592: update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
    1793: pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
    1806: pick_highest_pushable_task(struct rq *rq, int cpu)
    1916: find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
    1976: pick_next_pushable_task(struct rq *rq)
    2001: push_rt_task(struct rq *rq, bool pull)
    2121: push_rt_tasks(struct rq *rq)
    2228: tell_cpu_to_push(struct rq *rq)
    2414: task_woken_rt(struct rq *rq, struct task_struct *p)
    2428: rq_online_rt(struct rq *rq)
    2439: rq_offline_rt(struct rq *rq)
    2453: switched_from_rt(struct rq *rq, struct task_struct *p)
    2484: switched_to_rt(struct rq *rq, struct task_struct *p)
    2491: update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
    2515: prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
    2552, 2576: watchdog(struct rq *rq, struct task_struct *p)
    2587: task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
    2621: get_rr_interval_rt(struct rq *rq, struct task_struct *task)
/linux-master/drivers/gpu/drm/i915/selftests/ |
i915_perf.c:
    161: static int write_timestamp(struct i915_request *rq, int slot)
    166: cs = intel_ring_begin(rq, 6);
    171: if (GRAPHICS_VER(rq->i915) >= 8)
    183: intel_ring_advance(rq, cs);
    188: static ktime_t poll_status(struct i915_request *rq, int slot)
    190: while (!intel_read_status_page(rq->engine, slot) &&
    191:        !i915_request_completed(rq))
    201: struct i915_request *rq;
    224: rq = intel_engine_create_kernel_request(stream->engine);
    225: if (IS_ERR(rq)) {
    294: struct i915_request *rq;
igt_spinner.c:
    116: hws_address(const struct i915_vma *hws, const struct i915_request *rq)
    119: return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
    128: struct i915_request *rq = NULL;
    148: rq = intel_context_create_request(ce);
    149: if (IS_ERR(rq))
    150:     return ERR_CAST(rq);
    152: err = igt_vma_move_to_active_unlocked(vma, rq, 0);
    156: err = igt_vma_move_to_active_unlocked(hws, rq, 0);
    162: if (GRAPHICS_VER(rq->i915) >= 8) {
    164: *batch++ = lower_32_bits(hws_address(hws, rq));
    220: hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
    253: igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
/linux-master/drivers/s390/char/ |
raw3270.h:
    30: void (*callback)(struct raw3270_request *rq, void *data);
    35: void raw3270_request_free(struct raw3270_request *rq);
    36: int raw3270_request_reset(struct raw3270_request *rq);
    37: void raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd);
    38: int raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size);
    39: void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size);
    40: void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib);
    43: raw3270_request_final(struct raw3270_request *rq)
    45: return list_empty(&rq->list);
    54: int (*activate)(struct raw3270_view *rq);
/linux-master/block/ |
blk-mq-sched.h:
    14: bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, …
    37: blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq, struct bio *bio)
    40: if (rq->rq_flags & RQF_USE_SCHED) {
    44:     return e->type->ops.allow_merge(q, rq, bio);
    49: static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
    51: if (rq->rq_flags & RQF_USE_SCHED) {
    52:     struct elevator_queue *e = rq->q->elevator;
    55:     e->type->ops.completed_request(rq, now);
    59: static inline void blk_mq_sched_requeue_request(struct request *rq)
    61: if (rq…
blk-mq.c:
    42: #include "blk-rq-qos.h"
    47: static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
    48: static void blk_mq_request_bypass_insert(struct request *rq, …
    91: static bool blk_mq_check_inflight(struct request *rq, void *priv)
    95: if (rq->part && blk_do_io_stat(rq) &&
    96:     (!bdev_is_partition(mi->part) || rq->part == mi->part) &&
    97:     blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
    98:     mi->inflight[rq_data_dir(rq)]++;
    314: void blk_rq_init(struct request_queue *q, struct request *rq)
    332: blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
    353: struct request *rq = tags->static_rqs[tag];
    690: blk_mq_finish_request(struct request *rq)
    707: __blk_mq_free_request(struct request *rq)
    728: blk_mq_free_request(struct request *rq)
    753: blk_dump_rq_flags(struct request *rq, char *msg)
    1029: __blk_mq_end_request_acct(struct request *rq, u64 now)
    1038: __blk_mq_end_request(struct request *rq, blk_status_t error)
    1055: blk_mq_end_request(struct request *rq, blk_status_t error)
    1129: struct request *rq, *next;
    1151: blk_mq_complete_need_ipi(struct request *rq)
    1178: blk_mq_complete_send_ipi(struct request *rq)
    1187: blk_mq_raise_softirq(struct request *rq)
    1198: blk_mq_complete_request_remote(struct request *rq)
    1232: blk_mq_complete_request(struct request *rq)
    1247: blk_mq_start_request(struct request *rq)
    1288: blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
    1327: blk_execute_rq_nowait(struct request *rq, bool at_head)
    1351: blk_end_sync_rq(struct request *rq, blk_status_t ret)
    1360: blk_rq_is_poll(struct request *rq)
    1370: blk_rq_poll_completion(struct request *rq, struct completion *wait)
    1388: blk_execute_rq(struct request *rq, bool at_head)
    1414: __blk_mq_requeue_request(struct request *rq)
    1429: blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
    1501: blk_is_flush_data_rq(struct request *rq)
    1506: blk_mq_rq_inflight(struct request *rq, void *priv)
    1560: blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
    1580: blk_mq_put_rq_ref(struct request *rq)
    1590: blk_mq_check_expired(struct request *rq, void *priv)
    1608: blk_mq_handle_expired(struct request *rq, void *priv)
    1712: struct request *rq;  (member of struct dispatch_rq_data)
    1750: __blk_mq_alloc_driver_tag(struct request *rq)
    1802: blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq)
    1915: blk_mq_handle_dev_resource(struct request *rq, struct list_head *list)
    1928: blk_mq_prep_dispatch_rq(struct request *rq, bool need_budget)
    2432: blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
    2481: blk_mq_insert_request(struct request *rq, blk_insert_t flags)
    2543: blk_mq_bio_to_request(struct request *rq, struct bio *bio, unsigned int nr_segs)
    2562: __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, bool last)
    2595: blk_mq_get_budget_and_tag(struct request *rq)
    2620: blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq)
    2651: blk_mq_request_issue_directly(struct request *rq, bool last)
    2723: struct request *rq = rq_list_pop(&plug->mq_list);
    2810: struct request *rq = list_first_entry(list, struct request, …
    2899: blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug, struct bio *bio)
    3041: blk_insert_cloned_request(struct request *rq)
    3107: blk_rq_unprep_clone(struct request *rq)
    3136: blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, int (*bio_ctr)(struct bio *, struct bio *, void *), void *data)
    3194: blk_steal_bios(struct bio_list *list, struct request *rq)
    3236: struct request *rq = drv_tags->rqs[i];
    3274: struct request *rq = tags->static_rqs[i];
    3371: blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, int node)
    3444: struct request *rq = p;
    3468: blk_mq_has_request(struct request *rq, void *data)
    4862: blk_rq_poll(struct request *rq, struct io_comp_batch *iob, unsigned int poll_flags)
    4880: blk_mq_rq_cpu(struct request *rq)
    local declarations of struct request *rq at 409, 441, 533, 551, 584, 624, 747, 1080, 1454, 1969, 2004, 2448, 2668, 2760, 2858, 2884 and 2937
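blk_execute_rq() at 1388 and blk_mq_free_request() at 728 bracket the synchronous pass-through path. A minimal hedged sketch of a caller (the surrounding context and the REQ_OP_DRV_IN choice are assumptions):

#include <linux/blk-mq.h>

static int issue_sync_passthrough(struct request_queue *q)
{
        struct request *rq;
        blk_status_t status;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* driver-specific command setup would go here */

        status = blk_execute_rq(rq, false);     /* blocks until done, 1388 */
        blk_mq_free_request(rq);                /* 728 */
        return blk_status_to_errno(status);
}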
/linux-master/drivers/scsi/esas2r/ |
esas2r_disc.c:
    49, 51, 55, 59, 61, 63, 65, 67, 69, 71: struct esas2r_request *rq);  (continuation lines of static prototypes)
    160, 313: struct esas2r_request *rq = &a->general_req;
    386: esas2r_disc_continue(struct esas2r_adapter *a, struct esas2r_request *rq)
    459: esas2r_disc_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
    488: esas2r_disc_local_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
    502: esas2r_disc_abort(struct esas2r_adapter *a, struct esas2r_request *rq)
    517: esas2r_disc_block_dev_scan(struct esas2r_adapter *a, struct esas2r_request *rq)
    548: esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a, struct esas2r_request *rq)
    577: esas2r_disc_raid_grp_info(struct esas2r_adapter *a, struct esas2r_request *rq)
    624: esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq)
    687: esas2r_disc_part_info(struct esas2r_adapter *a, struct esas2r_request *rq)
    739: esas2r_disc_part_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq)
    788: esas2r_disc_passthru_dev_info(struct esas2r_adapter *a, struct esas2r_request *rq)
    825: esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq)
    881: esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a, struct esas2r_request *rq)
    939: esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a, struct esas2r_request *rq)
    1044: esas2r_disc_dev_remove(struct esas2r_adapter *a, struct esas2r_request *rq)
    1083: esas2r_disc_dev_add(struct esas2r_adapter *a, struct esas2r_request *rq)
    1163: struct esas2r_request *rq;
esas2r_vda.c:
    59: static void clear_vda_request(struct esas2r_request *rq);
    62: struct esas2r_request *rq);  (prototype continuation)
    65: esas2r_process_vda_ioctl(struct esas2r_adapter *a, struct atto_ioctl_vda *vi, struct esas2r_request *rq, struct esas2r_sg_context *sgc)
    93: clear_vda_request(rq);
    95: rq->vrq->scsi.function = vi->function;
    96: rq->interrupt_cb = esas2r_complete_vda_ioctl;
    97: rq->interrupt_cx = vi;
    112: rq->vrq->flash.length = cpu_to_le32(datalen);
    113: rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
    115: memcpy(rq…
    269: esas2r_complete_vda_ioctl(struct esas2r_adapter *a, struct esas2r_request *rq)
    346: esas2r_build_flash_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 sub_func, u8 cksum, u32 addr, u32 length)
    372: esas2r_build_mgt_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 sub_func, u8 scan_gen, u16 dev_index, u32 length, void *data)
    420: esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
    448: esas2r_build_cli_req(struct esas2r_adapter *a, struct esas2r_request *rq, u32 length, u32 cmd_rsp_len)
    465: esas2r_build_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq, u32 length, u8 sub_func)
    482: esas2r_build_cfg_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 sub_func, u32 length, void *data)
    504: clear_vda_request(struct esas2r_request *rq)
/linux-master/drivers/gpu/drm/i915/gt/uc/ |
selftest_guc_hangcheck.c:
    17: struct i915_request *rq;
    19: rq = intel_engine_create_kernel_request(engine);
    20: if (IS_ERR(rq))
    21:     return rq;
    23: i915_request_get(rq);
    24: i915_request_add(rq);
    26: return rq;
    36: struct i915_request *rq;
    77: rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
    79: if (IS_ERR(rq)) {
intel_gsc_uc_heci_cmd_submit.c:
    20: static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt)
    24: cs = intel_ring_begin(rq, 8);
    37: intel_ring_advance(rq, cs);
    47: struct i915_request *rq;
    59: rq = i915_request_create(ce);
    60: if (IS_ERR(rq))
    61:     return PTR_ERR(rq);
    64: err = ce->engine->emit_init_breadcrumb(rq);
    69: err = emit_gsc_heci_pkt(rq, &pkt);
    74: err = ce->engine->emit_flush(rq, …
    143: struct i915_request *rq;
/linux-master/fs/erofs/ |
decompressor_deflate.c:
    97: int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, …
    101: PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
    103: PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
    104: struct super_block *sb = rq->sb;
    112: kin = kmap_local_page(*rq->in);
    113: err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
    114:     min_t(unsigned int, rq->inputsize,
    115:           sb->s_blocksize - rq->pageofs_in));
decompressor_lzma.c:
    150: int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, …
    154: PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
    156: PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
    164: kin = kmap(*rq->in);
    165: err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
    166:     min_t(unsigned int, rq->inputsize,
    167:           rq->sb->s_blocksize - rq->pageofs_in));
decompressor_zstd.c:
    138: int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, …
    142: PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
    144: PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
    146: struct super_block *sb = rq->sb;
    156: kin = kmap_local_page(*rq->in);
    157: err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
    158:     min_t(unsigned int, rq->inputsize,
    159:           sb->s_blocksize - rq->pageofs_in));
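All three decompressors open with the same move: map the first input page and clamp the claimed input size to what fits in one block. The mapping side of that idiom, as a generic sketch (not erofs code); kmap_local mappings must be released in reverse order of acquisition:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_page_prefix(struct page *dst, struct page *src, size_t n)
{
        void *d = kmap_local_page(dst);
        void *s = kmap_local_page(src);

        memcpy(d, s, n);

        kunmap_local(s);        /* last mapped, first unmapped */
        kunmap_local(d);
}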
/linux-master/drivers/platform/chrome/wilco_ec/ |
mailbox.c:
    89: * @rq: EC request structure to fill.
    91: static void wilco_ec_prepare(struct wilco_ec_message *msg, struct wilco_ec_request *rq)
    94: memset(rq, 0, sizeof(*rq));
    95: rq->struct_version = EC_MAILBOX_PROTO_VERSION;
    96: rq->mailbox_id = msg->type;
    97: rq->mailbox_version = EC_MAILBOX_VERSION;
    98: rq->data_size = msg->request_size;
    101: rq->checksum = wilco_ec_checksum(rq, sizeof(*rq));
    115: wilco_ec_transfer(struct wilco_ec_device *ec, struct wilco_ec_message *msg, struct wilco_ec_request *rq)
    198: struct wilco_ec_request *rq;
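wilco_ec_prepare() zeroes the request, fills the header fields, and stamps a checksum over the whole struct. The excerpt does not show wilco_ec_checksum() itself, so the following two's-complement byte sum is only an assumed illustration of that kind of helper:

#include <stddef.h>
#include <stdint.h>

static uint8_t sum_checksum(const void *data, size_t size)
{
        const uint8_t *p = data;
        uint8_t sum = 0;

        while (size--)
                sum += *p++;

        return (uint8_t)-sum;   /* bytes + checksum sum to zero mod 256 */
}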
/linux-master/drivers/scsi/ |
scsi_bsg.c:
    16: struct request *rq;
    28: rq = scsi_alloc_request(q, hdr->dout_xfer_len ? …
    30: if (IS_ERR(rq))
    31:     return PTR_ERR(rq);
    32: rq->timeout = timeout;
    34: scmd = blk_mq_rq_to_pdu(rq);
    50: ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
    53: ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp), …
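Lines 50-53 pick one of the two user buffers (write-out or read-in) and attach it to the pass-through request before execution. A hedged sketch of the map/execute/unmap sequence (error handling trimmed; GFP_KERNEL assumed):

#include <linux/blk-mq.h>

static int map_execute_unmap(struct request *rq, void __user *ubuf,
                             unsigned long len)
{
        struct bio *bio;
        int ret;

        ret = blk_rq_map_user(rq->q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                return ret;

        bio = rq->bio;                  /* save before execution */
        blk_execute_rq(rq, false);      /* synchronous */
        return blk_rq_unmap_user(bio);  /* copy back and release */
}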