/linux-master/kernel/sched/

core_sched.c
      58  struct rq *rq;  [local]
      60  rq = task_rq_lock(p, &rf);
      68  SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));
      71  sched_core_dequeue(rq, p, DEQUEUE_SAVE);
      80  sched_core_enqueue(rq, p);
      91  if (task_on_cpu(rq, p))
      92  resched_curr(rq);
      94  task_rq_unlock(rq, p, &rf);
     239  /* REQUIRES: rq
     240  __sched_core_account_forceidle(struct rq *rq)  [argument]
     289  __sched_core_tick(struct rq *rq)  [argument]
     ...
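Read top to bottom, the core_sched.c matches outline the whole cookie-update path: lock the task's runqueue, dequeue it from the core-scheduling tree, install the new cookie, re-enqueue, and kick the CPU if the task is currently running. A hedged reconstruction of that sequence (the cookie assignment itself is elided in the snippet, and sched_core_enqueued()/task_on_rq_queued() are assumptions here):

    /* Sketch assembled from the matches above; not the verbatim kernel
     * function. The cookie store and both enqueued checks are assumed. */
    static void sched_core_update_cookie_sketch(struct task_struct *p,
                                                unsigned long cookie)
    {
            struct rq_flags rf;
            struct rq *rq = task_rq_lock(p, &rf);              /* line 60 */

            SCHED_WARN_ON((p->core_cookie || cookie) &&
                          !sched_core_enabled(rq));            /* line 68 */

            if (sched_core_enqueued(p))                        /* assumed */
                    sched_core_dequeue(rq, p, DEQUEUE_SAVE);   /* line 71 */

            p->core_cookie = cookie;            /* assumed: elided above */

            if (cookie && task_on_rq_queued(p))                /* assumed */
                    sched_core_enqueue(rq, p);                 /* line 80 */

            if (task_on_cpu(rq, p))                            /* line 91 */
                    resched_curr(rq);                          /* line 92 */

            task_rq_unlock(rq, p, &rf);                        /* line 94 */
    }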
idle.c
     270  * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
     271  * rq->idle). This means that, if rq->idle has the polling bit set,
     445  balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)  [argument]
     454  static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)  [argument]
     456  resched_curr(rq);
     459  static void put_prev_task_idle(struct rq *rq, struc  [argument]
     463  set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)  [argument]
     470  pick_task_idle(struct rq *rq)  [argument]
     476  pick_next_task_idle(struct rq *rq)  [argument]
     490  dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)  [argument]
     506  task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)  [argument]
     510  switched_to_idle(struct rq *rq, struct task_struct *p)  [argument]
     516  prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)  [argument]
     521  update_curr_idle(struct rq *rq)  [argument]
     ...
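The comment at lines 270-271 is the core of the idle-polling optimisation: the polling bit can only be set while the idle task is current, so a remote wakeup that observes it may skip the IPI entirely. A minimal sketch of that consequence (helper name hypothetical; the real logic lives around set_nr_if_polling() in core.c):

    /* Sketch only: if the idle task advertises TIF_POLLING_NRFLAG, setting
     * TIF_NEED_RESCHED is enough -- the idle polling loop will notice the
     * flag on its next iteration, so no IPI is required. */
    static bool wake_polling_idle_sketch(struct rq *rq)
    {
            struct task_struct *idle = rq->idle;

            if (!test_tsk_thread_flag(idle, TIF_POLLING_NRFLAG))
                    return false;           /* caller must send an IPI */

            set_tsk_need_resched(idle);     /* picked up without an IPI */
            return true;
    }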
pelt.c
     251  * and the cfs rq, to which they are attached, have the same position in the
     346  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)  [argument]
     348  if (___update_load_sum(now, &rq->avg_rt,
     353  ___update_load_avg(&rq->avg_rt, 1);
     354  trace_pelt_rt_tp(rq);
     372  int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)  [argument]
     374  if (___update_load_sum(now, &rq->avg_dl,
     379  ___update_load_avg(&rq
     403  update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)  [argument]
     430  update_irq_load_avg(struct rq *rq, u64 running)  [argument]
     ...
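Every update_*_load_avg() helper above funnels into the same PELT primitives, ___update_load_sum() and ___update_load_avg(): a geometric series in which each roughly 1 ms period decays the accumulated sum by a factor y chosen so that y^32 = 1/2. A stand-alone, runnable illustration of the decay (user-space floating point; the kernel uses fixed-point tables, so the constant below only approximates LOAD_AVG_MAX):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            const double y = pow(0.5, 1.0 / 32.0);  /* y^32 == 1/2 */
            double sum = 0.0;

            /* A task that runs flat out contributes 1024 per period. */
            for (int period = 0; period < 345; period++)
                    sum = sum * y + 1024.0;

            /* Converges toward 1024 / (1 - y), i.e. the saturation
             * value the kernel calls LOAD_AVG_MAX (~47742 there). */
            printf("saturated load_sum ~= %.0f\n", sum);
            return 0;
    }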
/linux-master/drivers/gpu/drm/i915/gt/

selftest_engine_cs.c
      54  static int write_timestamp(struct i915_request *rq, int slot)  [argument]
      57  rcu_dereference_protected(rq->timeline,
      58  !i915_request_signaled(rq));
      62  cs = intel_ring_begin(rq, 4);
      67  if (GRAPHICS_VER(rq->i915) >= 8)
      70  *cs++ = i915_mmio_reg_offset(timestamp_reg(rq->engine));
      74  intel_ring_advance(rq, cs);
     171  struct i915_request *rq;  [local]
     173  rq = i915_request_create(ce);
     174  if (IS_ERR(rq)) {
     315  struct i915_request *rq;  [local]
     ...
selftest_context.c
      15  static int request_sync(struct i915_request *rq)  [argument]
      17  struct intel_timeline *tl = i915_request_timeline(rq);
      22  i915_request_get(rq);
      25  __i915_request_commit(rq);
      26  rq->sched.attr.priority = I915_PRIORITY_BARRIER;
      27  __i915_request_queue_bh(rq);
      29  timeout = i915_request_wait(rq, 0, HZ / 10);
      33  i915_request_retire_upto(rq);
      35  lockdep_unpin_lock(&tl->mutex, rq->cookie);
      38  i915_request_put(rq);
      51  struct i915_request *rq;  [local]
      78  struct i915_request *rq;  [local]
     235  struct i915_request *rq;  [local]
     329  struct i915_request *rq;  [local]
     ...
selftest_timeline.c
     454  static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)  [argument]
     458  cs = intel_ring_begin(rq, 4);
     462  if (GRAPHICS_VER(rq->i915) >= 8) {
     467  } else if (GRAPHICS_VER(rq->i915) >= 4) {
     479  intel_ring_advance(rq, cs);
     487  struct i915_request *rq;  [local]
     492  rq = ERR_PTR(err);
     503  rq = intel_engine_create_kernel_request(engine);
     504  if (IS_ERR(rq))
     507  i915_request_get(rq);
     554  struct i915_request *rq;  [local]
     624  struct i915_request *rq;  [local]
     696  struct i915_request *rq;  [local]
     778  emit_read_hwsp(struct i915_request *rq, u32 seqno, u32 hwsp, u32 *addr)  [argument]
     813  struct i915_request *rq;  [member of struct hwsp_watcher]
     899  struct i915_request *rq = fetch_and_zero(&w->rq);  [local]
     945  struct i915_request *rq, *rn;  [local]
     956  wrap_timeline(struct i915_request *rq)  [argument]
    1031  struct i915_request *rq;  [local]
    1194  struct i915_request *rq[3] = {};  [local]
    1270  struct i915_request *rq[3] = {};  [local]
    1370  struct i915_request *rq;  [local]
     ...
selftest_mocs.c
      38  static int request_add_sync(struct i915_request *rq, int err)  [argument]
      40  i915_request_get(rq);
      41  i915_request_add(rq);
      42  if (i915_request_wait(rq, 0, HZ / 5) < 0)
      44  i915_request_put(rq);
      49  static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)  [argument]
      53  i915_request_get(rq);
      54  i915_request_add(rq);
      55  if (spin && !igt_wait_for_spinner(spin, rq))
      57  i915_request_put(rq);
     102  read_regs(struct i915_request *rq, u32 addr, unsigned int count, u32 *offset)  [argument]
     130  read_mocs_table(struct i915_request *rq, const struct drm_i915_mocs_table *table, u32 *offset)  [argument]
     148  read_l3cc_table(struct i915_request *rq, const struct drm_i915_mocs_table *table, u32 *offset)  [argument]
     221  struct i915_request *rq;  [local]
     324  struct i915_request *rq;  [local]
     ...
selftest_hangcheck.c
      96  hws_address(const struct i915_vma *hws, const struct i915_request *rq)  [argument]
      97  const struct i915_request *rq)
     100  offset_in_page(sizeof(u32) * rq->fence.context);
     109  struct i915_request *rq = NULL;  [local]
     157  rq = igt_request_alloc(h->ctx, engine);
     158  if (IS_ERR(rq)) {
     159  err = PTR_ERR(rq);
     163  err = igt_vma_move_to_active_unlocked(vma, rq, 0);
     167  err = igt_vma_move_to_active_unlocked(hws, rq, 0);
     174  *batch++ = lower_32_bits(hws_address(hws, rq));
     175  *batch++ = upper_32_bits(hws_address(hws, rq));
     253  hws_seqno(const struct hang *h, const struct i915_request *rq)  [argument]
     274  wait_until_running(struct hang *h, struct i915_request *rq)  [argument]
     287  struct i915_request *rq;  [local]
     375  struct i915_request *rq;  [local]
     474  struct i915_request *rq;  [local]
     595  struct i915_request *rq;  [local]
     730  struct i915_request *rq = NULL;  [local]
     868  active_request_put(struct i915_request *rq)  [argument]
     896  struct i915_request *rq[8] = {};  [local]
    1048  struct i915_request *rq = NULL;  [local]
    1300  struct i915_request *rq;  [local]
    1434  struct i915_request *rq;  [local]
    1683  struct i915_request *rq;  [local]
    1813  struct i915_request *rq;  [local]
    1912  struct i915_request *rq;  [local]
     ...
gen8_engine_cs.h
      20  int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
      21  int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode);
      22  int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode);
      24  int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode);
      25  int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode);
      27  int gen8_emit_init_breadcrumb(struct i915_request *rq);
      29  int gen8_emit_bb_start_noarb(struct i915_request *rq,
      32  int gen8_emit_bb_start(struct i915_request *rq,
      36  int xehp_emit_bb_start_noarb(struct i915_request *rq,
      39  int xehp_emit_bb_start(struct i915_request *rq,
     ...
intel_ring.h
      18  u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
      19  int intel_ring_cacheline_align(struct i915_request *rq);
      41  static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)  [argument]
      51  GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
      52  GEM_BUG_ON(!IS_ALIGNED(rq->ring->emit, 8)); /* RING_TAIL qword align */
      81  static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)  [argument]
      84  u32 offset = addr - rq->ring->vaddr;
      86  GEM_BUG_ON(offset > rq->ring->size);
      87  return intel_ring_wrap(rq
     ...
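intel_ring_offset() (lines 81-87) maps an emission pointer back to a ring offset, and the GEM_BUG_ON at line 86 deliberately allows offset == size: the wrap at the truncated line 87 folds it back to zero. A sketch of that wrap, assuming the ring size is a power of two (which the mask trick requires):

    /* Stand-alone sketch of the wrap hinted at by line 87. */
    static inline unsigned int ring_wrap_sketch(unsigned int ring_size,
                                                unsigned int offset)
    {
            return offset & (ring_size - 1);   /* ring_size == 2^n */
    }

With this, an offset equal to ring_size wraps to 0, which is why line 86 checks offset > size rather than >=.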
/linux-master/drivers/scsi/esas2r/

esas2r_int.c
     172  esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a, struct esas2r_request *rq, struct atto_vda_ob_rsp *rsp)  [argument]
     173  struct esas2r_request *rq,
     181  if (unlikely(rq->req_stat != RS_SUCCESS)) {
     182  memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
     184  if (rq->req_stat == RS_ABORTED) {
     185  if (rq->timeout > RQ_MAX_TIMEOUT)
     186  rq->req_stat = RS_TIMEOUT;
     187  } else if (rq->req_stat == RS_SCSI_ERROR) {
     188  u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
     197  rq->req_stat = RS_SUCCESS;
     198  rq
     210  struct esas2r_request *rq;  [local]
     309  struct esas2r_request *rq;  [local]
     390  struct esas2r_request *rq = &a->general_req;  [local]
     448  struct esas2r_request *rq;  [local]
     749  esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)  [argument]
     876  esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)  [argument]
     879  esas2r_check_req_rsp_sense(struct esas2r_adapter *a, struct esas2r_request *rq)  [argument]
     919  esas2r_complete_request(struct esas2r_adapter *a, struct esas2r_request *rq)  [argument]
     ...

esas2r_io.c
      46  void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)  [argument]
      49  struct esas2r_request *startrq = rq;
      54  if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
      55  rq->req_stat = RS_SEL2;
      57  rq->req_stat = RS_DEGRADED;
      58  } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
      59  t = a->targetdb + rq->target_id;
      63  rq->req_stat = RS_SEL;
      66  rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
      75  rq
     119  esas2r_local_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)  [argument]
     137  esas2r_start_vda_request(struct esas2r_adapter *a, struct esas2r_request *rq)  [argument]
     190  struct esas2r_request *rq = sgc->first_req;  [local]
     373  struct esas2r_request *rq = sgc->first_req;  [local]
     527  struct esas2r_request *rq = sgc->first_req;  [local]
     770  struct esas2r_request *rq;  [local]
     858  esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq, u8 status)  [argument]
     ...
/linux-master/drivers/platform/chrome/wilco_ec/

properties.c
      34  send_property_msg(struct wilco_ec_device *ec, struct ec_property_request *rq, struct ec_property_response *rs)  [argument]
      35  struct ec_property_request *rq,
      43  ec_msg.request_data = rq;
      44  ec_msg.request_size = sizeof(*rq);
      51  if (rs->op != rq->op)
      53  if (memcmp(rq->property_id, rs->property_id, sizeof(rs->property_id)))
      62  struct ec_property_request rq;  [local]
      66  memset(&rq, 0, sizeof(rq));
      67  rq.op = EC_OP_GET;
      68  put_unaligned_le32(prop_msg->property_id, rq
      84  struct ec_property_request rq;  [local]
     ...
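Lines 62-68 build the request and lines 51-53 validate the response, which together give the EC property protocol: zero the request, set the op and the little-endian property id, send, and reject any response that does not echo both. Condensed into one hedged sketch (helper name hypothetical, identifiers as in the snippet):

    /* Sketch of a GET, assembled from the matches above. */
    static int get_property_sketch(struct wilco_ec_device *ec, u32 property_id,
                                   struct ec_property_response *rs)
    {
            struct ec_property_request rq;

            memset(&rq, 0, sizeof(rq));                        /* line 66 */
            rq.op = EC_OP_GET;                                 /* line 67 */
            put_unaligned_le32(property_id, rq.property_id);   /* line 68 */

            /* send_property_msg() (line 34) checks that rs->op and the
             * echoed property_id match before reporting success. */
            return send_property_msg(ec, &rq, rs);
    }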
sysfs.c
      74  struct boot_on_ac_request rq;  [local]
      85  memset(&rq, 0, sizeof(rq));
      86  rq.cmd = CMD_KB_CMOS;
      87  rq.sub_cmd = SUB_CMD_KB_CMOS_AUTO_ON;
      88  rq.val = val;
      92  msg.request_data = &rq;
      93  msg.request_size = sizeof(rq);
     157  send_usb_charge(struct wilco_ec_device *ec, struct usb_charge_request *rq, struct usb_charge_response *rs)  [argument]
     158  struct usb_charge_request *rq,
     166  msg.request_data = rq;
     183  struct usb_charge_request rq;  [local]
     203  struct usb_charge_request rq;  [local]
     ...
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/

en_rx.c
      64  mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
      68  mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
      71  static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
      72  static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
      73  static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
      89  static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,  [argument]
      92  struct mlx5e_cq_decomp *cqd = &rq->cqd;
      97  if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
     100  if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
     105  mlx5_wq_cyc_ctr2ix(&rq
     108  mlx5e_read_title_slot(struct mlx5e_rq *rq, struct mlx5_cqwq *wq, u32 cqcc)  [argument]
     153  mlx5e_decompress_cqe(struct mlx5e_rq *rq, struct mlx5_cqwq *wq, u32 cqcc)  [argument]
     183  mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, struct mlx5_cqwq *wq, u32 cqcc)  [argument]
     194  mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq, struct mlx5_cqwq *wq, struct mlx5_cqe64 *cqe, int budget_rem)  [argument]
     226  mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, struct mlx5_cqwq *wq, int update_owner_only, int budget_rem)  [argument]
     256  mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, struct mlx5_cqwq *wq, int budget_rem)  [argument]
     276  mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq, struct mlx5e_frag_page *frag_page)  [argument]
     295  mlx5e_page_release_fragmented(struct mlx5e_rq *rq, struct mlx5e_frag_page *frag_page)  [argument]
     305  mlx5e_get_rx_frag(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *frag)  [argument]
     331  mlx5e_put_rx_frag(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *frag)  [argument]
     338  get_frag(struct mlx5e_rq *rq, u16 ix)  [argument]
     343  mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, u16 ix)  [argument]
     374  mlx5e_free_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi)  [argument]
     389  mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)  [argument]
     407  mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)  [argument]
     425  mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)  [argument]
     439  mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)  [argument]
     457  mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)  [argument]
     498  mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo, struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page, u32 frag_offset, u32 len)  [argument]
     525  mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, struct page *page, u32 frag_offset, u32 len, unsigned int truesize)  [argument]
     538  mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb, struct page *page, dma_addr_t addr, int offset_from, int dma_offset, u32 headlen)  [argument]
     552  mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)  [argument]
     585  mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)  [argument]
     638  mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, u16 klm_entries, u16 index)  [argument]
     720  mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)  [argument]
     759  mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)  [argument]
     848  mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)  [argument]
     882  mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)  [argument]
     894  mlx5e_post_rx_wqes(struct mlx5e_rq *rq)  [argument]
     980  struct mlx5e_rq *rq = &c->rq;  [local]
    1080  mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)  [argument]
    1216  mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)  [argument]
    1224  mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)  [argument]
    1243  mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)  [argument]
    1262  mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct tcphdr *skb_tcp_hd)  [argument]
    1274  mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4, struct mlx5_cqe64 *cqe, bool match)  [argument]
    1298  mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6, struct mlx5_cqe64 *cqe, bool match)  [argument]
    1319  mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)  [argument]
    1377  mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)  [argument]
    1477  mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, struct sk_buff *skb, bool lro)  [argument]
    1548  mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct mlx5e_rq *rq, struct sk_buff *skb)  [argument]
    1607  mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct sk_buff *skb)  [argument]
    1628  mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct sk_buff *skb)  [argument]
    1641  mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, u32 frag_size, u16 headroom, u32 cqe_bcnt, u32 metasize)  [argument]
    1661  mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, void *va, u16 headroom, u32 frame_sz, u32 len, struct mlx5e_xdp_buff *mxbuf)  [argument]
    1672  mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, struct mlx5_cqe64 *cqe, u32 cqe_bcnt)  [argument]
    1720  mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, struct mlx5_cqe64 *cqe, u32 cqe_bcnt)  [argument]
    1804  trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)  [argument]
    1816  mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)  [argument]
    1822  mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)  [argument]
    1866  mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)  [argument]
    1909  mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)  [argument]
    1967  mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_frag_page *frag_page, u32 data_bcnt, u32 data_offset)  [argument]
    1994  mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset, u32 page_idx)  [argument]
    2139  mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset, u32 page_idx)  [argument]
    2197  mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 header_index)  [argument]
    2260  mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)  [argument]
    2283  mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)  [argument]
    2297  mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)  [argument]
    2382  mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)  [argument]
    2441  mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq, struct mlx5_cqwq *cqwq, int budget_rem)  [argument]
    2490  mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq, struct mlx5_cqwq *cqwq, int budget_rem)  [argument]
    2520  struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);  [local]
    2557  mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct sk_buff *skb)  [argument]
    2642  mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)  [argument]
    2683  mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)  [argument]
    2732  mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)  [argument]
    2766  mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)  [argument]
     ...
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/

reporter_rx.c
      82  struct mlx5e_rq *rq;  [local]
      91  rq = &icosq->channel->rq;
     106  mlx5e_deactivate_rq(rq);
     116  /* At this point, both the rq and the icosq are disabled */
     124  mlx5e_free_rx_missing_descs(rq);
     131  mlx5e_activate_rq(rq);
     132  rq->stats->recover++;
     152  struct mlx5e_rq *rq = ctx;  [local]
     155  mlx5e_deactivate_rq(rq);
     173  struct mlx5e_rq *rq;  [local]
     221  mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq)  [argument]
     237  mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq, struct devlink_fmsg *fmsg)  [argument]
     278  mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, struct devlink_fmsg *fmsg)  [argument]
     287  mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq, struct devlink_fmsg *fmsg)  [argument]
     334  mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq, struct devlink_fmsg *fmsg)  [argument]
     361  struct mlx5e_rq *rq;  [local]
     415  struct mlx5e_rq *rq = ctx;  [local]
     463  struct mlx5e_rq *rq = &priv->channels.c[i]->rq;  [local]
     493  mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)  [argument]
     514  mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)  [argument]
     ...
trap.c
      12  struct mlx5e_rq *rq = &trap_ctx->rq;  [local]
      20  work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
      22  busy |= rq->post_wqes(rq);
      32  mlx5e_cq_arm(&rq->cq);
      39  mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params, struct mlx5e_rq *rq)  [argument]
      40  struct mlx5e_rq *rq)
      45  rq->wq_type = params->rq_wq_type;
      46  rq->pdev = t->pdev;
      47  rq
      65  struct mlx5e_rq *rq = &t->rq;  [local]
      96  mlx5e_close_trap_rq(struct mlx5e_rq *rq)  [argument]
     ...
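The trap RX path at lines 12-32 is a classic NAPI-style poll loop: drain completions, repost receive WQEs, and re-arm the completion queue only once there is no work left. Condensed (sketch; the real routine also tracks budget exhaustion and ICOSQ work):

    /* Shape of the poll loop, from the matches above. */
    static int trap_poll_sketch(struct mlx5e_rq *rq, int budget)
    {
            int work_done = mlx5e_poll_rx_cq(&rq->cq, budget); /* line 20 */
            bool busy = work_done == budget;

            busy |= rq->post_wqes(rq);                         /* line 22 */

            if (!busy)
                    mlx5e_cq_arm(&rq->cq);  /* line 32: re-enable the IRQ */

            return work_done;
    }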
/linux-master/block/

mq-deadline.c
     117  deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)  [argument]
     119  return &per_prio->sort_list[rq_data_dir(rq)];
     126  static u8 dd_rq_ioclass(struct request *rq)  [argument]
     128  return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
     132  * get the request before `rq' in sector-sorted order
     135  deadline_earlier_request(struct request *rq)  [argument]
     137  struct rb_node *node = rb_prev(&rq->rb_node);
     146  * get the request after `rq' in sector-sorted order
     149  deadline_latter_request(struct request *rq)  [argument]
     151  struct rb_node *node = rb_next(&rq
     167  struct request *rq, *res = NULL;  [local]
     194  deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)  [argument]
     202  deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)  [argument]
     204  elv_rb_del(deadline_rb_root(per_prio, rq), rq);  [local]
     210  deadline_remove_request(struct request_queue *q, struct dd_per_prio *per_prio, struct request *rq)  [argument]
     280  deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, struct request *rq)  [argument]
     306  struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);  [local]
     314  deadline_is_seq_write(struct deadline_data *dd, struct request *rq)  [argument]
     328  deadline_skip_seq_writes(struct deadline_data *dd, struct request *rq)  [argument]
     349  struct request *rq, *rb_rq, *next;  [local]
     393  struct request *rq;  [local]
     429  started_after(struct deadline_data *dd, struct request *rq, unsigned long latest_start)  [argument]
     447  struct request *rq, *next_rq;  [local]
     567  struct request *rq;  [local]
     600  struct request *rq;  [local]
     742  dd_request_merge(struct request_queue *q, struct request **rq, struct bio *bio)  [argument]
     794  dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_insert_t flags, struct list_head *free)  [argument]
     873  struct request *rq;  [local]
     885  dd_prepare_request(struct request *rq)  [argument]
     918  dd_finish_request(struct request *rq)  [argument]
     ...
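The comments at lines 132 and 146 name the underlying structure: per-priority rbtrees sorted by sector, so the request "before" or "after" rq is just the rbtree predecessor or successor. Reconstructing the line-149 helper from the truncated snippet (it mirrors elv_rb_latter_request() in elevator.c below):

    /* Hedged reconstruction of deadline_latter_request() (lines 149-151). */
    static struct request *deadline_latter_request_sketch(struct request *rq)
    {
            struct rb_node *node = rb_next(&rq->rb_node);      /* line 151 */

            if (node)
                    return rb_entry(node, struct request, rb_node);

            return NULL;
    }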
elevator.c
      54  #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
      58  * merged with rq.
      60  static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)  [argument]
      62  struct request_queue *q = rq->q;
      66  return e->type->ops.allow_merge(q, rq, bio);
      74  bool elv_bio_merge_ok(struct request *rq, struct bio *bio)  [argument]
      76  if (!blk_rq_merge_ok(rq, bio))
      79  if (!elv_iosched_allow_bio_merge(rq, bi
     173  __elv_rqhash_del(struct request *rq)  [argument]
     179  elv_rqhash_del(struct request_queue *q, struct request *rq)  [argument]
     186  elv_rqhash_add(struct request_queue *q, struct request *rq)  [argument]
     196  elv_rqhash_reposition(struct request_queue *q, struct request *rq)  [argument]
     206  struct request *rq;  [local]
     227  elv_rb_add(struct rb_root *root, struct request *rq)  [argument]
     248  elv_rb_del(struct rb_root *root, struct request *rq)  [argument]
     259  struct request *rq;  [local]
     332  elv_attempt_insert_merge(struct request_queue *q, struct request *rq, struct list_head *free)  [argument]
     370  elv_merged_request(struct request_queue *q, struct request *rq, enum elv_merge type)  [argument]
     384  elv_merge_requests(struct request_queue *q, struct request *rq, struct request *next)  [argument]
     396  elv_latter_request(struct request_queue *q, struct request *rq)  [argument]
     406  elv_former_request(struct request_queue *q, struct request *rq)  [argument]
     793  elv_rb_former_request(struct request_queue *q, struct request *rq)  [argument]
     805  elv_rb_latter_request(struct request_queue *q, struct request *rq)  [argument]
     ...
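The macro at line 54 deserves a note: the hash key is blk_rq_pos(rq) + blk_rq_sectors(rq), the first sector after the request. Hashing requests by where they end means a back-merge candidate for a bio can be found in O(1) from the bio's start sector. A sketch of that lookup (treating elv_rqhash_find() as the hash-lookup helper is an assumption here):

    /* Sketch: a bio that starts exactly where some request ends can be
     * back-merged, so hash the bio's first sector. */
    static struct request *find_back_merge_sketch(struct request_queue *q,
                                                  struct bio *bio)
    {
            return elv_rqhash_find(q, bio->bi_iter.bi_sector);
    }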
/linux-master/tools/testing/selftests/bpf/progs/

test_access_variable_array.c
      11  int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,
/linux-master/drivers/gpu/drm/i915/gem/

i915_gem_busy.c
      43  const struct i915_request *rq;  [local]
      76  rq = to_request(current_fence);
      77  if (!i915_request_completed(rq))
      78  return flag(rq->engine->uabi_class);
      87  rq = to_request(fence);
      88  if (i915_request_completed(rq))
      92  BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
      93  return flag(rq->engine->uabi_class);
/linux-master/drivers/ufs/core/

ufshcd-crypto.h
      16  static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,  [argument]
      19  if (!rq || !rq->crypt_keyslot) {
      24  lrbp->crypto_key_slot = blk_crypto_keyslot_index(rq->crypt_keyslot);
      25  lrbp->data_unit_num = rq->crypt_ctx->bc_dun[0];
      50  static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,  [argument]
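The matches at lines 16 and 50 are the crypto-enabled and stubbed builds of the same inline helper, and the snippet shows nearly its whole body. A hedged completion (the sentinel stored in the no-crypto branch is an assumption from context):

    /* Sketch of the line-16 helper; the -1 sentinel is assumed. */
    static inline void prepare_lrbp_crypto_sketch(struct request *rq,
                                                  struct ufshcd_lrb *lrbp)
    {
            if (!rq || !rq->crypt_keyslot) {           /* line 19 */
                    lrbp->crypto_key_slot = -1;        /* assumed sentinel */
                    return;
            }

            lrbp->crypto_key_slot =
                    blk_crypto_keyslot_index(rq->crypt_keyslot);  /* line 24 */
            lrbp->data_unit_num = rq->crypt_ctx->bc_dun[0];       /* line 25 */
    }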
/linux-master/include/trace/events/

block.h
      69  * @rq: block IO operation request
      71  * The block operation request @rq is being placed back into queue
      77  TP_PROTO(struct request *rq),
      79  TP_ARGS(rq),
      90  __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
      91  __entry->sector = blk_rq_trace_sector(rq);
      92  __entry->nr_sector = blk_rq_trace_nr_sectors(rq);
      94  blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
     107  TP_PROTO(struct request *rq, blk_status_
     ...
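Lines 69-94 document the block_rq_requeue tracepoint: TP_fast_assign derives every field from the request itself, so call sites pass nothing but rq. A sketch of the driver-side trigger (the surrounding function is hypothetical; trace_block_rq_requeue() is the inline generated from the TP_PROTO at line 77):

    /* Fire the tracepoint, then hand the request back to blk-mq. */
    static void requeue_with_trace_sketch(struct request *rq)
    {
            trace_block_rq_requeue(rq);
            blk_mq_requeue_request(rq, true);  /* true: kick the queue */
    }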
/linux-master/drivers/gpu/drm/i915/gt/uc/

selftest_guc_multi_lrc.c
      80  struct i915_request *rq, *child_rq;  [local]
      85  rq = intel_context_create_request(ce);
      86  if (IS_ERR(rq))
      87  return rq;
      89  i915_request_get(rq);
      90  i915_request_add(rq);
     103  return rq;
     106  i915_request_put(rq);
     114  struct i915_request *rq;  [local]
     126  rq
     ...
/linux-master/drivers/md/

dm-rq.c
       9  #include "dm-rq.h"
      13  #define DM_MSG_PREFIX "core-rq"
     122  static struct dm_rq_target_io *tio_from_request(struct request *rq)  [argument]
     124  return blk_mq_rq_to_pdu(rq);
     161  struct request *rq = tio->orig;  [local]
     166  rq_end_stats(md, rq);
     167  blk_mq_end_request(rq, error);
     182  static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)  [argument]
     184  blk_mq_requeue_request(rq, false);
     185  __dm_mq_kick_requeue_list(rq
     191  struct request *rq = tio->orig;  [local]
     251  dm_softirq_done(struct request *rq)  [argument]
     276  dm_complete_request(struct request *rq, blk_status_t error)  [argument]
     291  dm_kill_unmapped_request(struct request *rq, blk_status_t error)  [argument]
     320  setup_clone(struct request *clone, struct request *rq, struct dm_rq_target_io *tio, gfp_t gfp_mask)  [argument]
     338  init_tio(struct dm_rq_target_io *tio, struct request *rq, struct mapped_device *md)  [argument]
     367  struct request *rq = tio->orig;  [local]
     457  dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node)  [argument]
     480  struct request *rq = bd->rq;  [local]
     ...