Searched refs:rq (Results 76 - 100 of 634) sorted by relevance

/linux-master/drivers/net/
veth.c 76 struct veth_rq *rq; member in struct:veth_priv
187 if (!priv->rq[i].page_pool)
189 page_pool_get_stats(priv->rq[i].page_pool, &pp_stats);
205 const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
226 const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
295 static void __veth_xdp_flush(struct veth_rq *rq) argument
299 if (!READ_ONCE(rq->rx_notify_masked) &&
300 napi_schedule_prep(&rq->xdp_napi)) {
301 WRITE_ONCE(rq->rx_notify_masked, true);
302 __napi_schedule(&rq
306 veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb) argument
316 veth_forward_skb(struct net_device *dev, struct sk_buff *skb, struct veth_rq *rq, bool xdp) argument
346 struct veth_rq *rq = NULL; local
481 struct veth_rq *rq; local
545 veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) argument
569 veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) argument
592 veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp, struct veth_xdp_tx_bq *bq) argument
608 veth_xdp_rcv_one(struct veth_rq *rq, struct xdp_frame *frame, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) argument
679 veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames, int n_xdpf, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) argument
722 veth_convert_skb_to_xdp_buff(struct veth_rq *rq, struct xdp_buff *xdp, struct sk_buff **pskb) argument
761 veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) argument
872 veth_xdp_rcv(struct veth_rq *rq, int budget, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) argument
932 struct veth_rq *rq = local
964 veth_create_page_pool(struct veth_rq *rq) argument
996 struct veth_rq *rq = &priv->rq[i]; local
1004 struct veth_rq *rq = &priv->rq[i]; local
1036 struct veth_rq *rq = &priv->rq[i]; local
1045 struct veth_rq *rq = &priv->rq[i]; local
1074 struct veth_rq *rq = &priv->rq[i]; local
1097 struct veth_rq *rq = &priv->rq[i]; local
1114 struct veth_rq *rq = &priv->rq[i]; local
1172 struct veth_rq *rq = &priv->rq[i]; local
1180 struct veth_rq *rq = &priv->rq[i]; local
[all...]
/linux-master/drivers/infiniband/sw/rdmavt/
srq.c 54 srq->rq.size = srq_init_attr->attr.max_wr + 1;
55 srq->rq.max_sge = srq_init_attr->attr.max_sge;
56 sz = sizeof(struct ib_sge) * srq->rq.max_sge +
58 if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
69 u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
71 srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
86 spin_lock_init(&srq->rq.lock);
110 rvt_free_rq(&srq->rq);
145 srq->rq
[all...]
/linux-master/drivers/gpu/drm/i915/gt/
intel_execlists_submission.c 213 struct i915_request *rq,
216 struct i915_request *active = rq;
218 list_for_each_entry_from_reverse(rq, &tl->requests, link) {
219 if (__i915_request_is_complete(rq))
223 i915_request_set_error_once(rq, error);
224 __i915_request_skip(rq);
226 active = rq;
233 active_request(const struct intel_timeline * const tl, struct i915_request *rq) argument
235 return __active_request(tl, rq, 0);
256 static int rq_prio(const struct i915_request *rq) argument
212 __active_request(const struct intel_timeline * const tl, struct i915_request *rq, int error) argument
261 effective_prio(const struct i915_request *rq) argument
297 need_preempt(const struct intel_engine_cs *engine, const struct i915_request *rq) argument
369 struct i915_request *rq, *rn, *active = NULL; local
418 execlists_context_status_change(struct i915_request *rq, unsigned long status) argument
431 reset_active(struct i915_request *rq, struct intel_engine_cs *engine) argument
469 bad_request(const struct i915_request *rq) argument
475 __execlists_schedule_in(struct i915_request *rq) argument
530 execlists_schedule_in(struct i915_request *rq, int idx) argument
547 resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve) argument
560 kick_siblings(struct i915_request *rq, struct intel_context *ce) argument
589 __execlists_schedule_out(struct i915_request * const rq, struct intel_context * const ce) argument
651 execlists_schedule_out(struct i915_request *rq) argument
675 execlists_update_context(struct i915_request *rq) argument
738 dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq) argument
784 struct i915_request * const *port, *rq, *prev = NULL; local
932 struct i915_request *rq = execlists->pending[n]; local
962 i915_request_flags(const struct i915_request *rq) argument
996 virtual_matches(const struct virtual_engine *ve, const struct i915_request *rq, const struct intel_engine_cs *engine) argument
1033 struct i915_request *rq = READ_ONCE(ve->request); local
1075 defer_request(struct i915_request *rq, struct list_head * const pl) argument
1125 struct i915_request *rq; local
1136 timeslice_yield(const struct intel_engine_execlists *el, const struct i915_request *rq) argument
1154 needs_timeslice(const struct intel_engine_cs *engine, const struct i915_request *rq) argument
1190 timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq) argument
1239 active_preempt_timeout(struct intel_engine_cs *engine, const struct i915_request *rq) argument
1255 set_preempt_timeout(struct intel_engine_cs *engine, const struct i915_request *rq) argument
1265 completed(const struct i915_request *rq) argument
1414 struct i915_request *rq; local
1493 struct i915_request *rq, *rn; local
1968 struct i915_request *rq = *execlists->active; local
2056 __execlists_hold(struct i915_request *rq) argument
2099 execlists_hold(struct intel_engine_cs *engine, struct i915_request *rq) argument
2128 hold_request(const struct i915_request *rq) argument
2154 __execlists_unhold(struct i915_request *rq) argument
2197 execlists_unhold(struct intel_engine_cs *engine, struct i915_request *rq) argument
2218 struct i915_request *rq; member in struct:execlists_capture
2293 struct i915_request * const *port, *rq; local
2443 const struct i915_request *rq = *engine->execlists.active; local
2550 queue_request(struct intel_engine_cs *engine, struct i915_request *rq) argument
2560 submit_queue(struct intel_engine_cs *engine, const struct i915_request *rq) argument
2572 ancestor_on_hold(const struct intel_engine_cs *engine, const struct i915_request *rq) argument
2642 execlists_context_cancel_request(struct intel_context *ce, struct i915_request *rq) argument
2714 emit_pdps(struct i915_request *rq) argument
3030 struct i915_request *rq; local
3151 struct i915_request *rq, *rn; local
3280 add_to_engine(struct i915_request *rq) argument
3286 remove_from_engine(struct i915_request *rq) argument
3325 kick_execlists(const struct i915_request *rq, int prio) argument
3791 struct i915_request *rq; local
3898 virtual_submit_request(struct i915_request *rq) argument
4080 intel_execlists_show_requests(struct intel_engine_cs *engine, struct drm_printer *m, void (*show_request)(struct drm_printer *m, const struct i915_request *rq, const char *prefix, int indent), unsigned int max) argument
4090 struct i915_request *rq, *last; local
4144 struct i915_request *rq = READ_ONCE(ve->request); local
[all...]
intel_migrate.c 334 static int emit_no_arbitration(struct i915_request *rq) argument
338 cs = intel_ring_begin(rq, 2);
345 intel_ring_advance(rq, cs);
350 static int max_pte_pkt_size(struct i915_request *rq, int pkt) argument
352 struct intel_ring *ring = rq->ring;
354 pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
362 static int emit_pte(struct i915_request *rq, argument
369 bool has_64K_pages = HAS_64K_PAGES(rq->i915);
370 const u64 encode = rq->context->vm->pte_encode(0, pat_index,
372 struct intel_ring *ring = rq
530 emit_copy_ccs(struct i915_request *rq, u32 dst_offset, u8 dst_access, u32 src_offset, u8 src_access, int size) argument
581 emit_copy(struct i915_request *rq, u32 dst_offset, u32 src_offset, int size) argument
694 struct i915_request *rq; local
917 emit_clear(struct i915_request *rq, u32 offset, int size, u32 value, bool is_lmem) argument
994 struct i915_request *rq; local
[all...]
selftest_lrc.c 38 static bool is_active(struct i915_request *rq) argument
40 if (i915_request_is_active(rq))
43 if (i915_request_on_hold(rq))
46 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
53 struct i915_request *rq,
63 if (i915_request_completed(rq)) /* that was quick! */
68 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
83 struct i915_request *rq; local
86 rq
52 wait_for_submit(struct intel_engine_cs *engine, struct i915_request *rq, unsigned long timeout) argument
110 struct i915_request *rq; local
406 struct i915_request *rq; local
534 struct i915_request *rq; local
569 struct i915_request *rq; local
620 struct i915_request *rq; local
742 struct i915_request *rq; local
800 struct i915_request *rq; local
1057 struct i915_request *rq; local
1210 struct i915_request *rq; local
1406 struct i915_request *rq; local
1560 struct i915_request *rq; local
1741 garbage_reset(struct intel_engine_cs *engine, struct i915_request *rq) argument
1763 struct i915_request *rq; local
1879 struct i915_request *rq; local
[all...]
intel_engine_pm.c 101 struct i915_request *rq = to_request(fence); local
103 ewma__engine_latency_add(&rq->engine->latency,
104 ktime_us_delta(rq->fence.timestamp,
105 rq->duration.emitted));
109 __queue_and_release_pm(struct i915_request *rq, argument
123 GEM_BUG_ON(rq->context->active_count != 1);
125 rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);
143 __i915_request_queue_bh(rq);
154 struct i915_request *rq; local
212 rq
[all...]
selftest_migrate.c 46 struct i915_request *rq; local
90 err = fn(migrate, &ww, src, dst, &rq);
96 if (rq) {
97 i915_request_wait(rq, 0, HZ);
98 i915_request_put(rq);
107 if (rq) {
108 if (i915_request_wait(rq, 0, HZ) < 0) {
112 i915_request_put(rq);
147 struct i915_request *rq; local
163 rq
265 struct i915_request *rq; local
553 struct i915_request *rq, *prev; local
854 struct i915_request *rq; local
933 struct i915_request *rq; local
[all...]
intel_gt_buffer_pool.h 25 struct i915_request *rq)
30 return i915_active_add_request(&node->active, rq);
24 intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node, struct i915_request *rq) argument
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
setup.c 54 mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
62 struct mlx5e_rq *rq)
68 rq->wq_type = params->rq_wq_type;
69 rq->pdev = c->pdev;
70 rq->netdev = c->netdev;
71 rq->priv = c->priv;
72 rq->tstamp = c->tstamp;
73 rq->clock = &mdev->clock;
74 rq->icosq = &c->icosq;
75 rq
58 mlx5e_init_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params, struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk, struct mlx5e_rq *rq) argument
[all...]
/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c 62 #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
155 struct hinic_rq *rq, u16 global_qid)
162 wq = rq->wq;
181 HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
202 rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
203 rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
242 * alloc_rq_skb_arr - allocate rq array for saved skb
243 * @rq: HW Receive Queue
247 static int alloc_rq_skb_arr(struct hinic_rq *rq) argument
154 hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, struct hinic_rq *rq, u16 global_qid) argument
264 free_rq_skb_arr(struct hinic_rq *rq) argument
316 alloc_rq_cqe(struct hinic_rq *rq) argument
360 free_rq_cqe(struct hinic_rq *rq) argument
384 hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry) argument
435 hinic_clean_rq(struct hinic_rq *rq) argument
468 hinic_get_rq_free_wqebbs(struct hinic_rq *rq) argument
796 hinic_rq_get_wqe(struct hinic_rq *rq, unsigned int wqe_size, u16 *prod_idx) argument
815 hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb) argument
837 hinic_rq_read_wqe(struct hinic_rq *rq, unsigned int wqe_size, struct sk_buff **skb, u16 *cons_idx) argument
872 hinic_rq_read_next_wqe(struct hinic_rq *rq, unsigned int wqe_size, struct sk_buff **skb, u16 *cons_idx) argument
899 hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, unsigned int wqe_size) argument
922 hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe, u16 cons_idx, struct hinic_sge *sge) argument
940 hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge) argument
968 hinic_rq_update(struct hinic_rq *rq, u16 prod_idx) argument
[all...]
hinic_rx.c 138 skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
142 addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
170 dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
204 free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
217 rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
224 hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
226 hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
233 hinic_rq_update(rxq->rq, prod_idx);
245 struct hinic_rq *rq = rxq->rq; local
359 struct hinic_rq *rq = rxq->rq; local
461 struct hinic_rq *rq = rxq->rq; local
496 struct hinic_rq *rq = rxq->rq; local
519 struct hinic_rq *rq = rxq->rq; local
565 struct hinic_rq *rq = rxq->rq; local
580 hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, struct net_device *netdev) argument
[all...]
/linux-master/kernel/sched/
stats.c 6 void __update_stats_wait_start(struct rq *rq, struct task_struct *p, argument
11 wait_start = rq_clock(rq);
20 void __update_stats_wait_end(struct rq *rq, struct task_struct *p, argument
23 u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);
47 void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p, argument
56 u64 delta = rq_clock(rq) - sleep_start;
74 u64 delta = rq_clock(rq)
126 struct rq *rq; local
[all...]
/linux-master/drivers/gpu/drm/i915/gem/selftests/
igt_gem_utils.c 24 struct i915_request *rq; local
35 rq = intel_context_create_request(ce);
38 return rq;
115 struct i915_request *rq; local
127 rq = intel_context_create_request(ce);
128 if (IS_ERR(rq)) {
129 err = PTR_ERR(rq);
133 err = igt_vma_move_to_active_unlocked(batch, rq, 0);
137 err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
145 err = rq
[all...]
igt_gem_utils.h 35 igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq, argument
41 err = i915_vma_move_to_active(vma, rq, flags);
/linux-master/include/linux/
blktrace_api.h 75 extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
85 # define blk_add_driver_data(rq, data, len) do {} while (0)
115 static inline sector_t blk_rq_trace_sector(struct request *rq) argument
121 if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
123 return blk_rq_pos(rq);
126 static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) argument
128 return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
/linux-master/drivers/net/vmxnet3/
vmxnet3_xdp.c 251 vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp, argument
259 rq->stats.xdp_packets++;
267 err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
269 rq->stats.xdp_redirects++;
271 rq->stats.xdp_drops++;
272 page_pool_recycle_direct(rq->page_pool, page);
278 vmxnet3_xdp_xmit_back(rq->adapter, xdpf))) {
279 rq->stats.xdp_drops++;
280 page_pool_recycle_direct(rq->page_pool, page);
282 rq
303 vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page, const struct xdp_buff *xdp) argument
325 vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter, struct vmxnet3_rx_queue *rq, void *data, int len, struct sk_buff **skb_xdp_pass) argument
368 vmxnet3_process_xdp(struct vmxnet3_adapter *adapter, struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, struct vmxnet3_rx_buf_info *rbi, struct Vmxnet3_RxDesc *rxd, struct sk_buff **skb_xdp_pass) argument
[all...]
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_throttle.c 64 struct i915_request *rq, *target = NULL; local
70 list_for_each_entry_reverse(rq,
73 if (i915_request_completed(rq))
76 if (time_after(rq->emitted_jiffies,
80 target = i915_request_get(rq);
/linux-master/drivers/s390/char/
raw3270.c 144 struct raw3270_request *rq; local
147 rq = kzalloc(sizeof(*rq), GFP_KERNEL | GFP_DMA);
148 if (!rq)
153 rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
154 if (!rq->buffer) {
155 kfree(rq);
159 rq->size = size;
160 INIT_LIST_HEAD(&rq->list);
165 if (rq
176 raw3270_request_free(struct raw3270_request *rq) argument
186 raw3270_request_reset(struct raw3270_request *rq) argument
204 raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd) argument
213 raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size) argument
226 raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size) argument
236 raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib) argument
248 __raw3270_start(struct raw3270 *rp, struct raw3270_view *view, struct raw3270_request *rq) argument
274 raw3270_start(struct raw3270_view *view, struct raw3270_request *rq) argument
293 raw3270_start_request(struct raw3270_view *view, struct raw3270_request *rq, int cmd, void *data, size_t len) argument
309 raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq) argument
325 raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq) argument
344 struct raw3270_request *rq; local
565 raw3270_read_modified_cb(struct raw3270_request *rq, void *data) argument
612 raw3270_reset_device_cb(struct raw3270_request *rq, void *data) argument
679 struct raw3270_request *rq; local
698 raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, struct irb *irb) argument
[all...]
fs3270.c 48 static void fs3270_wake_up(struct raw3270_request *rq, void *data) argument
62 static int fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq) argument
68 rq->callback = fs3270_wake_up;
69 rq->callback_data = &fp->wait;
79 rc = raw3270_start(view, rq);
82 wait_event(fp->wait, raw3270_request_final(rq));
91 static void fs3270_reset_callback(struct raw3270_request *rq, void *data) argument
95 fp = (struct fs3270 *)rq->view;
96 raw3270_request_reset(rq);
100 static void fs3270_restore_callback(struct raw3270_request *rq, voi argument
160 fs3270_save_callback(struct raw3270_request *rq, void *data) argument
217 fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb) argument
242 struct raw3270_request *rq; local
287 struct raw3270_request *rq; local
[all...]
/linux-master/block/
blk-mq.h 53 void blk_mq_put_rq_ref(struct request *rq);
258 static inline void blk_mq_set_rq_budget_token(struct request *rq, int token) argument
263 if (rq->q->mq_ops->set_rq_budget_token)
264 rq->q->mq_ops->set_rq_budget_token(rq, token);
267 static inline int blk_mq_get_rq_budget_token(struct request *rq) argument
269 if (rq->q->mq_ops->get_rq_budget_token)
270 return rq->q->mq_ops->get_rq_budget_token(rq);
335 struct request *rq)
334 __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) argument
342 blk_mq_put_driver_tag(struct request *rq) argument
352 blk_mq_get_driver_tag(struct request *rq) argument
403 struct request *rq = list_entry_rq(list->next); local
[all...]
blk-crypto.c 193 bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio) argument
195 return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
228 blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq) argument
230 return blk_crypto_get_keyslot(rq->q->crypto_profile,
231 rq->crypt_ctx->bc_key,
232 &rq->crypt_keyslot);
235 void __blk_crypto_rq_put_keyslot(struct request *rq) argument
237 blk_crypto_put_keyslot(rq->crypt_keyslot);
238 rq->crypt_keyslot = NULL;
241 void __blk_crypto_free_request(struct request *rq) argument
303 __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio, gfp_t gfp_mask) argument
[all...]
blk-merge.c 18 #include "blk-rq-qos.h"
408 unsigned int blk_recalc_rq_segments(struct request *rq) argument
415 if (!rq->bio)
418 switch (bio_op(rq->bio)) {
421 if (queue_max_discard_segments(rq->q) > 1) {
422 struct bio *bio = rq->bio;
435 rq_for_each_bvec(bv, rq, iter)
436 bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
561 * must make sure sg can hold rq->nr_phys_segments entries
563 int __blk_rq_map_sg(struct request_queue *q, struct request *rq, argument
586 blk_rq_get_max_sectors(struct request *rq, sector_t offset) argument
729 blk_rq_set_mixed_merge(struct request *rq) argument
887 attempt_back_merge(struct request_queue *q, struct request *rq) argument
898 attempt_front_merge(struct request_queue *q, struct request *rq) argument
914 blk_attempt_req_merge(struct request_queue *q, struct request *rq, struct request *next) argument
920 blk_rq_merge_ok(struct request *rq, struct bio *bio) argument
954 blk_try_merge(struct request *rq, struct bio *bio) argument
1060 blk_attempt_bio_merge(struct request_queue *q, struct request *rq, struct bio *bio, unsigned int nr_segs, bool sched_allow_merge) argument
1111 struct request *rq; local
1142 struct request *rq; local
1167 struct request *rq; local
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
txrx.h 68 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
69 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
71 void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
72 void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);
198 static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
200 return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
214 struct mlx5e_rq *rq; member in struct:mlx5e_icosq_wqe_info::__anon1819::__anon1820
345 static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) argument
347 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
348 mlx5_wq_ll_reset(&rq
371 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) argument
381 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) argument
391 mlx5e_rqwq_get_head(struct mlx5e_rq *rq) argument
401 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq) argument
503 mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i) argument
[all...]
/linux-master/drivers/infiniband/sw/rxe/
rxe_srq.c 56 srq->rq.max_wr = init->attr.max_wr;
57 srq->rq.max_sge = init->attr.max_sge;
60 srq->rq.max_sge*sizeof(struct ib_sge);
62 spin_lock_init(&srq->rq.producer_lock);
63 spin_lock_init(&srq->rq.consumer_lock);
65 q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,
80 srq->rq.queue = q;
81 init->attr.max_wr = srq->rq.max_wr;
137 if (attr->srq_limit > srq->rq.queue->buf->index_mask) {
140 srq->rq
[all...]
/linux-master/drivers/scsi/esas2r/
esas2r_ioctl.c 85 struct esas2r_request *rq)
113 struct esas2r_request *rq; local
120 rq = esas2r_alloc_request(a);
121 if (rq == NULL) {
153 rq->comp_cb = complete_fm_api_req;
157 if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
176 esas2r_free_request(a, (struct esas2r_request *)rq);
184 struct esas2r_request *rq)
201 struct esas2r_request *rq)
210 struct esas2r_request *rq; local
84 complete_fm_api_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
183 complete_nvr_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
200 complete_buffered_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
295 smp_ioctl_callback(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc, void *context) argument
332 esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
343 csmi_ioctl_tunnel(struct esas2r_adapter *a, union atto_ioctl_csmi *ci, struct esas2r_request *rq, struct esas2r_sg_context *sgc, u32 ctrl_code, u16 target_id) argument
392 csmi_ioctl_callback(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc, void *context) argument
608 csmi_ioctl_done_callback(struct esas2r_adapter *a, struct esas2r_request *rq, void *context) argument
669 hba_ioctl_tunnel(struct esas2r_adapter *a, struct atto_ioctl *hi, struct esas2r_request *rq, struct esas2r_sg_context *sgc) argument
689 scsi_passthru_comp_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
746 hba_ioctl_callback(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc, void *context) argument
1198 hba_ioctl_done_callback(struct esas2r_adapter *a, struct esas2r_request *rq, void *context) argument
1244 esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sas_nvram *data) argument
1272 struct esas2r_request *rq; local
1801 vda_complete_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1825 struct esas2r_request *rq; local
1919 fs_api_complete_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1947 struct esas2r_request *rq; local
[all...]

Completed in 322 milliseconds
