Searched refs:rq (Results 226 - 250 of 634) sorted by relevance


/linux-master/drivers/gpu/drm/i915/gem/selftests/
i915_gem_client_blt.c
474 struct i915_request *rq; local
499 rq = intel_context_create_request(t->ce);
500 if (IS_ERR(rq)) {
501 err = PTR_ERR(rq);
505 err = igt_vma_move_to_active_unlocked(t->batch, rq, 0);
507 err = igt_vma_move_to_active_unlocked(src->vma, rq, 0);
509 err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
511 err = rq->engine->emit_bb_start(rq,
515 i915_request_get(rq);
[all...]
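
For orientation: these selftest hits all walk the same request life cycle: create a request on a context, mark the VMAs the batch touches as active, emit the batch, then take a reference and submit. A minimal sketch of that shape (ce, vma, batch_offset and batch_len are stand-ins, not the selftest's actual variables):

    struct i915_request *rq;
    int err;

    rq = intel_context_create_request(ce);
    if (IS_ERR(rq))
            return PTR_ERR(rq);

    /* Record which buffers this request reads/writes. */
    err = igt_vma_move_to_active_unlocked(vma, rq, 0);
    if (!err)
            err = rq->engine->emit_bb_start(rq, batch_offset, batch_len, 0);

    i915_request_get(rq);   /* hold a reference across submission */
    i915_request_add(rq);   /* submit; the request is now in flight */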
i915_gem_coherency.c
196 struct i915_request *rq; local
210 rq = intel_engine_create_kernel_request(ctx->engine);
211 if (IS_ERR(rq)) {
212 err = PTR_ERR(rq);
216 cs = intel_ring_begin(rq, 4);
238 intel_ring_advance(rq, cs);
240 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
243 i915_request_add(rq);
/linux-master/drivers/scsi/
scsi_lib.c
116 struct request *rq = scsi_cmd_to_rq(cmd); local
118 if (rq->rq_flags & RQF_DONTPREP) {
119 rq->rq_flags &= ~RQF_DONTPREP;
125 blk_mq_requeue_request(rq, false);
127 blk_mq_delay_kick_requeue_list(rq->q, msecs);
715 * @rq: request to examine
726 static unsigned int scsi_rq_err_bytes(const struct request *rq) argument
728 blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
732 if (!(rq->rq_flags & RQF_MIXED_MERGE))
733 return blk_rq_bytes(rq);
1090 scsi_cmd_needs_dma_drain(struct scsi_device *sdev, struct request *rq) argument
1113 struct request *rq = scsi_cmd_to_rq(cmd); local
1212 scsi_initialize_rq(struct request *rq) argument
1227 struct request *rq; local
1240 scsi_cleanup_rq(struct request *rq) argument
1251 struct request *rq = scsi_cmd_to_rq(cmd); local
1511 scsi_complete(struct request *rq) argument
1899 scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node) argument
1927 scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx) argument
[all...]
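
The scsi_lib.c hits at lines 116-127 are the requeue helper (scsi_mq_requeue_cmd() in this file): if the command was already prepared, RQF_DONTPREP is cleared and the prep undone before the request is parked and the requeue list kicked after a delay. Reconstructed roughly from the lines shown (intermediate checks omitted):

    static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
    {
            struct request *rq = scsi_cmd_to_rq(cmd);

            if (rq->rq_flags & RQF_DONTPREP) {
                    rq->rq_flags &= ~RQF_DONTPREP;
                    scsi_mq_uninit_cmd(cmd);        /* undo the earlier prep */
            }

            blk_mq_requeue_request(rq, false);      /* park it, do not kick yet */
            blk_mq_delay_kick_requeue_list(rq->q, msecs);
    }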
/linux-master/drivers/gpu/drm/i915/
i915_scheduler.c
133 const struct i915_request *rq = node_to_request(node); local
140 * as their rq->engine pointer is not stable until under that
142 * check that the rq still belongs to the newly locked engine.
144 while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) {
289 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr) argument
292 __i915_schedule(&rq->sched, attr);
410 const struct i915_request *rq,
416 i915_request_show(m, rq, prefix, indent);
417 if (i915_request_completed(rq))
421 for_each_signaler(dep, rq) {
409 i915_request_show_with_schedule(struct drm_printer *m, const struct i915_request *rq, const char *prefix, int indent) argument
[all...]
i915_active.c
427 int i915_active_add_request(struct i915_active *ref, struct i915_request *rq) argument
429 u64 idx = i915_request_timeline(rq)->fence_context;
430 struct dma_fence *fence = &rq->fence;
738 int i915_request_await_active(struct i915_request *rq, argument
742 return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
987 void i915_request_add_active_barriers(struct i915_request *rq) argument
989 struct intel_engine_cs *engine = rq->engine;
993 GEM_BUG_ON(!intel_context_is_barrier(rq->context));
995 GEM_BUG_ON(i915_request_timeline(rq) !
1118 i915_active_fence_set(struct i915_active_fence *active, struct i915_request *rq) argument
[all...]
/linux-master/drivers/gpu/drm/i915/gt/
intel_timeline.c
327 struct i915_request *rq,
413 const struct i915_request *rq,
424 struct i915_request *rq, *rn; local
441 list_for_each_entry_safe(rq, rn, &tl->requests, link) {
442 if (i915_request_completed(rq))
446 if (i915_request_is_ready(rq))
448 if (i915_request_is_active(rq))
466 list_for_each_entry_safe(rq, rn, &tl->requests, link)
467 show_request(m, rq, "", 2);
326 intel_timeline_get_seqno(struct intel_timeline *tl, struct i915_request *rq, u32 *seqno) argument
410 intel_gt_show_timelines(struct intel_gt *gt, struct drm_printer *m, void (*show_request)(struct drm_printer *m, const struct i915_request *rq, const char *prefix, int indent)) argument
intel_renderstate.c
210 struct i915_request *rq)
212 struct intel_engine_cs *engine = rq->engine;
218 err = i915_vma_move_to_active(so->vma, rq, 0);
222 err = engine->emit_bb_start(rq,
229 err = engine->emit_bb_start(rq,
209 intel_renderstate_emit(struct intel_renderstate *so, struct i915_request *rq) argument
/linux-master/drivers/gpu/drm/i915/selftests/
i915_gem_evict.c
408 /* Reserve a block so that we know we have enough to fit a few rq */
457 struct i915_request *rq; local
463 /* We will need some GGTT space for the rq's context */
465 rq = intel_context_create_request(ce);
469 if (IS_ERR(rq)) {
471 if (PTR_ERR(rq) != -EBUSY) {
474 (int)PTR_ERR(rq));
475 err = PTR_ERR(rq);
481 err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
487 i915_request_add(rq);
[all...]
i915_gem.c
28 struct i915_request *rq; local
30 rq = intel_context_create_request(ce);
31 if (IS_ERR(rq)) {
32 err = PTR_ERR(rq);
36 i915_request_add(rq);
/linux-master/drivers/net/ethernet/cisco/enic/
enic.h
94 * @rq_id: desired rq index
170 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; member in struct:enic
219 static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) argument
221 return rq;
230 unsigned int rq)
232 return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
229 enic_msix_rq_intr(struct enic *enic, unsigned int rq) argument
enic_res.h
109 static inline void enic_queue_rq_desc(struct vnic_rq *rq, argument
113 struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
122 vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
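
enic_queue_rq_desc() is the receive-buffer post path: take the next free slot, encode the DMA address and length into the hardware descriptor, then publish the buffer to the ring. In outline (the descriptor-encode helper and type constant are from the same driver; treat this as a sketch, not the exact body):

    struct rq_enet_desc *desc = vnic_rq_next_desc(rq);

    /* encode address and length into the hardware descriptor */
    rq_enet_desc_enc(desc, (u64)dma_addr | VNIC_PADDR_TARGET,
                     RQ_ENET_TYPE_ONLY_SOP, (u16)len);

    /* hand the buffer to the ring; wrid tags it for completion */
    vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);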
/linux-master/kernel/sched/
debug.c
575 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) argument
577 if (task_current(rq, p))
608 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) argument
624 print_task(m, rq, p);
633 struct rq *rq = cpu_rq(cpu); local
646 raw_spin_rq_lock_irqsave(rq, flags);
657 raw_spin_rq_unlock_irqrestore(rq, flag
765 struct rq *rq = cpu_rq(cpu); local
[all...]
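
Both debug.c hits read per-CPU runqueue state, which must be sampled under the runqueue lock; the lock/unlock pair at lines 646/657 brackets the dump. The basic shape:

    struct rq *rq = cpu_rq(cpu);
    unsigned long flags;

    raw_spin_rq_lock_irqsave(rq, flags);
    /* ... walk rq and print tasks, e.g. via print_rq()/print_task() ... */
    raw_spin_rq_unlock_irqrestore(rq, flags);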
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
61 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, argument
91 __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
182 if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
185 *timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
186 _ctx->rq->clock, get_cqe_ts(_ctx->cqe));
311 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, argument
323 if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, xdp)))
325 __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
329 err = xdp_do_redirect(rq
953 mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq) argument
[all...]
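
mlx5e_xdp_handle() is a standard XDP verdict switch: run the attached program, transmit locally on XDP_TX (setting MLX5E_RQ_FLAG_XDP_XMIT so the doorbell is rung at poll completion, as at lines 91 and 325), hand off via xdp_do_redirect() on XDP_REDIRECT, and drop otherwise. A driver-neutral sketch (my_xdp_handle and its arguments are hypothetical):

    static bool my_xdp_handle(struct net_device *dev, struct bpf_prog *prog,
                              struct xdp_buff *xdp)
    {
            u32 act = bpf_prog_run_xdp(prog, xdp);

            switch (act) {
            case XDP_PASS:
                    return false;           /* continue on the normal RX path */
            case XDP_REDIRECT:
                    if (!xdp_do_redirect(dev, xdp, prog))
                            return true;    /* consumed by another device/map */
                    break;                  /* redirect failed: drop */
            default:
                    break;
            }
            /* XDP_TX would go to the driver's own XDP SQ; XDP_DROP,
             * XDP_ABORTED and errors recycle the buffer here. */
            return true;
    }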
/linux-master/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c
749 struct rcv_queue *rq; local
752 rq = &qs->rq[qidx];
753 rq->enable = enable;
758 if (!rq->enable) {
760 xdp_rxq_info_unreg(&rq->xdp_rxq);
764 rq->cq_qs = qs->vnic_id;
765 rq->cq_idx = qidx;
766 rq->start_rbdr_qs = qs->vnic_id;
767 rq
1814 struct rcv_queue *rq; local
[all...]
/linux-master/drivers/net/ethernet/intel/igc/
igc_ptp.c
246 struct ptp_clock_request *rq, int on)
257 switch (rq->type) {
260 if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
267 if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
268 (rq->extts.flags & PTP_ENABLE_FEATURE) &&
269 (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
274 rq->extts.index);
278 if (rq->extts.index == 1) {
289 igc_pin_extts(igc, rq->extts.index, pin);
303 if (rq
245 igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) argument
[all...]
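
The igc_ptp.c hits are the external-timestamp (extts) request validation in igc_ptp_feature_enable_i225(): unsupported flags are rejected outright, and with PTP_STRICT_FLAGS the caller must ask for both edges, since i225 timestamps both. Reconstructed from the lines shown (the flag list at line 260 is truncated above; the usual set is filled in here):

    if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
                            PTP_RISING_EDGE |
                            PTP_FALLING_EDGE |
                            PTP_STRICT_FLAGS))
            return -EOPNOTSUPP;

    if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
        (rq->extts.flags & PTP_ENABLE_FEATURE) &&
        (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
            return -EOPNOTSUPP;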
/linux-master/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
120 if (qp->rq.ring) {
121 atomic_set(&qp->rq.ring->cons_head, 0);
122 atomic_set(&qp->rq.ring->prod_tail, 0);
140 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
141 qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));
144 req_cap->max_recv_wr = qp->rq.wqe_cnt;
145 req_cap->max_recv_sge = qp->rq.max_sg;
147 qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
149 qp->rq.max_sg);
150 qp->npages_recv = (qp->rq
[all...]
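
Note how the pvrdma sizing code rounds every queue dimension up to a power of two (lines 140-150): that lets ring-index wrap be a mask operation instead of a divide. For example (requested and prod are placeholders):

    u32 wqe_cnt = roundup_pow_of_two(max(1U, requested));
    u32 slot    = prod & (wqe_cnt - 1);     /* cheap modulo for ring indexing */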
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
gddr5.c
39 int rq = ram->freq < 1000000; /* XXX */ local
96 ram->mr[3] |= (rq & 0x01) << 5;
/linux-master/drivers/char/agp/
isoch.c
76 u32 rq; member in struct:isoch_data
127 target.rq = (tstatus >> 24) & 0xff;
215 master[cdev].rq = master[cdev].n;
217 master[cdev].rq *= (1 << (master[cdev].y - 1));
219 tot_rq += master[cdev].rq;
226 rq_async = target.rq - rq_isoch;
251 master[cdev].rq += (cdev == ndevs - 1)
263 mcmd |= master[cdev].rq << 24;
/linux-master/include/rdma/
rdmavt_qp.h
269 * @rq: data structure for request queue entry
276 static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail) argument
281 count += rq->size;
453 struct rvt_rq rq; member in struct:rvt_srq
544 static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n) argument
547 ((char *)rq->kwq->curr_wq +
549 rq->max_sge * sizeof(struct ib_sge)) * n);
952 static inline void rvt_free_rq(struct rvt_rq *rq) argument
954 kvfree(rq->kwq);
955 rq
[all...]
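
rvt_get_rq_count() computes the occupancy of a circular receive queue from free-running head/tail indices; line 281 is the wrap correction. The full helper is plausibly:

    static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
    {
            u32 count = head - tail;

            if ((s32)count < 0)
                    count += rq->size;      /* head wrapped past tail */
            return count;
    }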
/linux-master/drivers/infiniband/hw/mlx5/
qp.c
253 struct mlx5_ib_wq *wq = &qp->rq;
273 struct mlx5_ib_wq *wq = &qp->rq;
444 qp->rq.max_gs = 0;
445 qp->rq.wqe_cnt = 0;
446 qp->rq.wqe_shift = 0;
453 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
456 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
457 if ((1 << qp->rq.wqe_shift) /
461 qp->rq.max_gs =
462 (1 << qp->rq
1432 create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, void *qpin, struct ib_pd *pd, struct mlx5_ib_cq *cq) argument
1502 destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq) argument
1508 destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, u32 qp_flags_en, struct ib_pd *pd) argument
1519 create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, u32 tdn, u32 *qp_flags_en, struct ib_pd *pd, u32 *out) argument
1577 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; local
1669 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; local
1686 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; local
3780 modify_raw_packet_qp_rq( struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state, const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd) argument
3903 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; local
4052 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; local
4834 query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, u8 *rq_state) argument
4912 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; local
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en.h
350 MLX5E_RQ_STATE_XSK, /* set to indicate an xsk rq */
601 (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
605 (*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
607 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
611 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
612 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
759 struct mlx5e_rq rq; member in struct:mlx5e_channel
814 struct mlx5e_rq_stats rq; member in struct:mlx5e_channel_stats
825 struct mlx5e_rq_stats rq; member in struct:mlx5e_ptp_stats
1011 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u1
[all...]
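
The en.h hits show the mlx5e RX path's dispatch-by-function-pointer design: the rq carries typed handler pointers (skb_from_cqe, post_rx_wqes, ...) that mlx5e_rq_set_handlers() selects once at setup time, e.g. XSK versus regular buffers, so the hot path pays one indirect call instead of per-packet mode checks. In miniature (every name below is hypothetical):

    struct rx_queue;
    typedef struct sk_buff *(*build_skb_fn)(struct rx_queue *rxq,
                                            void *frag, u32 len);

    struct rx_queue {
            build_skb_fn build_skb;         /* chosen once per queue at setup */
    };

    /* hot path: skb = rxq->build_skb(rxq, frag, len); */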
/linux-master/drivers/net/vmxnet3/
vmxnet3_ethtool.c
86 /* per rq stats maintained by the device */
102 /* per rq stats maintained by the driver */
578 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
585 buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
586 buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
587 buf[j++] = rq->rx_ring[0].size;
588 buf[j++] = rq->rx_ring[0].next2fill;
589 buf[j++] = rq->rx_ring[0].next2comp;
590 buf[j++] = rq->rx_ring[0].gen;
592 buf[j++] = VMXNET3_GET_ADDR_LO(rq
[all...]
/linux-master/drivers/block/aoe/
aoecmd.c
825 bufinit(struct buf *buf, struct request *rq, struct bio *bio) argument
828 buf->rq = rq;
836 struct request *rq; local
847 rq = d->ip.rq;
848 if (rq == NULL) {
849 rq = list_first_entry_or_null(&d->rq_list, struct request,
851 if (rq == NULL)
853 list_del_init(&rq
1029 aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) argument
1056 struct request *rq = buf->rq; local
[all...]
/linux-master/arch/mips/loongson64/
cop2-ex.c
83 regs->regs[insn.loongson3_lswc2_format.rq] = value_next;
99 set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
114 value_next = regs->regs[insn.loongson3_lswc2_format.rq];
131 value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);
/linux-master/drivers/staging/rtl8723bs/include/
osdep_intf.h
50 int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

Completed in 386 milliseconds
