/linux-master/drivers/infiniband/hw/cxgb4/

t4.h:
  384  struct t4_rq rq;    (member in struct:t4_wq)
  482  return wq->rq.in_use;
  487  return wq->rq.in_use == 0;
  492  return wq->rq.size - 1 - wq->rq.in_use;
  497  wq->rq.in_use++;
  498  if (++wq->rq.pidx == wq->rq.size)
  499  wq->rq.pidx = 0;
  500  wq->rq [all...]
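The t4.h hits above are the classic fixed-size ring bookkeeping idiom: an in_use counter, a producer index (pidx) that wraps at size, and one slot kept free so that full and empty remain distinguishable. A minimal standalone sketch of the same idiom; the struct and function names are illustrative, not the cxgb4 API:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative ring bookkeeping modeled on the t4_rq fields above. */
    struct ring {
            uint16_t size;      /* total slots */
            uint16_t in_use;    /* slots currently consumed */
            uint16_t pidx;      /* producer index, wraps at size */
    };

    static uint16_t ring_avail(const struct ring *r)
    {
            return r->size - 1 - r->in_use;   /* one slot always stays free */
    }

    static bool ring_empty(const struct ring *r)
    {
            return r->in_use == 0;
    }

    static void ring_produce(struct ring *r)
    {
            r->in_use++;
            if (++r->pidx == r->size)
                    r->pidx = 0;              /* wrap without a modulo */
    }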
/linux-master/drivers/gpu/drm/i915/gt/ |
intel_context.c:
  481  struct i915_request *rq)
  487  GEM_BUG_ON(rq->context == ce);
  489  if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
  491  err = i915_active_fence_set(&tl->last_request, rq);
  504  return i915_active_add_request(&ce->active, rq);
  510  struct i915_request *rq;    (local)
  517  rq = i915_request_create(ce);
  523  rq = ERR_PTR(err);
  525  rq = ERR_PTR(err);
  530  if (IS_ERR(rq))
  480  intel_context_prepare_remote_request(struct intel_context *ce, struct i915_request *rq)    (argument)
  548  struct i915_request *rq, *active = NULL;    (local)
  623  intel_context_ban(struct intel_context *ce, struct i915_request *rq)    (argument)
  [all...]
selftest_ring_submission.c:
  72  struct i915_request *rq;    (local)
  75  rq = intel_context_create_request(ce);
  76  if (IS_ERR(rq))
  77  return PTR_ERR(rq);
  79  i915_request_get(rq);
  80  i915_request_add(rq);
  82  if (i915_request_wait(rq, 0, HZ / 5) < 0)
  84  i915_request_put(rq);
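The selftest above is the i915 request lifecycle in miniature: create, take a reference, submit, wait with a timeout, drop the reference. A condensed, commented sketch of that flow, restating the calls shown rather than adding API (the -ETIME choice is an assumption; the selftest's own error path is elided in the output):

    struct i915_request *rq;
    int err = 0;

    rq = intel_context_create_request(ce);  /* allocate and begin a request */
    if (IS_ERR(rq))
            return PTR_ERR(rq);             /* errno is encoded in the pointer */

    i915_request_get(rq);                   /* hold a reference across submit */
    i915_request_add(rq);                   /* submit; may complete at any time */

    if (i915_request_wait(rq, 0, HZ / 5) < 0)   /* bounded wait, ~200 ms */
            err = -ETIME;

    i915_request_put(rq);                   /* drop our reference */
    return err;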
selftest_tlb.c:
  44  struct i915_request *rq;    (local)
  124  rq = i915_request_create(ce);
  125  if (IS_ERR(rq)) {
  126  err = PTR_ERR(rq);
  130  err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
  132  i915_request_add(rq);
  136  i915_request_get(rq);
  137  i915_request_add(rq);
  149  if (!i915_request_completed(rq)) {
  [all...]
/linux-master/drivers/staging/octeon/ |
ethernet-mdio.h:
  25  int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
ktls_txrx.h:
  22  void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
  85  static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq,    (argument)
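These two hits are the usual Kconfig stub pattern: the real prototype when the feature is compiled in, and an empty static inline with the same signature when it is not, so callers need no #ifdefs of their own. A sketch of the shape; the trailing parameters and the CONFIG_MLX5_EN_TLS gate are assumptions, since the search output truncates the declaration:

    #ifdef CONFIG_MLX5_EN_TLS
    void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
                                  struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);
    #else
    static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq,
                                                struct sk_buff *skb,
                                                struct mlx5_cqe64 *cqe,
                                                u32 *cqe_bcnt)
    {
    }
    #endif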
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/ |
health.h:
  32  void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
  33  void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);

channels.c:
  31  *rqn = c->rq.rqn;
  55  *rqn = c->rq.rqn;
/linux-master/block/ |
blk-stat.c:
  50  void blk_stat_add(struct request *rq, u64 now)    (argument)
  52  struct request_queue *q = rq->q;
  58  value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
  60  if (req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE)
  61  blk_throtl_stat_add(rq, value);
  69  bucket = cb->bucket_fn(rq);
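Line 58 above guards a u64 subtraction: if the request's start timestamp is ahead of now (clock granularity, requeues), now - start would underflow to a huge unsigned value and poison the stats bucket, so the code clamps to zero instead. The same guard as a standalone helper:

    #include <stdint.h>

    /* Clamped elapsed-time helper mirroring blk_stat_add() line 58. */
    static uint64_t elapsed_ns(uint64_t now, uint64_t start)
    {
            /* Unsigned subtraction wraps, so never go "negative". */
            return now >= start ? now - start : 0;
    }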
elevator.h:
  106  void elv_rqhash_del(struct request_queue *q, struct request *rq);
  107  void elv_rqhash_add(struct request_queue *q, struct request *rq);
  108  void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
  184  #define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
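rq_fifo_clear() on line 184 deliberately uses list_del_init() rather than list_del(): the entry is unlinked and then re-pointed at itself, so a later list_empty() check on the same list_head stays well defined instead of touching poisoned pointers. A small illustration with the generic list API (the helper name is invented):

    #include <linux/list.h>

    /* After list_del(), the entry's pointers are poisoned and list_empty()
     * on it is undefined.  After list_del_init(), the entry is a valid
     * empty list, so a re-check like this is safe: */
    static bool remove_if_queued(struct list_head *entry)
    {
            if (list_empty(entry))
                    return false;       /* not on any FIFO */
            list_del_init(entry);       /* same idiom as rq_fifo_clear(rq) */
            return true;
    }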
/linux-master/drivers/infiniband/hw/mthca/ |
mthca_qp.c:
  211  return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
  213  return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
  214  ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
  506  qp_attr->cap.max_recv_wr = qp->rq.max;
  508  qp_attr->cap.max_recv_sge = qp->rq.max_gs;
  615  if (qp->rq.max)
  616  qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
  617  qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
  777  qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);
  843  mthca_wq_reset(&qp->rq);
  [all...]
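Lines 211-214 above are power-of-two WQE addressing: entry n lives at byte offset n << wqe_shift; for a direct buffer that is a plain add, while for a paged queue the offset splits into a page index (>> PAGE_SHIFT) and an offset within that page (& (PAGE_SIZE - 1)). A standalone sketch of the arithmetic, with illustrative names:

    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Locate WQE n when each WQE is (1 << wqe_shift) bytes and the queue
     * is scattered across an array of page-sized buffers. */
    static void *get_wqe(void **page_bufs, unsigned int wqe_shift, unsigned int n)
    {
            size_t off = (size_t)n << wqe_shift;          /* byte offset in queue */

            return (char *)page_bufs[off >> PAGE_SHIFT] + /* which page */
                   (off & (PAGE_SIZE - 1));               /* offset in that page */
    }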
/linux-master/drivers/nvme/host/ |
fabrics.h:
  202  static inline void nvmf_complete_timed_out_request(struct request *rq)    (argument)
  204  if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
  205  nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
  206  blk_mq_complete_request(rq);
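This helper is the shared fabrics answer to a command timing out: only if blk-mq has started the request and it has not already completed does it stamp NVME_SC_HOST_ABORTED_CMD and complete it, which avoids a double completion racing the normal path. A hypothetical transport timeout handler might use it roughly like this (the nvme_foo_* names are invented for illustration):

    static enum blk_eh_timer_return nvme_foo_timeout(struct request *rq)
    {
            /* Quiesce the transport first so no real completion can race
             * with the forced one (hypothetical helper). */
            nvme_foo_stop_queue(rq->q->queuedata);

            /* Safe even if the request completed just before we got here. */
            nvmf_complete_timed_out_request(rq);
            return BLK_EH_DONE;
    }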
rdma.c:
  154  static void nvme_rdma_complete_rq(struct request *rq);
  286  struct request *rq, unsigned int hctx_idx)
  288  struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
  294  struct request *rq, unsigned int hctx_idx,
  298  struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
  302  nvme_req(rq)->ctrl = &ctrl->ctrl;
  309  req->metadata_sgl = (void *)nvme_req(rq) +
  314  nvme_req(rq)->cmd = req->sqe.data;
  1162  struct request *rq = blk_mq_rq_from_pdu(req);    (local)
  1166  if (!nvme_try_complete_req(rq, re [all...]
  285  nvme_rdma_exit_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx)    (argument)
  293  nvme_rdma_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node)    (argument)
  1218  nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)    (argument)
  1234  nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, struct request *rq)    (argument)
  1415  struct request *rq = blk_mq_rq_from_pdu(req);    (local)
  1469  nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq, int *count, int *pi_count)    (argument)
  1529  nvme_rdma_map_data(struct nvme_rdma_queue *queue, struct request *rq, struct nvme_command *c)    (argument)
  1697  struct request *rq;    (local)
  1946  nvme_rdma_complete_timed_out(struct request *rq)    (argument)
  1955  nvme_rdma_timeout(struct request *rq)    (argument)
  1999  struct request *rq = bd->rq;    (local)
  2084  struct request *rq = blk_mq_rq_from_pdu(req);    (local)
  2113  nvme_rdma_complete_rq(struct request *rq)    (argument)
  [all...]
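The blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu() pairs that dominate these hits work because blk-mq allocates the driver's per-command data (the PDU, sized by the tag set's cmd_size) immediately after each struct request in a single allocation; the helpers just step over the request header in either direction. A sketch of the round trip (struct my_pdu is illustrative):

    struct my_pdu {
            struct nvme_request req;        /* first, so nvme_req(rq) works */
            /* ... transport-specific state ... */
    };

    /* With set->cmd_size == sizeof(struct my_pdu) at tag-set init time: */
    static struct my_pdu *to_pdu(struct request *rq)
    {
            return blk_mq_rq_to_pdu(rq);    /* request -> PDU, pointer math only */
    }

    static struct request *to_rq(struct my_pdu *pdu)
    {
            return blk_mq_rq_from_pdu(pdu); /* PDU -> request, same allocation */
    }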
tcp.c:
  269  struct request *rq;    (local)
  274  rq = blk_mq_rq_from_pdu(req);
  276  return rq_data_dir(rq) == WRITE && req->data_len &&
  311  struct request *rq = blk_mq_rq_from_pdu(req);    (local)
  317  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
  318  vec = &rq->special_vec;
  320  size = blk_rq_payload_bytes(rq);
  505  struct request *rq, unsigned int hctx_idx)
  507  struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
  513  struct request *rq, unsigne [all...]
  504  nvme_tcp_exit_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx)    (argument)
  512  nvme_tcp_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node)    (argument)
  587  struct request *rq;    (local)
  612  struct request *rq;    (local)
  669  struct request *rq = blk_mq_rq_from_pdu(req);    (local)
  703  struct request *rq;    (local)
  798  nvme_tcp_end_request(struct request *rq, u16 status)    (argument)
  810  struct request *rq =    (local)
  897  struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),    (local)
  910  struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),    (local)
  2429  nvme_tcp_complete_timed_out(struct request *rq)    (argument)
  2438  nvme_tcp_timeout(struct request *rq)    (argument)
  2477  nvme_tcp_map_data(struct nvme_tcp_queue *queue, struct request *rq)    (argument)
  2497  nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, struct request *rq)    (argument)
  2564  struct request *rq = bd->rq;    (local)
  [all...]
nvme.h:
  572  static inline u16 nvme_cid(struct request *rq)    (argument)
  574  return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
  582  struct request *rq;    (local)
  584  rq = blk_mq_tag_to_rq(tags, tag);
  585  if (unlikely(!rq)) {
  590  if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
  591  dev_err(nvme_req(rq)->ctrl->device,
  593  tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
  596  return rq;
  718  struct nvme_request *rq = nvme_req(req);    (local)
  773  nvme_complete_batch(struct io_comp_batch *iob, void (*fn)(struct request *rq))    (argument)
  838  nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, bool queue_live)    (argument)
  1053  nvme_mpath_start_request(struct request *rq)    (argument)
  1056  nvme_mpath_end_request(struct request *rq)    (argument)
  1112  nvme_start_request(struct request *rq)    (argument)
  [all...]
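nvme_cid() at line 574 packs a small generation counter into the otherwise-unused high bits of the 16-bit command ID alongside the blk-mq tag; on completion the lookup path recovers the tag and rejects a CID whose generation does not match, catching controllers that complete a command twice or invent one. A standalone sketch of the technique, assuming for illustration a 4-bit counter in the top bits (the real bit layout is defined in nvme.h):

    #include <stdint.h>

    #define CID_TAG_MASK   0x0fffu
    #define CID_GEN_SHIFT  12

    static uint16_t make_cid(uint16_t tag, uint8_t genctr)
    {
            return (uint16_t)((genctr & 0xf) << CID_GEN_SHIFT) |
                   (tag & CID_TAG_MASK);
    }

    /* On completion: returns the tag, or -1 if the generation is stale. */
    static int check_cid(uint16_t cid, uint8_t current_genctr)
    {
            if (((cid >> CID_GEN_SHIFT) & 0xf) != (current_genctr & 0xf))
                    return -1;          /* spurious/duplicate completion */
            return cid & CID_TAG_MASK;
    }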
/linux-master/drivers/gpu/drm/i915/display/ |
intel_overlay.c:
  235  struct i915_request *rq;    (local)
  240  rq = i915_request_create(overlay->context);
  241  if (IS_ERR(rq))
  242  return rq;
  244  err = i915_active_add_request(&overlay->last_flip, rq);
  246  i915_request_add(rq);
  250  return rq;
  257  struct i915_request *rq;    (local)
  262  rq = alloc_request(overlay, NULL);
  263  if (IS_ERR(rq))
  322  struct i915_request *rq;    (local)
  403  struct i915_request *rq;    (local)
  456  struct i915_request *rq;    (local)
  [all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
clock.c:
  444  struct ptp_clock_request *rq,
  462  if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
  469  if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
  470  (rq->extts.flags & PTP_ENABLE_FEATURE) &&
  471  (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
  474  if (rq->extts.index >= clock->ptp_info.n_pins)
  477  pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
  483  pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
  542  static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,    (argument)
  548  ts.tv_nsec = rq [all...]
  443  mlx5_extts_configure(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on)    (argument)
  562  mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq, u32 *out_pulse_duration_ns)    (argument)
  595  perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq, u32 *field_select, u32 *out_pulse_duration_ns, u64 *period, u64 *time_stamp)    (argument)
  628  mlx5_perout_configure(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on)    (argument)
  704  mlx5_pps_configure(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on)    (argument)
  715  mlx5_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on)    (argument)
  [all...]
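mlx5_extts_configure() above has the standard shape of a PTP external-timestamp enable hook: reject flag bits the hardware cannot honor, apply the PTP_STRICT_FLAGS rules, validate the pin, then program the edge. A condensed sketch of just the validation step, using the uapi flag names from <linux/ptp_clock.h> (the supported set here mirrors the hits above and is illustrative):

    #include <linux/errno.h>
    #include <linux/ptp_clock.h>

    static int extts_validate(struct ptp_clock_request *rq)
    {
            /* Reject anything beyond what this device supports. */
            if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | PTP_RISING_EDGE |
                                    PTP_FALLING_EDGE | PTP_STRICT_FLAGS))
                    return -EOPNOTSUPP;

            /* In strict mode, "both edges" cannot be honored by
             * single-edge hardware. */
            if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
                (rq->extts.flags & PTP_ENABLE_FEATURE) &&
                (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
                    return -EOPNOTSUPP;

            return 0;
    }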
/linux-master/drivers/block/ |
loop.c:
  259  static int lo_write_simple(struct loop_device *lo, struct request *rq,    (argument)
  266  rq_for_each_segment(bvec, rq, iter) {
  276  static int lo_read_simple(struct loop_device *lo, struct request *rq,    (argument)
  284  rq_for_each_segment(bvec, rq, iter) {
  295  __rq_for_each_bio(bio, rq)
  305  static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,    (argument)
  320  ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
  326  static int lo_req_flush(struct loop_device *lo, struct request *rq)    (argument)
  335  static void lo_complete_rq(struct request *rq)    (argument)
  337  struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
  372  struct request *rq = blk_mq_rq_from_pdu(cmd);    (local)
  396  struct request *rq = blk_mq_rq_from_pdu(cmd);    (local)
  459  do_req_filebacked(struct loop_device *lo, struct request *rq)    (argument)
  1839  struct request *rq = bd->rq;    (local)
  1883  struct request *rq = blk_mq_rq_from_pdu(cmd);    (local)
  [all...]
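lo_write_simple()/lo_read_simple() walk a request one bio_vec at a time with rq_for_each_segment(), which visits every segment of every bio in the request. A minimal sketch of that traversal (the function itself is illustrative; only the iterator use mirrors loop.c):

    #include <linux/blk-mq.h>

    /* Sum the payload of a request segment by segment. */
    static size_t count_payload(struct request *rq)
    {
            struct req_iterator iter;
            struct bio_vec bvec;    /* each segment: page + offset + length */
            size_t total = 0;

            rq_for_each_segment(bvec, rq, iter)
                    total += bvec.bv_len;

            return total;           /* blk_rq_bytes(rq) for plain data requests */
    }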
/linux-master/include/trace/events/ |
sched.h:
  748  TP_PROTO(struct rq *rq),
  749  TP_ARGS(rq));
  752  TP_PROTO(struct rq *rq),
  753  TP_ARGS(rq));
  756  TP_PROTO(struct rq *rq),
  757  TP_ARGS(rq));
  760  TP_PROTO(struct rq *r [all...]
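These TP_PROTO/TP_ARGS pairs belong to the scheduler's bare DECLARE_TRACE() tracepoints, which pass struct rq * directly and have no trace-event body; a module attaches a probe at runtime through the generated register_trace_<name>() helper. A sketch under the assumption that one of the truncated declarations above is pelt_rt_tp (the names are cut off in the output):

    #include <linux/module.h>
    #include <trace/events/sched.h>

    /* Probe signature: void *data first, then the TP_PROTO arguments. */
    static void my_rq_probe(void *data, struct rq *rq)
    {
            /* Runs in scheduler context: keep it short, no sleeping. */
    }

    static int __init my_init(void)
    {
            return register_trace_pelt_rt_tp(my_rq_probe, NULL);
    }

    static void __exit my_exit(void)
    {
            unregister_trace_pelt_rt_tp(my_rq_probe, NULL);
            tracepoint_synchronize_unregister();  /* wait for in-flight probes */
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");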
/linux-master/drivers/mtd/ |
mtd_blkdevs.c:
  108  struct request *rq;    (local)
  110  rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
  111  if (rq) {
  112  list_del_init(&rq->queuelist);
  113  blk_mq_start_request(rq);
  114  return rq;
  173  blk_mq_start_request(bd->rq);
  178  list_add_tail(&bd->rq->queuelist, &dev->rq_list);
  348  new->rq = new->disk->queue;
  377  blk_queue_write_cache(new->rq, tru [all...]
/linux-master/drivers/platform/chrome/wilco_ec/ |
debugfs.c:
  178  struct ec_request rq;    (local)
  183  memset(&rq, 0, sizeof(rq));
  184  rq.cmd = CMD_KB_CHROME;
  185  rq.sub_cmd = sub_cmd;
  189  msg.request_data = &rq;
  190  msg.request_size = sizeof(rq);
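The debugfs hits show the usual mailbox-message construction: zero the request struct so padding and unset fields are deterministic, fill in the opcodes, then point the message descriptor at it with an explicit size. Restated as a compact sketch (fields of the message beyond request_data/request_size are not shown in the output and are omitted here):

    struct ec_request rq;
    struct wilco_ec_message msg = { 0 };

    memset(&rq, 0, sizeof(rq));     /* deterministic padding/unset fields */
    rq.cmd = CMD_KB_CHROME;
    rq.sub_cmd = sub_cmd;

    msg.request_data = &rq;         /* mailbox copies request_size bytes */
    msg.request_size = sizeof(rq);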
/linux-master/drivers/gpu/drm/i915/selftests/ |
i915_active.c:
  101  struct i915_request *rq;    (local)
  103  rq = intel_engine_create_kernel_request(engine);
  104  if (IS_ERR(rq)) {
  105  err = PTR_ERR(rq);
  109  err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
  113  err = i915_active_add_request(&active->base, rq);
  114  i915_request_add(rq);
/linux-master/kernel/sched/ |
membarrier.c:
  89  * b: read rq->curr->mm == NULL
  111  * b: read rq->curr->mm == NULL
  125  * b: read rq->curr->mm == NULL
  238  struct rq *rq = this_rq();    (local)
  243  if (READ_ONCE(rq->membarrier_state) == membarrier_state)
  245  WRITE_ONCE(rq->membarrier_state, membarrier_state);
  257  * Matches memory barriers after rq->curr modification in
  308  * rq->curr modification in scheduler.
  347  * Matches memory barriers after rq [all...]
  477  struct rq *rq = cpu_rq(cpu);    (local)
  [all...]
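Lines 243-245 read and update rq->membarrier_state through READ_ONCE()/WRITE_ONCE(): the accessors force a single non-torn access and stop the compiler from caching, re-reading, or splitting a field that other CPUs inspect locklessly. The idiom in isolation, mirroring the hits above:

    /* Lockless publish of a flags word read by other CPUs without the
     * rq lock held. */
    static void update_state(struct rq *rq, int new_state)
    {
            if (READ_ONCE(rq->membarrier_state) == new_state)
                    return;             /* already published, skip the store */
            WRITE_ONCE(rq->membarrier_state, new_state);
    }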
/linux-master/drivers/block/null_blk/ |
main.c:
  158  MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
  786  static void null_complete_rq(struct request *rq)    (argument)
  788  struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
  790  blk_mq_end_request(rq, cmd->error);
  1207  struct request *rq = blk_mq_rq_from_pdu(cmd);    (local)
  1211  sector_t sector = blk_rq_pos(rq);
  1216  rq_for_each_segment(bvec, rq, iter) {
  1219  op_is_write(req_op(rq)), sector,
  1220  rq->cmd_flags & REQ_FUA);
  1237  struct request *rq    (local)
  1282  struct request *rq = blk_mq_rq_from_pdu(cmd);    (local)
  1294  struct request *rq = blk_mq_rq_from_pdu(cmd);    (local)
  1392  should_timeout_request(struct request *rq)    (argument)
  1400  should_requeue_request(struct request *rq)    (argument)
  1415  should_timeout_request(struct request *rq)    (argument)
  1420  should_requeue_request(struct request *rq)    (argument)
  1490  struct request *rq;    (local)
  1516  null_timeout_rq(struct request *rq)    (argument)
  1552  struct request *rq = bd->rq;    (local)
  1612  struct request *rq = rq_list_pop(rqlist);    (local)
  [all...]
/linux-master/drivers/gpu/drm/i915/gem/selftests/ |
i915_gem_migrate.c:
  193  struct i915_request *rq;    (local)
  224  0xdeadbeaf, &rq);
  225  if (rq) {
  228  dma_resv_add_fence(obj->base.resv, &rq->fence,
  230  i915_request_put(rq);
  394  struct i915_request *rq;    (local)
  411  rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
  413  if (IS_ERR(rq)) {
  414  err = PTR_ERR(rq);
  419  err = i915_deps_add_dependency(&deps, &rq [all...]