/linux-master/arch/mips/cavium-octeon/executive/
  cvmx-pko.c
     70  int queue;  (local)
     76  for (queue = 0; queue < num_queues; queue++) {
     82      config.s.index = queue;
     83      config.s.qid = base_queue + queue;
     85      config.s.tail = (queue == (num_queues - 1));
     86      config.s.s_tail = (queue == static_priority_end);
     88      config.s.static_q = (queue <= static_priority_end);
     92      CVMX_CMD_QUEUE_PKO(base_queue + queue),
    284  int queue;  (local)
    331  uint64_t queue;  (local)
    [all...]
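A note on this entry: the cvmx-pko.c hits derive each queue's config flags from its index alone: the last queue becomes the ring tail, and every queue up to static_priority_end is marked static-priority. A minimal standalone sketch of that flag arithmetic; the constants and the printf harness are invented for illustration and are not the Octeon API:

    #include <stdio.h>

    /* Hypothetical stand-in for the cvmx-pko flag computation: of
     * num_queues queues, the last is the ring tail, and queues at
     * index <= static_priority_end are serviced at static priority. */
    int main(void)
    {
        int num_queues = 4, static_priority_end = 1, base_queue = 8;

        for (int queue = 0; queue < num_queues; queue++) {
            int tail     = (queue == (num_queues - 1));
            int s_tail   = (queue == static_priority_end);
            int static_q = (queue <= static_priority_end);

            printf("qid=%d tail=%d s_tail=%d static_q=%d\n",
                   base_queue + queue, tail, s_tail, static_q);
        }
        return 0;
    }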
/linux-master/drivers/gpu/drm/amd/amdkfd/
  kfd_kernel_queue.c
     38  /* Initialize a kernel queue, including allocations of GART memory
     39   * needed for the queue.
     51  pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
     71  pr_err("Invalid queue type %d\n", type);
    141  if (init_queue(&kq->queue, &prop) != 0)
    144  kq->queue->device = dev;
    145  kq->queue->process = kfd_get_process(current);
    147  kq->queue->mqd_mem_obj = kq->mqd_mgr->allocate_mqd(kq->mqd_mgr->dev,
    148          &kq->queue->properties);
    149  if (!kq->queue
    [all...]
/linux-master/drivers/misc/genwqe/
  card_debugfs.c
    215  struct ddcb_queue *queue;  (local)
    218  queue = &cd->queue;
    229  queue->ddcb_max, (long long)queue->ddcb_daddr,
    230  (long long)queue->ddcb_daddr +
    231  (queue->ddcb_max * DDCB_LENGTH),
    232  queue->ddcb_vaddr, queue->ddcbs_in_flight,
    233  queue
    [all...]
/linux-master/drivers/net/
  eql.c
    142  static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
    149  spin_lock(&eql->queue.lock);
    150  head = &eql->queue.all_slaves;
    159  eql_kill_one_slave(&eql->queue, slave);
    163  spin_unlock(&eql->queue.lock);
    186  spin_lock_init(&eql->queue.lock);
    187  INIT_LIST_HEAD(&eql->queue.all_slaves);
    188  eql->queue.master_dev = dev;
    213  BUG_ON(!list_empty(&eql->queue.all_slaves));
    223  static void eql_kill_one_slave(slave_queue_t *queue, slave_  (argument)
    232  eql_kill_slave_queue(slave_queue_t *queue)  (argument)
    302  __eql_schedule_slaves(slave_queue_t *queue)  (argument)
    368  __eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)  (argument)
    383  eql_is_full(slave_queue_t *queue)  (argument)
    393  __eql_insert_slave(slave_queue_t *queue, slave_t *slave)  (argument)
    [all...]
/linux-master/drivers/nvme/target/
  rdma.c
     50  struct nvmet_rdma_queue *queue;  (member in struct nvmet_rdma_cmd)
     64  struct nvmet_rdma_queue *queue;  (member in struct nvmet_rdma_rsp)
    146  MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
    171  static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
    211  nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)  (argument)
    216  spin_lock_irqsave(&queue->rsps_lock, flags);
    217  rsp = list_first_entry_or_null(&queue->free_rsps,
    221  spin_unlock_irqrestore(&queue->rsps_lock, flags);
    229  ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
    247  nvmet_rdma_free_rsp(rsp->queue
    453  nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)  (argument)
    488  nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)  (argument)
    522  nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)  (argument)
    671  struct nvmet_rdma_queue *queue = rsp->queue;  (local)
    687  nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)  (argument)
    705  struct nvmet_rdma_queue *queue = wc->qp->qp_context;  (local)
    758  struct nvmet_rdma_queue *queue = wc->qp->qp_context;  (local)
    791  struct nvmet_rdma_queue *queue = wc->qp->qp_context;  (local)
    953  struct nvmet_rdma_queue *queue = rsp->queue;  (local)
    975  nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, struct nvmet_rdma_rsp *cmd)  (argument)
   1011  struct nvmet_rdma_queue *queue = wc->qp->qp_context;  (local)
   1259  nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)  (argument)
   1336  nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)  (argument)
   1346  nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)  (argument)
   1365  struct nvmet_rdma_queue *queue =  (local)
   1375  nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn, struct nvmet_rdma_queue *queue)  (argument)
   1425  struct nvmet_rdma_queue *queue;  (local)
   1525  struct nvmet_rdma_queue *queue = priv;  (local)
   1542  nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue, struct rdma_conn_param *p)  (argument)
   1570  struct nvmet_rdma_queue *queue;  (local)
   1625  nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)  (argument)
   1652  __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)  (argument)
   1687  nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)  (argument)
   1702  nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue)  (argument)
   1731  nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue)  (argument)
   1766  struct nvmet_rdma_queue *queue = NULL;  (local)
   1817  struct nvmet_rdma_queue *queue;  (local)
   1835  struct nvmet_rdma_queue *queue, *tmp;  (local)
   2045  struct nvmet_rdma_queue *queue, *tmp;  (local)
    [all...]
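The nvmet_rdma_get_rsp() hits above (lines 211-229) show a common kernel idiom: pop a preallocated response from a lock-protected free list and fall back to a one-off allocation when the pool is empty. A userspace sketch of the same pattern, with a pthread mutex standing in for the kernel's spin_lock_irqsave() and all type names invented:

    #include <pthread.h>
    #include <stdlib.h>

    struct rsp {
        struct rsp *next;
        int from_pool;      /* 1 if it must go back on the free list */
    };

    static struct rsp *free_rsps;
    static pthread_mutex_t rsps_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct rsp *get_rsp(void)
    {
        struct rsp *r;

        pthread_mutex_lock(&rsps_lock);
        r = free_rsps;
        if (r)
            free_rsps = r->next;
        pthread_mutex_unlock(&rsps_lock);

        if (!r)             /* pool exhausted: allocate a one-off */
            return calloc(1, sizeof(*r));
        r->from_pool = 1;
        return r;
    }

    static void put_rsp(struct rsp *r)
    {
        if (!r->from_pool) {    /* one-off responses are simply freed */
            free(r);
            return;
        }
        pthread_mutex_lock(&rsps_lock);
        r->next = free_rsps;    /* push back onto the pool */
        free_rsps = r;
        pthread_mutex_unlock(&rsps_lock);
    }

    int main(void)
    {
        struct rsp seed = { 0 };
        seed.from_pool = 1;
        free_rsps = &seed;          /* seed the pool with one entry */

        struct rsp *a = get_rsp();  /* comes from the pool */
        struct rsp *b = get_rsp();  /* pool empty -> calloc fallback */
        put_rsp(b);
        put_rsp(a);
        return 0;
    }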
/linux-master/drivers/nvme/host/
  rdma.c
     73  struct nvme_rdma_queue *queue;  (member in struct nvme_rdma_request)
    159  static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)  (argument)
    161  return queue - queue->ctrl->queues;
    164  static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)  (argument)
    166  return nvme_rdma_queue_idx(queue) >
    167          queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
    168          queue->ctrl->io_queues[HCTX_TYPE_READ];
    171  static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)  (argument)
    173  return queue
    246  nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)  (argument)
    257  nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)  (argument)
    300  struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];  (local)
    323  struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];  (local)
    335  struct nvme_rdma_queue *queue = &ctrl->queues[0];  (local)
    413  nvme_rdma_free_cq(struct nvme_rdma_queue *queue)  (argument)
    421  nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)  (argument)
    462  nvme_rdma_create_cq(struct ib_device *ibdev, struct nvme_rdma_queue *queue)  (argument)
    489  nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)  (argument)
    572  struct nvme_rdma_queue *queue;  (local)
    633  __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)  (argument)
    639  nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)  (argument)
    650  nvme_rdma_free_queue(struct nvme_rdma_queue *queue)  (argument)
    678  struct nvme_rdma_queue *queue = &ctrl->queues[idx];  (local)
   1173  struct nvme_rdma_queue *queue = wc->qp->qp_context;  (local)
   1201  nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue, struct nvme_rdma_request *req)  (argument)
   1234  nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, struct request *rq)  (argument)
   1267  nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, struct nvme_rdma_request *req, struct nvme_command *c, int count)  (argument)
   1293  nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue, struct nvme_rdma_request *req, struct nvme_command *c)  (argument)
   1305  nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, struct nvme_rdma_request *req, struct nvme_command *c, int count)  (argument)
   1409  nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, struct nvme_rdma_request *req, struct nvme_command *c, int count, int pi_count)  (argument)
   1529  nvme_rdma_map_data(struct nvme_rdma_queue *queue, struct request *rq, struct nvme_command *c)  (argument)
   1595  nvme_rdma_post_send(struct nvme_rdma_queue *queue, struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, struct ib_send_wr *first)  (argument)
   1626  nvme_rdma_post_recv(struct nvme_rdma_queue *queue, struct nvme_rdma_qe *qe)  (argument)
   1652  nvme_rdma_tagset(struct nvme_rdma_queue *queue)  (argument)
   1670  struct nvme_rdma_queue *queue = &ctrl->queues[0];  (local)
   1694  nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, struct nvme_completion *cqe, struct ib_wc *wc)  (argument)
   1742  struct nvme_rdma_queue *queue = wc->qp->qp_context;  (local)
   1778  nvme_rdma_conn_established(struct nvme_rdma_queue *queue)  (argument)
   1791  nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, struct rdma_cm_event *ev)  (argument)
   1817  nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)  (argument)
   1842  nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)  (argument)
   1891  struct nvme_rdma_queue *queue = cm_id->context;  (local)
   1949  struct nvme_rdma_queue *queue = req->queue;  (local)
   1958  struct nvme_rdma_queue *queue = req->queue;  (local)
   1998  struct nvme_rdma_queue *queue = hctx->driver_data;  (local)
   2077  struct nvme_rdma_queue *queue = hctx->driver_data;  (local)
   2116  struct nvme_rdma_queue *queue = req->queue;  (local)
    [all...]
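Two details in the host-side hits are worth noting: nvme_rdma_queue_idx() (line 161) recovers a queue's index by pointer subtraction from the controller's queue array rather than storing it, and nvme_rdma_poll_queue() (lines 166-168) classifies any queue past the default and read sets as a poll queue. A compilable model of both, with an invented struct layout:

    #include <stdio.h>

    struct queue;
    struct ctrl {
        struct queue *queues;       /* queues[0] is the admin queue */
        int io_queues_default;
        int io_queues_read;
    };
    struct queue { struct ctrl *ctrl; };

    /* index = offset inside the controller's array; nothing stored */
    static int queue_idx(struct queue *q)
    {
        return q - q->ctrl->queues;
    }

    /* queues beyond the default + read sets are polled, not interrupt-driven */
    static int is_poll_queue(struct queue *q)
    {
        return queue_idx(q) >
               q->ctrl->io_queues_default + q->ctrl->io_queues_read;
    }

    int main(void)
    {
        struct ctrl c = { .io_queues_default = 2, .io_queues_read = 2 };
        struct queue qs[8];

        c.queues = qs;
        for (int i = 0; i < 8; i++)
            qs[i].ctrl = &c;

        /* queue 5 is past 1 + 2 + 2, so it polls; queue 3 does not */
        printf("queue 5 poll? %d, queue 3 poll? %d\n",
               is_poll_queue(&qs[5]), is_poll_queue(&qs[3]));
        return 0;
    }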
/linux-master/drivers/iommu/
  io-pgfault.c
    238   * that no new fault is added to the queue. In particular it must flush its
    239   * low-level queue before calling this function.
    256  flush_workqueue(iopf_param->queue->wq);
    292   * @queue: the queue whose partial faults need to be discarded
    294   * When the hardware queue overflows, last page faults in a group may have been
    296   * driver shouldn't be adding new faults to this queue concurrently.
    300  int iopf_queue_discard_partial(struct iopf_queue *queue)  (argument)
    305  if (!queue)
    308  mutex_lock(&queue
    330  iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)  (argument)
    395  iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)  (argument)
    448  struct iopf_queue *queue;  (local)
    480  iopf_queue_free(struct iopf_queue *queue)  (argument)
    [all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
  si.h
     30  u32 me, u32 pipe, u32 queue, u32 vmid);
  cik.h
     30  u32 me, u32 pipe, u32 queue, u32 vmid);
  nv.h
     32  u32 me, u32 pipe, u32 queue, u32 vmid);
  soc21.h
     29  u32 me, u32 pipe, u32 queue, u32 vmid);
/linux-master/scripts/
  headerdep.pl
     89  my @queue = @_;
     90  while(@queue) {
     91      my $header = pop @queue;
    106      push @queue, $dep;
    143  my @queue = map { [[0, $_]] } @_;
    144  while(@queue) {
    145      my $top = pop @queue;
    159      push @queue, $chain;
/linux-master/drivers/net/ethernet/microchip/lan966x/
  lan966x_tbf.c
     10  u32 queue = 0;  (local)
     15  queue = TC_H_MIN(qopt->parent) - 1;
     16  if (queue >= NUM_PRIO_QUEUES)
     23  se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
     60  u32 queue = 0;  (local)
     64  queue = TC_H_MIN(qopt->parent) - 1;
     65  if (queue >= NUM_PRIO_QUEUES)
     72  se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
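The lan966x_tbf.c hits map a qdisc parent handle to a per-port scheduler element: the handle's minor ID selects the priority queue, and se_idx is a flat (port, queue) index. A worked example under assumed constants; the real NUM_PRIO_QUEUES and SE_IDX_QUEUE values may differ from these placeholders:

    #include <stdio.h>

    #define NUM_PRIO_QUEUES 8
    #define SE_IDX_QUEUE    0
    #define TC_H_MIN(h)     ((h) & 0xFFFF)  /* kernel's minor-ID mask */

    int main(void)
    {
        unsigned int parent = 0x00010003;   /* hypothetical parent handle */
        unsigned int chip_port = 2;
        unsigned int queue = TC_H_MIN(parent) - 1;  /* minor 3 -> queue 2 */

        /* minor 0 would wrap to UINT_MAX and be caught by this check */
        if (queue >= NUM_PRIO_QUEUES)
            return 1;

        /* one scheduler element per (port, priority queue) pair: prints 18 */
        printf("se_idx = %u\n",
               SE_IDX_QUEUE + chip_port * NUM_PRIO_QUEUES + queue);
        return 0;
    }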
/linux-master/drivers/net/ethernet/aquantia/atlantic/hw_atl2/
  hw_atl2_llh_internal.h
    125  #define HW_ATL2_RX_Q_TC_MAP_ADR(queue) \
    126          (((queue) < 32) ? 0x00005900 + ((queue) / 8) * 4 : 0)
    128  #define HW_ATL2_RX_Q_TC_MAP_SHIFT(queue) \
    129          (((queue) < 32) ? ((queue) * 4) % 32 : 0)
    173   * parameter: queue {q} | bit-level stride | range [0, 31]
    178  #define HW_ATL2_TX_Q_TC_MAP_ADR(queue) \
    179          (((queue) < 32) ? 0x0000799C + ((queue) /
    [all...]
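These macros pack eight 4-bit queue-to-TC fields into each 32-bit register, so the register address advances every 8 queues and the bit shift cycles in steps of 4. A quick check of that arithmetic, reusing the RX #defines exactly as listed above:

    #include <stdio.h>

    #define RX_Q_TC_MAP_ADR(q)   (((q) < 32) ? 0x00005900 + ((q) / 8) * 4 : 0)
    #define RX_Q_TC_MAP_SHIFT(q) (((q) < 32) ? ((q) * 4) % 32 : 0)

    int main(void)
    {
        unsigned int q = 10;

        /* queue 10 lives in the second register (0x5904), bits 11:8 */
        printf("queue %u: addr=0x%04X shift=%u\n",
               q, RX_Q_TC_MAP_ADR(q), RX_Q_TC_MAP_SHIFT(q));
        return 0;
    }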
/linux-master/drivers/infiniband/sw/rxe/
  rxe_cq.c
     28  count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
     30  rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
     50  cq->queue = rxe_queue_init(rxe, &cqe,
     52  if (!cq->queue) {
     58  cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
     60  vfree(cq->queue->buf);
     61  kfree(cq->queue);
     78  err = rxe_queue_resize(cq->queue, (unsigne
    [all...]
/linux-master/include/linux/iio/
  buffer-dma.h
     22   * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
     24   * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
     41   * @queue: Parent DMA buffer queue
     57  struct iio_dma_buffer_queue *queue;  (member in struct iio_dma_buffer_block)
     63   * queue->list_lock if the block is not owned by the core.
     93   * atomic context as well as blocks on those lists. This is the outgoing queue
     96   * @incoming: List of buffers on the incoming queue
    120  int (*submit)(struct iio_dma_buffer_queue *queue,
    122  void (*abort)(struct iio_dma_buffer_queue *queue);
    [all...]
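The buffer-dma.h kernel-doc describes a block lifecycle: QUEUED on the incoming list, owned by the DMA hardware while in flight, then DONE on the outgoing list. A toy model of those transitions; the enum and helpers are invented, and the real header has additional states and per-queue locking:

    #include <stdio.h>

    enum block_state {
        BLOCK_QUEUED,   /* on the incoming queue, waiting for DMA */
        BLOCK_ACTIVE,   /* owned by the DMA controller */
        BLOCK_DONE,     /* on the outgoing queue, ready for userspace */
    };

    struct block {
        enum block_state state;
    };

    static void submit(struct block *b)   { b->state = BLOCK_ACTIVE; }
    static void complete(struct block *b) { b->state = BLOCK_DONE;   }

    int main(void)
    {
        struct block b = { .state = BLOCK_QUEUED };

        submit(&b);    /* core hands the block to the DMA driver */
        complete(&b);  /* completion callback moves it to the outgoing list */
        printf("final state: %d (BLOCK_DONE)\n", b.state);
        return 0;
    }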
/linux-master/drivers/net/wireless/intel/iwlwifi/
  iwl-op-mode.h
     55   * out *iff* the opmode will never run on hardware with multi-queue capability.
     63   * @rx_rss: data queue RX notification to the op_mode, for (data) notifications
     64   * received on the RSS queue(s). The queue parameter indicates which of the
     67   * @queue_full: notifies that a HW queue is full.
     69   * @queue_not_full: notifies that a HW queue is not full any more.
     82   * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
     98  struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
     99  void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
    100  void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
    140  iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, unsigned int queue)  (argument)
    148  iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, int queue)  (argument)
    154  iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode, int queue)  (argument)
    [all...]
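iwl-op-mode.h is an ops-table boundary: the transport signals events such as a full TX queue by calling through iwl_op_mode_ops, and the inline wrappers at lines 140-154 just forward into that table. A generic sketch of the pattern, with a simplified subset and invented names:

    #include <stdio.h>

    struct op_mode;

    struct op_mode_ops {
        void (*queue_full)(struct op_mode *op_mode, int queue);
        void (*queue_not_full)(struct op_mode *op_mode, int queue);
    };

    struct op_mode {
        const struct op_mode_ops *ops;
    };

    /* thin wrapper hiding the indirection, like iwl_op_mode_queue_full() */
    static inline void op_mode_queue_full(struct op_mode *op, int queue)
    {
        op->ops->queue_full(op, queue);
    }

    static void my_queue_full(struct op_mode *op, int queue)
    {
        printf("queue %d full, stop feeding it\n", queue);
    }

    int main(void)
    {
        static const struct op_mode_ops ops = { .queue_full = my_queue_full };
        struct op_mode op = { .ops = &ops };

        op_mode_queue_full(&op, 3);   /* dispatches to my_queue_full() */
        return 0;
    }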
/linux-master/drivers/net/wireless/ralink/rt2x00/
  rt2x00usb.c
    252  struct data_queue *queue;  (local)
    255  tx_queue_for_each(rt2x00dev, queue) {
    256      while (!rt2x00queue_empty(queue)) {
    257          entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
    271  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
    298  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
    326  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
    363  skbdesc->desc_len = entry->queue->desc_size;
    375  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
    385  if (urb->actual_length < entry->queue
    427  rt2x00usb_kick_queue(struct data_queue *queue)  (argument)
    476  rt2x00usb_flush_queue(struct data_queue *queue, bool drop)  (argument)
    526  rt2x00usb_watchdog_tx_dma(struct data_queue *queue)  (argument)
    536  rt2x00usb_dma_timeout(struct data_queue *queue)  (argument)
    546  struct data_queue *queue;  (local)
    579  rt2x00usb_assign_endpoint(struct data_queue *queue, struct usb_endpoint_descriptor *ep_desc)  (argument)
    604  struct data_queue *queue = rt2x00dev->tx;  (local)
    649  rt2x00usb_alloc_entries(struct data_queue *queue)  (argument)
    682  rt2x00usb_free_entries(struct data_queue *queue)  (argument)
    716  struct data_queue *queue;  (local)
    746  struct data_queue *queue;  (local)
    [all...]
/linux-master/net/core/
  request_sock.c
     20   * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
     34  void reqsk_queue_alloc(struct request_sock_queue *queue)  (argument)
     36  queue->fastopenq.rskq_rst_head = NULL;
     37  queue->fastopenq.rskq_rst_tail = NULL;
     38  queue->fastopenq.qlen = 0;
     40  queue->rskq_accept_head = NULL;
/linux-master/include/net/
  request_sock.h
    212  /** struct request_sock_queue - queue of request_socks
    234  void reqsk_queue_alloc(struct request_sock_queue *queue);
    239  static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)  (argument)
    241  return READ_ONCE(queue->rskq_accept_head) == NULL;
    244  static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,  (argument)
    249  spin_lock_bh(&queue->rskq_lock);
    250  req = queue->rskq_accept_head;
    253  WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
    254  if (queue->rskq_accept_head == NULL)
    255  queue
    261  reqsk_queue_removed(struct request_sock_queue *queue, const struct request_sock *req)  (argument)
    269  reqsk_queue_added(struct request_sock_queue *queue)  (argument)
    275  reqsk_queue_len(const struct request_sock_queue *queue)  (argument)
    280  reqsk_queue_len_young(const struct request_sock_queue *queue)  (argument)
    [all...]
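reqsk_queue_remove() above pops the head of a singly linked accept FIFO under a lock, while reqsk_queue_empty() reads the head locklessly with READ_ONCE(). A userspace sketch of that shape; a plain mutex stands in for spin_lock_bh(), and the structs are reduced to just the accept list:

    #include <pthread.h>
    #include <stdio.h>

    struct request {
        struct request *dl_next;
        int id;
    };

    struct request_queue {
        pthread_mutex_t lock;
        struct request *accept_head;
        struct request *accept_tail;
    };

    static struct request *queue_remove(struct request_queue *q)
    {
        struct request *req;

        pthread_mutex_lock(&q->lock);
        req = q->accept_head;
        if (req) {
            q->accept_head = req->dl_next;  /* unlink the head */
            if (!q->accept_head)
                q->accept_tail = NULL;      /* queue drained */
        }
        pthread_mutex_unlock(&q->lock);
        return req;
    }

    int main(void)
    {
        struct request r2 = { NULL, 2 }, r1 = { &r2, 1 };
        struct request_queue q = {
            PTHREAD_MUTEX_INITIALIZER, &r1, &r2,
        };

        printf("%d\n", queue_remove(&q)->id);  /* 1: FIFO order */
        printf("%d\n", queue_remove(&q)->id);  /* 2 */
        return 0;
    }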
/linux-master/drivers/usb/gadget/function/
  uvc_video.c
     38  if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
     41  if (video->queue.buf_used == 0 && ts.tv_sec) {
     65  if (buf->bytesused - video->queue.buf_used <= len - pos)
     75  struct uvc_video_queue *queue = &video->queue;  (local)
     80  mem = buf->mem + queue->buf_used;
     81  nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
     84  queue->buf_used += nbytes;
    116  if (buf->bytesused == video->queue.buf_used) {
    117  video->queue
    374  struct uvc_video_queue *queue = &video->queue;  (local)
    570  struct uvc_video_queue *queue = &video->queue;  (local)
    [all...]
/linux-master/sound/core/seq/
  seq_queue.c
      3   * ALSA sequencer Timing queue handling
     10   * - Owner of unlocked queue is kept unmodified even if it is
     19   * - The queue is locked when struct snd_seq_queue pointer is returned via
     49  /* assign queue id and insert to list */
     58  q->queue = i;
     87  /* create new queue (constructor) */
    100  q->queue = -1;
    120  /* delete queue (destructor) */
    153  static void queue_use(struct snd_seq_queue *queue, int client, int use);
    155  /* allocate a new queue
    417  struct snd_seq_queue *queue;  (local)
    438  struct snd_seq_queue *queue;  (local)
    473  queue_use(struct snd_seq_queue *queue, int client, int use)  (argument)
    497  struct snd_seq_queue *queue;  (local)
    [all...]
/linux-master/drivers/staging/media/sunxi/sun6i-isp/
  sun6i_isp_params.h
     17  struct list_head queue; /* Queue and buffers lock. */  (member in struct sun6i_isp_params_state)
     30  struct vb2_queue queue;  (member in struct sun6i_isp_params)
/linux-master/drivers/s390/crypto/
  zcrypt_error.h
     82  int card = AP_QID_CARD(zq->queue->qid);
     83  int queue = AP_QID_QUEUE(zq->queue->qid);  (local)
    110  __func__, card, queue,
    114  __func__, card, queue,
    136  __func__, card, queue, ehdr->reply_code, apfs);
    139  __func__, card, queue,
    146  __func__, card, queue, ehdr->reply_code);
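AP_QID_CARD()/AP_QID_QUEUE() in the zcrypt snippet split one AP queue identifier into its card and domain halves; on s390 the card number sits in the upper byte of the qid and the queue (domain) in the lower byte. A standalone illustration of that decode; the macros are re-derived here, so treat the exact shifts as an assumption:

    #include <stdio.h>

    #define QID_CARD(qid)  (((qid) >> 8) & 0xff)  /* upper byte: card */
    #define QID_QUEUE(qid) ((qid) & 0xff)         /* lower byte: domain */

    int main(void)
    {
        unsigned int qid = 0x0512;  /* hypothetical: card 5, queue 0x12 */

        printf("card=%u queue=%u\n", QID_CARD(qid), QID_QUEUE(qid));
        return 0;
    }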
/linux-master/drivers/soc/ti/
  knav_qmss_acc.c
      3   * Keystone accumulator queue manager
     28  int range_base, queue;  (local)
     33  for (queue = 0; queue < range->num_queues; queue++) {
     35      queue);
     39      range_base + queue);
     44  queue = acc->channel - range->acc_info.start_channel;
     45  inst = knav_range_offset_to_inst(kdev, range, queue);
     47      range_base + queue);
     87  int range_base, channel, queue = 0;  (local)
    195  knav_range_setup_acc_irq(struct knav_range_info *range, int queue, bool enabled)  (argument)
    300  knav_acc_setup_cmd(struct knav_device *kdev, struct knav_range_info *range, struct knav_reg_acc_command *cmd, int queue)  (argument)
    334  knav_acc_stop(struct knav_device *kdev, struct knav_range_info *range, int queue)  (argument)
    352  knav_acc_start(struct knav_device *kdev, struct knav_range_info *range, int queue)  (argument)
    377  int queue;  (local)
    [all...]