Searched refs:queue (Results 151 - 175 of 1381) sorted by relevance


/linux-master/drivers/crypto/cavium/cpt/
cptvf.h
12 /* Default command queue length */
52 spinlock_t lock; /* command queue lock */
53 u32 idx; /* Command queue host write idx */
55 struct command_chunk *qhead; /* Command queue head, instructions
63 u32 qchunksize; /* Command queue chunk size */
64 struct command_queue queue[CPT_NUM_QS_PER_VF]; member in struct:command_qinfo
77 struct pending_entry *head; /* head of the queue */
87 struct pending_queue queue[CPT_NUM_QS_PER_VF]; member in struct:pending_qinfo
91 for (i = 0, q = &qinfo->queue[i]; i < qinfo->nr_queues; i++, \
92 q = &qinfo->queue[
[all...]
cptvf_main.c
84 struct pending_queue *queue; local
86 for_each_pending_queue(pqinfo, queue, i) {
87 if (!queue->head)
90 /* free single queue */
91 kfree_sensitive((queue->head));
93 queue->front = 0;
94 queue->rear = 0;
108 struct pending_queue *queue = NULL; local
113 for_each_pending_queue(pqinfo, queue, i) {
114 queue
170 struct command_queue *queue = NULL; local
206 struct command_queue *queue = NULL; local
[all...]
/linux-master/net/mac80211/
tkip.h
27 u8 *ra, int only_iv, int queue,
/linux-master/drivers/usb/musb/
musb_host.h
45 /* map from control or bulk queue head to the first qh on that ring */
114 struct list_head *queue; local
118 queue = &qh->hep->urb_list;
119 if (list_empty(queue))
121 return list_entry(queue->next, struct urb, urb_list);
/linux-master/drivers/staging/rtl8723bs/include/
osdep_service_linux.h
44 struct list_head queue; member in struct:__queue
53 static inline struct list_head *get_list_head(struct __queue *queue) argument
55 return (&(queue->queue));
/linux-master/drivers/net/wireless/silabs/wfx/
Makefile
13 queue.o \
/linux-master/drivers/media/usb/uvc/
uvc_isight.c
35 static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf, argument
44 struct uvc_streaming *stream = uvc_queue_to_stream(queue);
126 ret = isight_decode(&stream->queue, buf,
136 buf = uvc_queue_next_buffer(&stream->queue,
/linux-master/include/linux/
if_eql.h
45 slave_queue_t queue; member in struct:equalizer
/linux-master/drivers/crypto/qce/
core.h
13 * @queue: crypto request queue
14 * @lock: the lock protects queue and req
30 struct crypto_queue queue; member in struct:qce_device
/linux-master/include/crypto/internal/
engine.h
31 * @queue_lock: spinlock to synchronise access to request queue
32 * @queue: the crypto queue of the engine
33 * @rt: whether this queue is set to run as a realtime task
34 * @prepare_crypt_hardware: a request will soon arrive from the queue
38 * queue so the subsystem notifies the driver that it may relax the
57 struct crypto_queue queue; member in struct:crypto_engine
/linux-master/drivers/net/ethernet/intel/i40e/
i40e_lan_hmc.h
14 /* Rx queue context data
47 /* Tx queue context data
150 u16 queue);
152 u16 queue,
155 u16 queue);
157 u16 queue,
/linux-master/tools/testing/selftests/net/tcp_ao/lib/
repair.c
42 static void test_sock_checkpoint_queue(int sk, int queue, int qlen, argument
48 if (setsockopt(sk, SOL_TCP, TCP_REPAIR_QUEUE, &queue, sizeof(queue)))
127 static void test_sock_restore_seq(int sk, int queue, uint32_t seq) argument
129 if (setsockopt(sk, SOL_TCP, TCP_REPAIR_QUEUE, &queue, sizeof(queue)))
136 static void test_sock_restore_queue(int sk, int queue, void *buf, int len) argument
144 if (setsockopt(sk, SOL_TCP, TCP_REPAIR_QUEUE, &queue, sizeof(queue)))
/linux-master/virt/kvm/
async_pf.c
41 INIT_LIST_HEAD(&vcpu->async_pf.queue);
107 * is always required when the item is taken off the completion queue.
111 * Wake all events skip the queue and go straight done, i.e. don't
125 /* cancel outstanding work queue item */
126 while (!list_empty(&vcpu->async_pf.queue)) {
128 list_first_entry(&vcpu->async_pf.queue,
129 typeof(*work), queue);
130 list_del(&work->queue);
180 list_del(&work->queue);
218 list_add_tail(&work->queue,
[all...]
/linux-master/drivers/s390/net/
qeth_core_main.c
189 struct qeth_qdio_q *queue = card->qdio.in_q; local
197 for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
198 queue->bufs[i].pool_entry = NULL;
383 "Failed to create completion queue\n");
1344 static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue, argument
1354 QETH_TXQ_STAT_INC(queue, bufs);
1355 QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
1357 QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
1359 QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
1360 QETH_TXQ_STAT_ADD(queue, tx_byte
1384 qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, bool error, int budget) argument
1419 qeth_tx_complete_pending_bufs(struct qeth_card *card, struct qeth_qdio_out_q *queue, bool drain, int budget) argument
2622 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); local
2648 struct qeth_qdio_out_q *queue; local
2717 struct qeth_qdio_out_q *queue; local
2972 qeth_tx_select_bulk_max(struct qeth_card *card, struct qeth_qdio_out_q *queue) argument
3019 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; local
3453 struct qeth_qdio_q *queue = card->qdio.in_q; local
3544 qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) argument
3564 qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) argument
3583 qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) argument
3598 qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, int count) argument
3706 qeth_flush_queue(struct qeth_qdio_out_q *queue) argument
3715 qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) argument
3771 qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, unsigned int queue, int first_element, int count) argument
3813 qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err, int queue, int first_elem, int count, unsigned long card_ptr) argument
3950 qeth_add_hw_header(struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int hdr_len, unsigned int proto_len, unsigned int *elements) argument
4023 qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, struct sk_buff *curr_skb, struct qeth_hdr *curr_hdr) argument
4140 __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, unsigned int elements, struct qeth_hdr *hdr, unsigned int offset, unsigned int hd_len) argument
4218 qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned int offset, unsigned int hd_len, unsigned int elements_needed) argument
4319 qeth_xmit(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, __be16 proto, void (*fill_header)(struct qeth_qdio_out_q *queue, struct qeth_hdr *hdr, struct sk_buff *skb, __be16 proto, unsigned int data_len)) argument
5850 struct qeth_qdio_out_q *queue; local
5881 qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, unsigned int bidx, unsigned int qdio_error, int budget) argument
5937 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi); local
6944 struct qeth_qdio_out_q *queue; local
7045 struct qeth_qdio_out_q *queue; local
7072 struct qeth_qdio_out_q *queue; local
[all...]
qeth_ethtool.c
113 struct qeth_qdio_out_q *queue,
116 WRITE_ONCE(queue->coalesce_usecs, coal->tx_coalesce_usecs);
117 WRITE_ONCE(queue->max_coalesced_frames, coal->tx_max_coalesced_frames);
121 !qeth_out_queue_is_empty(queue))
122 qeth_tx_arm_timer(queue, coal->tx_coalesce_usecs);
131 struct qeth_qdio_out_q *queue; local
140 qeth_for_each_output_queue(card, queue, i)
141 __qeth_set_coalesce(dev, queue, coal);
293 struct qeth_qdio_out_q *queue; local
301 queue
112 __qeth_set_coalesce(struct net_device *dev, struct qeth_qdio_out_q *queue, struct ethtool_coalesce *coal) argument
308 qeth_set_per_queue_coalesce(struct net_device *dev, u32 queue, struct ethtool_coalesce *coal) argument
[all...]
/linux-master/drivers/gpu/drm/panthor/
panthor_sched.c
43 * through the firmware interface). Each queue is assigned a command
56 * called from the queue ring-buffer by the kernel using a pre-defined
66 * queue ring-buffer, and the group is scheduled for execution if it
79 * group/queue state that would be based on information we wouldn't have
84 * a queue of waiters, ordered by job submission order). This approach
170 * We have a queue dedicated to heap chunk allocation works to avoid
355 * struct panthor_queue - Execution queue
358 /** @scheduler: DRM scheduler used for this queue. */
361 /** @entity: DRM scheduling entity used for this queue. */
367 * The job timeout is suspended when the queue i
770 panthor_queue_put_syncwait_obj(struct panthor_queue *queue) argument
784 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue) argument
816 group_free_queue(struct panthor_group *group, struct panthor_queue *queue) argument
976 struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id]; local
1022 struct panthor_queue *queue = group->queues[cs_id]; local
1075 struct panthor_queue *queue = group->queues[cs_id]; local
1312 struct panthor_queue *queue = group && cs_id < group->queue_count ? local
1836 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx, struct list_head *queue, bool skip_idle_groups, bool owned_by_tick_ctx) argument
1987 struct panthor_queue *queue = group->queues[i]; local
2381 struct panthor_queue *queue = group->queues[queue_idx]; local
2457 struct list_head *queue = &sched->groups.runnable[group->priority]; local
2514 queue_stop(struct panthor_queue *queue, struct panthor_job *bad_job) argument
2520 queue_start(struct panthor_queue *queue) argument
2777 struct panthor_queue *queue = group->queues[queue_idx]; local
2813 struct panthor_queue *queue = group->queues[job->queue_idx]; local
2948 struct panthor_queue *queue = group->queues[job->queue_idx]; local
2993 struct panthor_queue *queue; local
[all...]
/linux-master/drivers/s390/crypto/
zcrypt_msgtype6.c
240 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
310 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
587 __func__, AP_QID_CARD(zq->queue->qid),
588 AP_QID_QUEUE(zq->queue->qid),
594 AP_QID_CARD(zq->queue->qid),
595 AP_QID_QUEUE(zq->queue->qid),
598 __func__, AP_QID_CARD(zq->queue->qid),
599 AP_QID_QUEUE(zq->queue->qid),
601 ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
739 AP_QID_CARD(zq->queue
[all...]
/linux-master/drivers/crypto/marvell/octeontx/
otx_cptvf_main.c
77 struct otx_cpt_pending_queue *queue; local
80 for_each_pending_queue(pqinfo, queue, i) {
81 if (!queue->head)
84 /* free single queue */
85 kfree_sensitive((queue->head));
86 queue->front = 0;
87 queue->rear = 0;
88 queue->qlen = 0;
96 struct otx_cpt_pending_queue *queue = NULL; local
102 for_each_pending_queue(pqinfo, queue,
158 struct otx_cpt_cmd_queue *queue = NULL; local
190 struct otx_cpt_cmd_queue *queue = NULL; local
[all...]
/linux-master/drivers/media/platform/xilinx/
xilinx-dma.c
266 * videobuf2 queue operations
272 * @queue: buffer list entry in the DMA engine queued buffers list
277 struct list_head queue; member in struct:xvip_dma_buffer
289 list_del(&buf->queue);
336 if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
365 list_add_tail(&buf->queue, &dma->queued_bufs);
370 if (vb2_is_streaming(&dma->queue))
423 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
425 list_del(&buf->queue);
450 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
[all...]
/linux-master/drivers/ptp/
ptp_clock.c
39 /* time stamp event queue operations */
46 static void enqueue_external_timestamp(struct timestamp_event_queue *queue, argument
66 spin_lock_irqsave(&queue->lock, flags);
68 dst = &queue->buf[queue->tail];
77 if (!queue_free(queue))
78 WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
80 WRITE_ONCE(queue->tail, (queue
238 struct timestamp_event_queue *queue = NULL; local
[all...]
/linux-master/tools/testing/selftests/mqueue/
mq_open_tests.c
18 * open a posix message queue and then reports whether or not they
42 " path Path name of the message queue to create\n"
59 mqd_t queue = -1; variable
90 if (queue != -1)
91 if (mq_close(queue))
158 printf("Current rlimit value for POSIX message queue bytes is "
169 printf("Temporarily lowering default queue parameters "
181 printf("Temporarily lowering maximum queue parameters "
185 "queue parameters to the maximum queue "
[all...]
/linux-master/crypto/
cryptd.c
32 MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
37 struct crypto_queue queue; member in struct:cryptd_cpu_queue
51 struct cryptd_queue *queue; member in struct:cryptd_instance_ctx
56 struct cryptd_queue *queue; member in struct:skcipherd_instance_ctx
61 struct cryptd_queue *queue; member in struct:hashd_instance_ctx
66 struct cryptd_queue *queue; member in struct:aead_instance_ctx
100 static int cryptd_init_queue(struct cryptd_queue *queue, argument
106 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
107 if (!queue->cpu_queue)
110 cpu_queue = per_cpu_ptr(queue
118 cryptd_fini_queue(struct cryptd_queue *queue) argument
130 cryptd_enqueue_request(struct cryptd_queue *queue, struct crypto_async_request *request) argument
318 struct cryptd_queue *queue; local
373 cryptd_create_skcipher(struct crypto_template *tmpl, struct rtattr **tb, struct crypto_attr_type *algt, struct cryptd_queue *queue) argument
488 struct cryptd_queue *queue = local
658 cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, struct crypto_attr_type *algt, struct cryptd_queue *queue) argument
813 struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); local
866 cryptd_create_aead(struct crypto_template *tmpl, struct rtattr **tb, struct crypto_attr_type *algt, struct cryptd_queue *queue) argument
921 static struct cryptd_queue queue; variable in typeref:struct:cryptd_queue
[all...]
/linux-master/arch/mips/cavium-octeon/executive/
cvmx-helper-util.c
84 * Setup Random Early Drop on a specific input queue
86 * @queue: Input queue to setup RED on (0-7)
95 static int cvmx_helper_setup_red_queue(int queue, int pass_thresh, argument
107 cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
109 /* Use the actual queue 0 counter, not the average */
116 cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
136 int queue; local
151 for (queue = 0; queue <
[all...]
/linux-master/arch/powerpc/boot/dts/fsl/
pq3-etsec2-0.dtsi
55 queue-group@b0000 {
pq3-etsec2-1.dtsi
55 queue-group@b1000 {

Completed in 288 milliseconds
