Searched refs:queue (Results 1 - 25 of 1368) sorted by last modified time

/linux-master/net/mac80211/
tx.c
339 * from each queue should be OK to make some room for new frames. */
499 * been queued to pending queue. No reordering can happen, go
904 /* first fragment was already added to queue by caller */
1130 * queue yet -- if this happened we acquire the lock
1592 * queue size. 4 Mbytes is 64 max-size aggregates in 802.11n.
1702 * queue them.
1714 * Since queue is stopped, queue up frames for
1716 * tasklet when the queue is woken again.
3463 head = skb_peek_tail(&flow->queue);
4406 ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev, struct sk_buff_head *queue) argument
4518 struct sk_buff_head queue; local
4608 u16 queue; local
6228 u16 queue = ieee80211_select_queue(sdata, sta, skb); local
[all...]
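
The tx.c matches above centre on mac80211's pending-frame handling: when a hardware queue is stopped, frames are parked on a pending queue and flushed from the TX tasklet once the queue is woken again (lines 1702-1716). Below is a minimal user-space sketch of that stop/queue/wake pattern, using a plain linked list and a stopped flag instead of the real struct sk_buff_head and driver callbacks; all names here are illustrative, not mac80211 API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct frame {                         /* stand-in for struct sk_buff */
    int id;
    struct frame *next;
};

struct txq {
    bool stopped;                      /* mirrors the "queue is stopped" state */
    struct frame *pending_head, *pending_tail;
};

/* Queue a frame: transmit immediately if the queue is running,
 * otherwise park it on the pending list, preserving order. */
static void tx_frame(struct txq *q, struct frame *f)
{
    if (!q->stopped) {
        printf("tx frame %d\n", f->id);
        free(f);
        return;
    }
    f->next = NULL;
    if (q->pending_tail)
        q->pending_tail->next = f;
    else
        q->pending_head = f;
    q->pending_tail = f;
}

/* Wake the queue: flush pending frames in FIFO order, as the mac80211
 * TX tasklet would after the driver wakes the queue. */
static void wake_queue(struct txq *q)
{
    q->stopped = false;
    while (q->pending_head) {
        struct frame *f = q->pending_head;
        q->pending_head = f->next;
        printf("tx pending frame %d\n", f->id);
        free(f);
    }
    q->pending_tail = NULL;
}
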
rx.c
2294 int queue = rx->security_idx; local
2303 rx->key->u.ccmp.rx_pn[queue],
2309 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2310 sizeof(rx->key->u.gcmp.rx_pn[queue]));
3485 return true; /* queue the frame */
3491 return true; /* queue the frame */
3633 goto queue;
3651 goto queue;
3656 goto queue;
3694 goto queue;
[all...]
/linux-master/net/ipv4/
udp.c
1436 /* this can save us from acquiring the rx queue lock on next receive */
1455 /* as above, but the caller held the rx queue lock, too */
1506 * queue is full; always allow at least a packet
1514 * - Reduce memory overhead and thus increase receive queue capacity
1527 * queue contains some other skb
1638 * first_packet_length - return length of first packet in receive queue
1699 struct sk_buff_head *queue; local
1704 queue = &udp_sk(sk)->reader_queue;
1715 spin_lock_bh(&queue->lock);
1716 skb = __skb_try_recv_from_queue(sk, queue, flag
[all...]
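
The udp.c hits around lines 1436-1716 concern the split between the socket's lock-protected receive queue and udp_sk(sk)->reader_queue: batches of packets are spliced from the shared queue into a private reader queue so that subsequent reads avoid re-taking the rx queue lock. Here is a simplified, self-contained sketch of that splice-to-private-queue idea using pthreads; the types and helpers are invented for illustration and are not the kernel's sk_buff_head API.

#include <pthread.h>
#include <stddef.h>

struct pkt { struct pkt *next; };

struct pkt_queue {
    pthread_mutex_t lock;              /* contended: producers append here */
    struct pkt *head, *tail;
};

/* Reader-private queue; only the single reader touches it, so no lock. */
struct reader_queue { struct pkt *head, *tail; };

/* Splice everything from the shared queue into the reader queue in one
 * locked operation, so per-packet reads afterwards are lock-free. */
static void splice_to_reader(struct pkt_queue *shared, struct reader_queue *rq)
{
    pthread_mutex_lock(&shared->lock);
    if (shared->head) {
        if (rq->tail)
            rq->tail->next = shared->head;
        else
            rq->head = shared->head;
        rq->tail = shared->tail;
        shared->head = shared->tail = NULL;
    }
    pthread_mutex_unlock(&shared->lock);
}

/* Pop from the private queue first; refill from the shared queue only
 * when it runs dry.  Returns NULL if both queues are empty. */
static struct pkt *recv_pkt(struct pkt_queue *shared, struct reader_queue *rq)
{
    struct pkt *p;

    if (!rq->head)
        splice_to_reader(shared, rq);
    if (!rq->head)
        return NULL;
    p = rq->head;
    rq->head = p->next;
    if (!rq->head)
        rq->tail = NULL;
    return p;
}
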
/linux-master/include/net/
mac80211.h
102 * a single per-vif queue for multicast data frames.
104 * The driver is expected to initialize its private per-queue data for stations
108 * Whenever mac80211 adds a new frame to a queue, it calls the .wake_tx_queue
116 * obtain the next queue to pull frames from, the driver calls
176 * struct ieee80211_tx_queue_params - transmit queue configuration
179 * transmit queue configuration. Cf. IEEE 802.11 7.3.2.29.
187 * @uapsd: is U-APSD mode enabled for the queue
827 * the queue) and may only be unset after mac80211 gives the OK for
830 * hardware queue.
853 * by drivers to kick the DMA queue onl
[all...]
/linux-master/drivers/net/wireless/virtual/
mac80211_hwsim.c
1519 /* If the queue contains MAX_QUEUE skb's drop some */
2002 if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
2717 unsigned int link_id, u16 queue,
2721 "%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n",
2722 __func__, queue,
2715 mac80211_hwsim_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params) argument
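
mac80211.h line 176 onward documents struct ieee80211_tx_queue_params (per-queue txop, cw_min, cw_max, aifs and the U-APSD flag), and the mac80211_hwsim hit above shows the .conf_tx callback that receives it. The following is a hedged sketch of the shape of such a per-queue EDCA parameter store; the type, constant, and function names are made up for illustration and are not the real mac80211 declarations.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define NUM_TX_QUEUES 4                /* illustrative: one per 802.11 access category */

/* Illustrative mirror of the EDCA fields named in the search hits,
 * kept per hardware queue. */
struct txq_params {
    uint16_t txop;                     /* TXOP limit                          */
    uint16_t cw_min;                   /* minimum contention window           */
    uint16_t cw_max;                   /* maximum contention window           */
    uint8_t  aifs;                     /* arbitration interframe space number */
    bool     uapsd;                    /* U-APSD enabled for this queue       */
};

struct dummy_hw {
    struct txq_params txq[NUM_TX_QUEUES];
};

/* conf_tx-style handler: validate the queue index and store the new
 * parameters so the (imaginary) hardware can be reprogrammed later. */
static int dummy_conf_tx(struct dummy_hw *hw, uint16_t queue,
                         const struct txq_params *params)
{
    if (queue >= NUM_TX_QUEUES)
        return -1;
    memcpy(&hw->txq[queue], params, sizeof(*params));
    return 0;
}
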
/linux-master/drivers/net/ethernet/wangxun/libwx/
wx_lib.c
807 /* Make sure that anybody stopping the queue after this
848 /* attempt to distribute budget to each queue fairly, but don't allow
1566 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
1573 /* set mask for 16 queue limit of RSS */
1604 /* We start by asking for one vector per queue pair */
1614 /* One for non-queue interrupts */
1674 /* minmum one for queue, one for misc*/
1860 * We allocate one q_vector per queue interrupt. If allocation fails we
1970 wx_err(wx, "Unable to allocate memory for queue vector
2077 wx_set_ivar(struct wx *wx, s8 direction, u16 queue, u16 msix_vector) argument
[all...]
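
The wx_lib.c comments around lines 1566-1674 describe a common queue/interrupt sizing policy: one Rx queue per CPU (plus one Tx queue per CPU when available), a 16-queue RSS cap, one MSI-X vector requested per queue pair plus one extra for non-queue ("misc") interrupts, with a floor of two vectors. The sketch below works through that arithmetic under those stated assumptions; the constants and names are illustrative, not the driver's.

/* Illustrative sizing helper: derive queue and MSI-X vector counts
 * from the CPU count under the policy described above. */
#define RSS_QUEUE_LIMIT 16             /* assumed 16-queue RSS cap */

struct vector_plan {
    unsigned int num_queue_pairs;      /* rx/tx queue pairs          */
    unsigned int num_vectors;          /* queue vectors + 1 for misc */
};

static struct vector_plan plan_vectors(unsigned int online_cpus,
                                       unsigned int hw_max_vectors)
{
    struct vector_plan p;

    /* One queue pair per CPU, capped by the RSS limit. */
    p.num_queue_pairs = online_cpus < RSS_QUEUE_LIMIT ?
                        online_cpus : RSS_QUEUE_LIMIT;

    /* One vector per queue pair plus one for non-queue interrupts,
     * bounded by what the hardware exposes, minimum two
     * (one for a queue, one for misc). */
    p.num_vectors = p.num_queue_pairs + 1;
    if (p.num_vectors > hw_max_vectors)
        p.num_vectors = hw_max_vectors;
    if (p.num_vectors < 2)
        p.num_vectors = 2;

    /* If vectors were trimmed, shrink the queue count to match. */
    if (p.num_queue_pairs > p.num_vectors - 1)
        p.num_queue_pairs = p.num_vectors - 1;

    return p;
}
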
/linux-master/drivers/net/ethernet/intel/igc/
igc_main.c
254 /* reset BQL for queue */
271 * @tx_ring: Tx descriptor ring for a specific queue
332 * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
348 * @tx_ring: tx descriptor ring (for a specific queue) to setup
398 netdev_err(dev, "Error on Tx queue %u setup\n", i);
530 * @rx_ring: rx descriptor ring (for a specific queue) to setup
541 /* XDP RX-queue info */
598 netdev_err(dev, "Error on Rx queue %u setup\n", i);
651 /* disable the queue */
738 /* disable the queue */
912 igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, enum igc_mac_filter_type type, const u8 *addr, int queue) argument
3173 igc_add_mac_filter(struct igc_adapter *adapter, enum igc_mac_filter_type type, const u8 *addr, int queue) argument
3239 igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, int queue) argument
3309 igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, int queue) argument
6045 igc_save_launchtime_params(struct igc_adapter *adapter, int queue, bool enable) argument
6340 igc_save_cbs_params(struct igc_adapter *adapter, int queue, bool enable, int idleslope, int sendslope, int hicredit, int locredit) argument
[all...]
/linux-master/drivers/net/ethernet/intel/i40e/
i40e_main.c
235 /* Allocate last queue in the pile for FDIR VSI queue
241 "Cannot allocate queue %d for I40E_VSI_FDIR\n",
327 * If not already scheduled, this puts the task into the work queue
340 * @txqueue: queue number timing out
1681 * admin queue command will unnecessarily fire.
1929 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1947 /* find the next higher power-of-2 of num queue pairs */
1954 /* Setup queue offset/count for all TCs for given VSI */
1968 * default queue an
9550 u32 queue = le32_to_cpu(data->prtdcb_rupto); local
11181 u16 queue = FIELD_GET(I40E_GL_MDET_TX_QUEUE_MASK, reg) - local
11193 u16 queue = FIELD_GET(I40E_GL_MDET_RX_QUEUE_MASK, reg) - local
[all...]
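
The i40e_main.c hits around lines 1929-1968 describe laying out queues across traffic classes: the queue count per TC is rounded up to the next power of two, and each enabled TC gets a contiguous (offset, count) range. Below is a self-contained sketch of that layout computation; the names and limits are assumptions made for illustration, not the driver's structures.

#include <stdint.h>

#define MAX_TCS 8                      /* assumed number of traffic classes */

struct tc_queue_map {
    uint16_t offset[MAX_TCS];          /* first queue of each TC      */
    uint16_t count[MAX_TCS];           /* number of queues in each TC */
};

/* Round up to the next power of two (result >= 1). */
static uint16_t next_pow2(uint16_t n)
{
    uint16_t p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

/* Assign a contiguous power-of-two queue range to every enabled TC,
 * in the spirit of the "Setup queue offset/count for all TCs" comment. */
static uint16_t map_tc_queues(struct tc_queue_map *map,
                              uint8_t enabled_tc_bitmap,
                              uint16_t queues_per_tc)
{
    uint16_t qcount = next_pow2(queues_per_tc);
    uint16_t offset = 0;
    int tc;

    for (tc = 0; tc < MAX_TCS; tc++) {
        if (enabled_tc_bitmap & (1u << tc)) {
            map->offset[tc] = offset;
            map->count[tc] = qcount;
            offset += qcount;
        } else {
            /* Disabled TCs get no dedicated queue range. */
            map->offset[tc] = 0;
            map->count[tc] = 0;
        }
    }
    return offset;                     /* total queues used */
}
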
/linux-master/drivers/md/
dm.c
599 if (blk_queue_io_stat(md->queue))
968 /* nudge anyone waiting on suspend queue */
1081 return &md->queue->limits;
1190 min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
1857 /* If suspended, or map not yet available, queue this IO for later */
2041 dm_queue_destroy_crypto_profile(md->queue);
2114 md->queue = md->disk->queue;
2255 * requests in the queue may refer to bio from the old bioset,
2256 * so you must walk through the queue t
[all...]
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_process.c
105 struct queue *q;
132 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
137 * Its possible, during this step, a few SDMA queue nodes got deleted
182 * Get the usage count for each SDMA queue in temp_list.
194 pr_debug("Failed to read SDMA queue active counter for queue id: %d",
379 struct queue *q = container_of(kobj, struct queue, kobj);
493 int kfd_procfs_add_queue(struct queue *q)
502 /* Create proc/<pid>/queues/<queue i
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
sdma_v5_2.c
1403 struct amdgpu_mes_queue *queue; local
1408 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
1409 if (queue) {
1410 DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
1411 amdgpu_fence_process(queue->ring);
1430 /* XXX page queue*/
1446 /* XXX page queue*/
1462 /* XXX page queue*/
1478 /* XXX page queue*/
gfx_v9_0.c
777 lower_32_bits(queue_mask)); /* queue mask lo */
779 upper_32_bits(queue_mask)); /* queue mask hi */
798 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
801 /*queue_type: normal compute queue */
1966 int mec, int pipe, int queue)
1977 ring->queue = queue;
1985 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
3222 /* tell RLC which is KIQ queue */
3225 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1965 gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int mec, int pipe, int queue) argument
[all...]
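
The gfx_v9_0.c hits (and the gfx_v10/v11 variants below) write a 64-bit compute queue mask into the ring as two 32-bit words, "queue mask lo" then "queue mask hi". That split is just the usual lower/upper 32-bit decomposition, sketched here without the amdgpu ring helpers; the function names are invented for illustration.

#include <stdint.h>

/* Equivalents of the kernel's lower_32_bits()/upper_32_bits() helpers. */
static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

/* Emit a 64-bit compute-queue mask into a 32-bit command stream,
 * low word first, as the MAP_QUEUES packets above do. */
static void emit_queue_mask(uint32_t *cmd, uint64_t queue_mask)
{
    cmd[0] = lo32(queue_mask);         /* queue mask lo */
    cmd[1] = hi32(queue_mask);         /* queue mask hi */
}
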
gfx_v11_0.c
152 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
153 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
189 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
192 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
930 int me, int pipe, int queue)
940 ring->queue = queue;
950 sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
961 int mec, int pipe, int queue)
973 ring->queue
929 gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, int me, int pipe, int queue) argument
960 gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int mec, int pipe, int queue) argument
5948 struct amdgpu_mes_queue *queue; local
[all...]
gfx_v10_0.c
3508 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
3509 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
3542 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
3545 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
4432 int me, int pipe, int queue)
4442 ring->queue = queue;
4452 sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
4462 int mec, int pipe, int queue)
4473 ring->queue
4431 gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, int me, int pipe, int queue) argument
4461 gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int mec, int pipe, int queue) argument
8939 struct amdgpu_mes_queue *queue; local
[all...]
amdgpu_mes.c
316 struct amdgpu_mes_queue *queue, *tmp2; local
336 list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
338 idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
341 queue_input.doorbell_offset = queue->doorbell_off;
347 DRM_WARN("failed to remove hardware queue\n");
359 list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
360 amdgpu_mes_queue_free_mqd(queue);
361 list_del(&queue->list);
362 kfree(queue);
467 DRM_ERROR("queue lis
614 struct amdgpu_mes_queue *queue; local
734 struct amdgpu_mes_queue *queue; local
[all...]
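
The amdgpu_mes.c matches walk a gang's queue_list with list_for_each_entry_safe(), removing the hardware queue, dropping the IDR entry, unlinking and freeing each node; the "_safe" variant is needed because the current node is destroyed during the walk. Here is a standalone sketch of the same remove-while-iterating pattern on a plain singly-linked list; the structure and function names are invented for illustration.

#include <stdlib.h>

struct mqueue {
    int queue_id;
    struct mqueue *next;
};

/* Tear down every queue on the list.  The next pointer is saved before
 * the current node is freed, which is exactly why the kernel walk uses
 * list_for_each_entry_safe() rather than list_for_each_entry(). */
static void remove_all_queues(struct mqueue **head)
{
    struct mqueue *q = *head;

    while (q) {
        struct mqueue *next = q->next; /* save before freeing */

        /* here the driver would unmap the hardware queue and release
         * its id; this sketch just frees the node */
        free(q);
        q = next;
    }
    *head = NULL;
}
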
/linux-master/block/
bdev.c
416 bdev->bd_queue = disk->queue;
blk-iocost.c
815 if (!blk_queue_nonrot(disk->queue))
819 if (blk_queue_depth(disk->queue) == 1)
3228 if (!queue_is_mq(disk->queue)) {
3233 ioc = q_to_ioc(disk->queue);
3238 ioc = q_to_ioc(disk->queue);
3241 blk_mq_freeze_queue(disk->queue);
3242 blk_mq_quiesce_queue(disk->queue);
3316 blk_stat_enable_accounting(disk->queue);
3317 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3320 blk_stat_disable_accounting(disk->queue);
[all...]
/linux-master/arch/arc/kernel/
entry.S
53 ; put last task in scheduler queue
/linux-master/kernel/sched/
sched.h
262 * This is the priority-queue data structure of the RT scheduling class:
266 struct list_head queue[MAX_RT_PRIO]; member in struct:rt_prio_array
1811 * Don't (re)queue an already queued item; nor queue anything when
2195 * each task makes to its run queue's load is weighted according to its
2208 * {de,en}queue flags:
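
sched.h line 262 points at the RT class's priority-queue data structure: an array of list heads, one per priority level, paired with a bitmap so the highest occupied priority can be found in constant time. The sketch below reduces this to 64 priority levels so a single 64-bit word can serve as the bitmap, and pushes LIFO to stay short; the real struct rt_prio_array uses a larger bitmap, FIFO list_head queues, and MAX_RT_PRIO levels.

#include <stddef.h>
#include <stdint.h>

#define NPRIO 64                       /* sketch only; MAX_RT_PRIO is larger */

struct task {
    int prio;                          /* 0 is the highest priority */
    struct task *next;
};

struct prio_array {
    uint64_t bitmap;                   /* bit p set <=> queue[p] non-empty */
    struct task *queue[NPRIO];         /* one list per priority level      */
};

static void enqueue_task(struct prio_array *a, struct task *t)
{
    t->next = a->queue[t->prio];       /* LIFO push keeps the sketch short */
    a->queue[t->prio] = t;
    a->bitmap |= 1ULL << t->prio;
}

/* Dequeue from the highest-priority non-empty queue in O(1), using the
 * bitmap to locate the first set bit (GCC/Clang builtin). */
static struct task *pick_next_task(struct prio_array *a)
{
    int prio;
    struct task *t;

    if (!a->bitmap)
        return NULL;

    prio = __builtin_ctzll(a->bitmap); /* lowest set bit = best priority */
    t = a->queue[prio];
    a->queue[prio] = t->next;
    if (!a->queue[prio])
        a->bitmap &= ~(1ULL << prio);
    return t;
}
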
/linux-master/include/linux/
blkdev.h
152 struct request_queue *queue; member in struct:gendisk
171 struct kobject queue_kobj; /* the queue/ directory */
366 * The queue owner gets to use this for whatever they like.
379 * various queue flags, see QUEUE_* below
404 * mq queue kobject
430 * ida allocated id for this queue. Used to index queues from
438 * queue settings
519 #define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
520 #define QUEUE_FLAG_DYING 1 /* queue being torn down */
532 #define QUEUE_FLAG_INIT_DONE 14 /* queue i
[all...]
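
blkdev.h lines 519 onward define the request-queue state flags (QUEUE_FLAG_STOPPED, QUEUE_FLAG_DYING, QUEUE_FLAG_INIT_DONE, ...) as bit numbers rather than pre-shifted masks. A small non-atomic sketch of the same bit-number convention follows; in the kernel these bits are manipulated atomically, and the helper names here are not the blk_queue_flag_* API.

#include <stdbool.h>

/* Flags are bit *numbers*, as in blkdev.h, not masks. */
enum {
    QF_STOPPED   = 0,                  /* queue is stopped        */
    QF_DYING     = 1,                  /* queue being torn down   */
    QF_INIT_DONE = 14,                 /* queue init is finished  */
};

static inline void qflag_set(unsigned long *flags, int bit)
{
    *flags |= 1UL << bit;
}

static inline void qflag_clear(unsigned long *flags, int bit)
{
    *flags &= ~(1UL << bit);
}

static inline bool qflag_test(const unsigned long *flags, int bit)
{
    return (*flags >> bit) & 1UL;
}
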
/linux-master/drivers/usb/gadget/udc/
fsl_udc_core.c
164 /* Removed the req from fsl_ep->queue */
165 list_del_init(&req->queue);
212 while (!list_empty(&ep->queue)) {
215 req = list_entry(ep->queue.next, struct fsl_req, queue);
672 * the main operation is to insert the req->queue to the eq->queue
685 INIT_LIST_HEAD(&req->queue);
734 if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) {
737 lastreq = list_entry(ep->queue
[all...]
/linux-master/arch/x86/kvm/
x86.c
670 queue:
727 goto queue;
837 * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
10424 * as a vmexit, or there could already an event in the queue, which is
10482 * KVM must never queue a new exception while injecting an event; KVM
/linux-master/tools/perf/util/
annotate.c
1581 int max_lines, struct annotation_line *queue, int addr_fmt_width,
1612 if (queue != NULL) {
1613 list_for_each_entry_from(queue, &notes->src->source, node) {
1614 if (queue == al)
1616 annotation_line__print(queue, sym, start, evsel, len,
1663 if (queue)
2643 struct annotation_line *pos, *queue = NULL; local
2689 if (context && queue == NULL) {
2690 queue = pos;
2696 queue, addr_fmt_widt
1579 annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start, struct evsel *evsel, u64 len, int min_pcnt, int printed, int max_lines, struct annotation_line *queue, int addr_fmt_width, int percent_type) argument
[all...]
/linux-master/tools/include/uapi/sound/
asound.h
772 size_t buffer_size; /* queue size in bytes */
954 #define SNDRV_TIMER_PSFLG_EARLY_EVENT (1<<2) /* write early event to the poll queue */
959 unsigned int queue_size; /* total size of queue (32-1024) */
970 unsigned int overrun; /* count of read queue overruns */
971 unsigned int queue; /* used queue size */ member in struct:snd_timer_status
/linux-master/net/sched/
sch_generic.c
60 /* Main transmission queue. */
157 /* it's still part of the queue */
312 * false - hardware queue frozen backoff
374 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
375 * this queue.
385 * 0 - queue is empty or throttled.
386 * >0 - queue is not empty.
481 * queue is not stopped for another reason, we
526 netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n",
704 /* 3-band FIFO queue
[all...]
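
The sch_generic.c comments spell out the qdisc transmit-loop contract: the restart path returns 0 when the queue is empty or throttled and a positive value when packets may remain, and a frozen hardware queue causes the packet to be requeued rather than dropped. The sketch below mirrors that contract with invented types; it is not the kernel's qdisc_restart()/sch_direct_xmit() code.

#include <stdbool.h>
#include <stddef.h>

struct pkt { struct pkt *next; };

struct txqueue {
    struct pkt *head;                  /* software queue                      */
    struct pkt *requeued;              /* packet bounced by a frozen HW queue */
    bool throttled;
};

/* Stand-in for the driver transmit hook: false = hardware queue frozen. */
static bool hw_xmit(struct pkt *p) { (void)p; return true; }

/* One restart step.  Returns 0 when the queue is empty or throttled,
 * >0 when packets may remain, mirroring the documented convention. */
static int qdisc_step(struct txqueue *q)
{
    struct pkt *p;

    if (q->throttled)
        return 0;

    if (q->requeued) {
        p = q->requeued;
        q->requeued = NULL;
    } else if (q->head) {
        p = q->head;
        q->head = p->next;
    } else {
        return 0;
    }

    if (!hw_xmit(p)) {
        /* hardware queue frozen: keep the packet for the next run */
        q->requeued = p;
        return 0;
    }

    return (q->head || q->requeued) ? 1 : 0;
}
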

Completed in 483 milliseconds
