Searched refs:queues (Results 1 - 25 of 227) sorted by relevance


/linux-master/tools/testing/selftests/drivers/net/
queues.py
12 folders = glob.glob(f'/sys/class/net/{ifname}/queues/rx-*')
17 queues = nl.queue_get({'ifindex': cfg.ifindex}, dump=True)
18 if queues:
19 return len([q for q in queues if q['type'] == 'rx'])
24 queues = nl_get_queues(cfg, nl)
25 if not queues:
29 ksft_eq(queues, expected)
33 queues = nl_get_queues(cfg, nl)
34 if not queues:
50 queues
[all...]
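
The selftest above counts rx queues two ways: globbing the per-interface sysfs directory and asking netlink for a queue dump. Below is a minimal userspace C sketch of the sysfs side, assuming the /sys/class/net/<ifname>/queues/rx-* layout the test globs; the count_rx_queues() helper name is mine, not the test's.

/* Sketch: count a NIC's rx queues by globbing sysfs, mirroring the
 * selftest's glob.glob(f'/sys/class/net/{ifname}/queues/rx-*'). */
#include <glob.h>
#include <stdio.h>

static int count_rx_queues(const char *ifname)
{
    char pattern[256];
    glob_t g;
    int n;

    snprintf(pattern, sizeof(pattern),
             "/sys/class/net/%s/queues/rx-*", ifname);
    if (glob(pattern, 0, NULL, &g) != 0)
        return 0;            /* no matches (or error): report zero queues */
    n = (int)g.gl_pathc;
    globfree(&g);
    return n;
}

int main(void)
{
    printf("eth0 rx queues: %d\n", count_rx_queues("eth0"));
    return 0;
}
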
Makefile
7 queues.py \
/linux-master/drivers/net/ethernet/wangxun/txgbe/
txgbe_irq.h
4 void txgbe_irq_enable(struct wx *wx, bool queues);
/linux-master/drivers/gpu/drm/imagination/
pvr_context.c
170 * pvr_context_destroy_queues() - Destroy all queues attached to a context.
171 * @ctx: Context to destroy queues on.
174 * It releases all resources attached to the queues bound to this context.
180 pvr_queue_destroy(ctx->queues.fragment);
181 pvr_queue_destroy(ctx->queues.geometry);
184 pvr_queue_destroy(ctx->queues.compute);
187 pvr_queue_destroy(ctx->queues.transfer);
193 * pvr_context_create_queues() - Create all queues attached to a context.
194 * @ctx: Context to create queues on.
210 ctx->queues
[all...]
pvr_context.h
64 * @faulty: Set to 1 when the context queues had unfinished job when
72 /** @queues: Union containing all kind of queues. */
87 } queues; member in struct:pvr_context
95 return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.geometry : NULL;
97 return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.fragment : NULL;
99 return ctx->type == DRM_PVR_CTX_TYPE_COMPUTE ? ctx->queues.compute : NULL;
101 return ctx->type == DRM_PVR_CTX_TYPE_TRANSFER_FRAG ? ctx->queues.transfer : NULL;
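
pvr_context.h keeps each context's queues in a union and gates every accessor on ctx->type, returning NULL on a mismatch. A compilable sketch of that tagged-union pattern follows; the types and names are illustrative, not the driver's (the real union covers geometry/fragment, compute, and transfer queues).

#include <stddef.h>

struct queue { int id; };

enum ctx_type { CTX_RENDER, CTX_COMPUTE };

struct ctx {
    enum ctx_type type;
    union {
        struct { struct queue *geometry, *fragment; } render;
        struct { struct queue *compute; } compute;
    } queues;
};

/* Hand out the geometry queue only for render contexts, mirroring
 * ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.geometry : NULL. */
static struct queue *ctx_geom_queue(struct ctx *ctx)
{
    return ctx->type == CTX_RENDER ? ctx->queues.render.geometry : NULL;
}

int main(void)
{
    struct queue geom = { .id = 1 };
    struct ctx ctx = { .type = CTX_RENDER };

    ctx.queues.render.geometry = &geom;
    return ctx_geom_queue(&ctx) ? 0 : 1;
}
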
pvr_queue.c
523 job->ctx->queues.fragment);
564 lockdep_assert_held(&pvr_dev->queues.lock);
574 list_move_tail(&queue->node, &pvr_dev->queues.idle);
576 list_move_tail(&queue->node, &pvr_dev->queues.active);
596 mutex_lock(&pvr_dev->queues.lock);
598 mutex_unlock(&pvr_dev->queues.lock);
730 struct pvr_queue *geom_queue = job->ctx->queues.geometry;
731 struct pvr_queue *frag_queue = job->ctx->queues.fragment;
765 /* Make sure we CPU-signal the UFO object, so other queues don't get
819 mutex_lock(&pvr_dev->queues
[all...]
/linux-master/sound/virtio/
virtio_card.h
47 * @queues: Virtqueue wrappers.
64 struct virtio_snd_queue queues[VIRTIO_SND_VQ_MAX]; member in struct:virtio_snd
86 return &snd->queues[VIRTIO_SND_VQ_CONTROL];
92 return &snd->queues[VIRTIO_SND_VQ_EVENT];
98 return &snd->queues[VIRTIO_SND_VQ_TX];
104 return &snd->queues[VIRTIO_SND_VQ_RX];
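
virtio_card.h sizes a single queue array with an enum bound (VIRTIO_SND_VQ_MAX) and exposes one tiny accessor per queue role. A sketch of the same shape, with illustrative names standing in for the virtio-snd types:

#include <stdio.h>

enum vq_role { VQ_CONTROL, VQ_EVENT, VQ_TX, VQ_RX, VQ_MAX };

struct snd_queue { int msgs; };

struct snd_card { struct snd_queue queues[VQ_MAX]; };

/* One accessor per role, as virtio_card.h does for control/event/tx/rx. */
static struct snd_queue *snd_control_queue(struct snd_card *snd)
{
    return &snd->queues[VQ_CONTROL];
}

int main(void)
{
    struct snd_card card = { 0 };

    snd_control_queue(&card)->msgs++;
    printf("control msgs: %d\n", card.queues[VQ_CONTROL].msgs);
    return 0;
}
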
/linux-master/drivers/net/wireless/silabs/wfx/
queue.c
233 struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)]; local
239 /* sort the queues */
243 WARN_ON(num_queues >= ARRAY_SIZE(queues));
244 queues[num_queues] = &wvif->tx_queue[i];
246 if (wfx_tx_queue_get_weight(queues[j]) <
247 wfx_tx_queue_get_weight(queues[j - 1]))
248 swap(queues[j - 1], queues[j]);
256 skb = skb_dequeue(&queues[i]->offchan);
264 atomic_inc(&queues[
[all...]
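
queue.c gathers per-AC queue pointers into one array and insertion-sorts them by weight before dequeuing, so the lightest-loaded queue is served first. A userspace sketch of that sort; the weight field stands in for wfx_tx_queue_get_weight(), and the types are illustrative:

#include <stdio.h>

struct tx_queue { int weight; };

#define SWAP(a, b) do { struct tx_queue *t = (a); (a) = (b); (b) = t; } while (0)

int main(void)
{
    struct tx_queue q[4] = { {3}, {1}, {2}, {0} };
    struct tx_queue *queues[4];
    int num_queues = 0;
    int i, j;

    for (i = 0; i < 4; i++)
        queues[num_queues++] = &q[i];

    /* Same shape as the driver's loop: bubble each entry toward the
     * front while it is lighter than its predecessor. */
    for (i = 1; i < num_queues; i++)
        for (j = i; j > 0 && queues[j]->weight < queues[j - 1]->weight; j--)
            SWAP(queues[j - 1], queues[j]);

    for (i = 0; i < num_queues; i++)
        printf("weight %d\n", queues[i]->weight);
    return 0;
}
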
data_tx.h
47 void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop);
/linux-master/drivers/net/wireless/realtek/rtw88/
mac.h
37 void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
42 rtw_mac_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, drop);
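
The flush-all wrapper above builds its queue mask as BIT(rtwdev->hw->queues) - 1: with n hardware queues, that sets the low n bits, one per queue. A one-line sketch of the idiom:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
    unsigned int queues = 4;
    unsigned int all = BIT(queues) - 1;   /* 0b1111: queues 0..3 */

    printf("flush mask for %u queues: 0x%x\n", queues, all);
    return 0;
}
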
/linux-master/net/sched/
sch_prio.c
26 struct Qdisc *queues[TCQ_PRIO_BANDS]; member in struct:prio_sched_data
57 return q->queues[q->prio2band[band & TC_PRIO_MAX]];
63 return q->queues[q->prio2band[0]];
65 return q->queues[band];
103 struct Qdisc *qdisc = q->queues[prio];
117 struct Qdisc *qdisc = q->queues[prio];
137 qdisc_reset(q->queues[prio]);
173 qdisc_put(q->queues[prio]);
180 struct Qdisc *queues[TCQ_PRIO_BANDS]; local
198 queues[
[all...]
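
sch_prio classifies by masking the packet priority with TC_PRIO_MAX and mapping it through prio2band[] to pick a band queue, as in q->queues[q->prio2band[band & TC_PRIO_MAX]] above. A sketch of that lookup; the constants mirror the qdisc's, the surrounding types are illustrative stand-ins for struct Qdisc:

#include <stdio.h>

#define TC_PRIO_MAX 15
#define PRIO_BANDS   3

struct prio_sched {
    unsigned char prio2band[TC_PRIO_MAX + 1];
    int queues[PRIO_BANDS];     /* stand-in for struct Qdisc *queues[] */
};

static int classify(const struct prio_sched *q, unsigned int prio)
{
    return q->queues[q->prio2band[prio & TC_PRIO_MAX]];
}

int main(void)
{
    struct prio_sched q = {
        /* the classic pfifo_fast-style map: most priorities to band 1 */
        .prio2band = { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
        .queues    = { 100, 101, 102 },
    };

    printf("prio 6 -> queue %d\n", classify(&q, 6));  /* band 0 */
    return 0;
}
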
sch_multiq.c
25 struct Qdisc **queues; member in struct:multiq_sched_data
54 return q->queues[0];
56 return q->queues[band];
105 qdisc = q->queues[q->curband];
137 qdisc = q->queues[curband];
154 qdisc_reset(q->queues[band]);
166 qdisc_put(q->queues[band]);
168 kfree(q->queues);
196 if (q->queues[i] != &noop_qdisc) {
197 struct Qdisc *child = q->queues[
[all...]
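
Unlike sch_prio's strict priorities, sch_multiq dequeues round-robin: it advances curband past the last-served band, wrapping at the band count, and takes the first band with work. A sketch of that rotation with illustrative types (the real qdisc dequeues skbs from child qdiscs):

#include <stdio.h>

#define BANDS 3

struct multiq {
    int backlog[BANDS];   /* stand-in for per-band child qdiscs */
    int curband;
};

static int multiq_dequeue(struct multiq *q)
{
    for (int tries = 0; tries < BANDS; tries++) {
        q->curband = (q->curband + 1) % BANDS;
        if (q->backlog[q->curband] > 0) {
            q->backlog[q->curband]--;
            return q->curband;
        }
    }
    return -1;              /* all bands empty */
}

int main(void)
{
    struct multiq q = { .backlog = { 2, 0, 1 }, .curband = BANDS - 1 };
    int band;

    while ((band = multiq_dequeue(&q)) >= 0)
        printf("served band %d\n", band);
    return 0;
}
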
/linux-master/drivers/nvme/target/
loop.c
30 struct nvme_loop_queue *queues; member in struct:nvme_loop_ctrl
71 return queue - queue->ctrl->queues;
176 struct nvme_loop_queue *queue = &ctrl->queues[0];
198 iod->queue = &ctrl->queues[queue_idx];
222 struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
242 struct nvme_loop_queue *queue = &ctrl->queues[0];
266 if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
268 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
285 kfree(ctrl->queues);
296 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[
[all...]
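
loop.c keeps all queues in one array hanging off the controller, so a queue's index is recovered by pointer subtraction (queue - queue->ctrl->queues, line 71 above) and the admin queue is simply &ctrl->queues[0]. A sketch of the idiom with illustrative types:

#include <stdio.h>
#include <stdlib.h>

struct loop_ctrl;

struct loop_queue { struct loop_ctrl *ctrl; };

struct loop_ctrl { struct loop_queue *queues; };

/* Index = element pointer minus array base; no index field needed. */
static long queue_idx(struct loop_queue *queue)
{
    return queue - queue->ctrl->queues;
}

int main(void)
{
    struct loop_ctrl ctrl;
    int nr = 4;

    ctrl.queues = calloc(nr, sizeof(*ctrl.queues));
    if (!ctrl.queues)
        return 1;
    for (int i = 0; i < nr; i++)
        ctrl.queues[i].ctrl = &ctrl;

    printf("queue index: %ld\n", queue_idx(&ctrl.queues[2]));  /* 2 */
    free(ctrl.queues);
    return 0;
}
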
/linux-master/drivers/scsi/aacraid/
comminit.c
373 struct aac_entry * queues; local
375 struct aac_queue_block * comm = dev->queues;
394 queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
397 comm->queue[HostNormCmdQueue].base = queues;
399 queues += HOST_NORM_CMD_ENTRIES;
403 comm->queue[HostHighCmdQueue].base = queues;
406 queues += HOST_HIGH_CMD_ENTRIES;
410 comm->queue[AdapNormCmdQueue].base = queues;
413 queues += ADAP_NORM_CMD_ENTRIES;
417 comm->queue[AdapHighCmdQueue].base = queues;
[all...]
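
comminit.c carves several queues out of one contiguous allocation: a cursor pointer is assigned as each queue's base, then advanced by that queue's entry count. A userspace sketch of the carving step; entry counts and names are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define HOST_CMD_ENTRIES 8
#define ADAP_CMD_ENTRIES 4

struct entry { int data; };

int main(void)
{
    struct entry *base = calloc(HOST_CMD_ENTRIES + ADAP_CMD_ENTRIES,
                                sizeof(*base));
    struct entry *queues = base;
    struct entry *host_q, *adap_q;

    if (!base)
        return 1;
    host_q = queues;                 /* first queue starts at the base  */
    queues += HOST_CMD_ENTRIES;      /* cursor moves past its entries   */
    adap_q = queues;                 /* next queue starts right after   */
    queues += ADAP_CMD_ENTRIES;      /* cursor now past the last queue  */

    printf("adap queue offset: %td entries\n", adap_q - host_q);
    free(base);
    return 0;
}
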
/linux-master/drivers/media/platform/nxp/imx8-isi/
imx8-isi-m2m.c
52 /* Protects the m2m vb2 queues */
58 } queues; member in struct:mxc_isi_m2m_ctx
85 return &ctx->queues.out;
87 return &ctx->queues.cap;
112 src_vbuf->sequence = ctx->queues.out.sequence++;
113 dst_vbuf->sequence = ctx->queues.cap.sequence++;
135 .width = ctx->queues.out.format.width,
136 .height = ctx->queues.out.format.height,
139 .width = ctx->queues.cap.format.width,
140 .height = ctx->queues
[all...]
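
Each m2m context above owns an output and a capture queue, and every processed frame bumps both sequence counters (lines 112-113). A sketch of that pairing with illustrative types:

#include <stdio.h>

struct m2m_queue { unsigned int sequence; };

struct m2m_ctx {
    struct {
        struct m2m_queue out;
        struct m2m_queue cap;
    } queues;
};

int main(void)
{
    struct m2m_ctx ctx = { 0 };

    /* one src/dst buffer pair per device run, as in the driver */
    for (int frame = 0; frame < 3; frame++) {
        unsigned int src_seq = ctx.queues.out.sequence++;
        unsigned int dst_seq = ctx.queues.cap.sequence++;

        printf("frame %d: src seq %u, dst seq %u\n", frame, src_seq, dst_seq);
    }
    return 0;
}
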
/linux-master/drivers/target/
target_core_tmr.c
118 flush_work(&dev->queues[i].sq.work);
120 spin_lock_irqsave(&dev->queues[i].lock, flags);
121 list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
148 spin_unlock_irqrestore(&dev->queues[i].lock, flags);
163 spin_unlock_irqrestore(&dev->queues[i].lock, flags);
301 flush_work(&dev->queues[i].sq.work);
303 spin_lock_irqsave(&dev->queues[i].lock, flags);
304 list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
333 spin_unlock_irqrestore(&dev->queues[i].lock, flags);
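
target_core_tmr.c scans per-queue state lists one queue at a time: flush that queue's pending work, then walk its list under that queue's own lock. A userspace analogue with pthread mutexes standing in for the spinlocks and a counter standing in for the state list:

#include <pthread.h>
#include <stdio.h>

#define NR_QUEUES 4

struct queue {
    pthread_mutex_t lock;
    int nr_cmds;
};

int main(void)
{
    struct queue queues[NR_QUEUES];

    for (int i = 0; i < NR_QUEUES; i++) {
        pthread_mutex_init(&queues[i].lock, NULL);
        queues[i].nr_cmds = i;
    }

    for (int i = 0; i < NR_QUEUES; i++) {
        /* the driver flushes sq.work here before taking the lock */
        pthread_mutex_lock(&queues[i].lock);
        printf("queue %d: %d in-flight commands\n", i, queues[i].nr_cmds);
        pthread_mutex_unlock(&queues[i].lock);
    }
    return 0;
}
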
/linux-master/include/linux/
ptr_ring.h
627 void ***queues; local
630 queues = kmalloc_array_noprof(nrings, sizeof(*queues), gfp);
631 if (!queues)
635 queues[i] = __ptr_ring_init_queue_alloc_noprof(size, gfp);
636 if (!queues[i])
643 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
650 kvfree(queues[i]);
652 kfree(queues);
[all...]
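
The ptr_ring snippet resizes many rings at once: allocate one new queue per ring up front, swap each into place, then free the displaced old queues; on allocation failure, unwind whatever was allocated. A userspace analogue of that shape with malloc instead of the kernel helpers:

#include <stdlib.h>

struct ring { void **queue; int size; };

static int rings_resize(struct ring **rings, int nrings, int size)
{
    void ***queues = malloc(nrings * sizeof(*queues));
    int i;

    if (!queues)
        return -1;

    for (i = 0; i < nrings; i++) {
        queues[i] = calloc(size, sizeof(void *));
        if (!queues[i])
            goto nomem;
    }

    for (i = 0; i < nrings; i++) {
        /* swap: keep the old queue in queues[i] so it can be freed */
        void **old = rings[i]->queue;

        rings[i]->queue = queues[i];
        rings[i]->size = size;
        queues[i] = old;
    }

    for (i = 0; i < nrings; i++)
        free(queues[i]);
    free(queues);
    return 0;

nomem:
    while (--i >= 0)
        free(queues[i]);
    free(queues);
    return -1;
}

int main(void)
{
    struct ring r = { 0 };
    struct ring *rings[] = { &r };
    int ret = rings_resize(rings, 1, 16);

    free(r.queue);
    return ret;
}
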
/linux-master/tools/perf/util/
intel-bts.c
46 struct auxtrace_queues queues; member in struct:intel_bts
211 for (i = 0; i < bts->queues.nr_queues; i++) {
212 ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
222 if (bts->queues.new_data) {
223 bts->queues.new_data = false;
465 queue = &btsq->bts->queues.queue_array[btsq->queue_nr];
539 struct auxtrace_queues *queues = &bts->queues; local
542 for (i = 0; i < queues->nr_queues; i++) {
543 struct auxtrace_queue *queue = &bts->queues
710 struct auxtrace_queues *queues = &bts->queues; local
[all...]
s390-cpumsf.c
47 * To sort the queues in chronological order, all queue access is controlled
54 * After the auxtrace infrastructure has been setup, the auxtrace queues are
61 * record sample, the auxtrace queues will be processed. As auxtrace queues
170 struct auxtrace_queues queues; member in struct:s390_cpumsf
203 if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)
206 q = &sf->queues.queue_array[sample->cpu];
701 queue = &sfq->sf->queues.queue_array[sfq->queue_nr];
825 for (i = 0; i < sf->queues.nr_queues; i++) {
826 ret = s390_cpumsf_setup_queue(sf, &sf->queues
1016 struct auxtrace_queues *queues = &sf->queues; local
[all...]
auxtrace.c
221 int auxtrace_queues__init_nr(struct auxtrace_queues *queues, int nr_queues) argument
223 queues->nr_queues = nr_queues;
224 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
225 if (!queues->queue_array)
230 int auxtrace_queues__init(struct auxtrace_queues *queues) argument
232 return auxtrace_queues__init_nr(queues, AUXTRACE_INIT_NR_QUEUES);
235 static int auxtrace_queues__grow(struct auxtrace_queues *queues, argument
238 unsigned int nr_queues = queues->nr_queues;
248 if (nr_queues < queues
292 auxtrace_queues__queue_buffer(struct auxtrace_queues *queues, unsigned int idx, struct auxtrace_buffer *buffer) argument
326 auxtrace_queues__split_buffer(struct auxtrace_queues *queues, unsigned int idx, struct auxtrace_buffer *buffer) argument
364 auxtrace_queues__add_buffer(struct auxtrace_queues *queues, struct perf_session *session, unsigned int idx, struct auxtrace_buffer *buffer, struct auxtrace_buffer **buffer_ptr) argument
409 auxtrace_queues__add_event(struct auxtrace_queues *queues, struct perf_session *session, union perf_event *event, off_t data_offset, struct auxtrace_buffer **buffer_ptr) argument
429 auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues, struct perf_session *session, off_t file_offset, size_t sz) argument
456 auxtrace_queues__free(struct auxtrace_queues *queues) argument
994 auxtrace_queues__process_index_entry(struct auxtrace_queues *queues, struct perf_session *session, struct auxtrace_index_entry *ent) argument
1002 auxtrace_queues__process_index(struct auxtrace_queues *queues, struct perf_session *session) argument
1042 auxtrace_queues__sample_queue(struct auxtrace_queues *queues, struct perf_sample *sample, struct perf_session *session) argument
1066 auxtrace_queues__add_sample(struct auxtrace_queues *queues, struct perf_session *session, struct perf_sample *sample, u64 data_offset, u64 reference) argument
[all...]
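
struct auxtrace_queues, shared by the intel-bts, s390-cpumsf, and arm-spe decoders around it, is a growable array of queues: init_nr allocates the array at a default size and grow enlarges it on demand. A userspace analogue of that pair; the names loosely mirror the perf helpers but the implementation here is mine:

#include <stdlib.h>
#include <string.h>

struct aux_queue { int nr_buffers; };

struct aux_queues {
    struct aux_queue *queue_array;
    unsigned int nr_queues;
};

#define AUX_INIT_NR_QUEUES 32

static int aux_queues_init_nr(struct aux_queues *queues, unsigned int nr)
{
    queues->nr_queues = nr;
    queues->queue_array = calloc(nr, sizeof(*queues->queue_array));
    return queues->queue_array ? 0 : -1;
}

static int aux_queues_grow(struct aux_queues *queues, unsigned int new_nr)
{
    struct aux_queue *arr;

    if (new_nr <= queues->nr_queues)
        return 0;
    arr = realloc(queues->queue_array, new_nr * sizeof(*arr));
    if (!arr)
        return -1;
    /* zero the newly added tail so fresh queues start empty */
    memset(arr + queues->nr_queues, 0,
           (new_nr - queues->nr_queues) * sizeof(*arr));
    queues->queue_array = arr;
    queues->nr_queues = new_nr;
    return 0;
}

int main(void)
{
    struct aux_queues q;
    int ret;

    if (aux_queues_init_nr(&q, AUX_INIT_NR_QUEUES))
        return 1;
    ret = aux_queues_grow(&q, 2 * q.nr_queues);
    free(q.queue_array);
    return ret;
}
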
arm-spe.c
42 struct auxtrace_queues queues; member in struct:arm_spe
154 queue = &speq->spe->queues.queue_array[speq->queue_nr];
271 arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);
768 for (i = 0; i < spe->queues.nr_queues; i++) {
769 ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);
779 if (spe->queues.new_data) {
780 spe->queues.new_data = false;
822 queue = &spe->queues.queue_array[queue_nr];
863 struct auxtrace_queues *queues = &spe->queues; local
1035 struct auxtrace_queues *queues = &spe->queues; local
[all...]
/linux-master/drivers/net/ethernet/netronome/nfp/
nfp_net_debugfs.c
126 struct dentry *queues, *tx, *rx, *xdp; local
140 queues = debugfs_create_dir("queue", nn->debugfs_dir);
142 rx = debugfs_create_dir("rx", queues);
143 tx = debugfs_create_dir("tx", queues);
144 xdp = debugfs_create_dir("xdp", queues);
/linux-master/drivers/vdpa/alibaba/
eni_vdpa.c
45 int queues; member in struct:eni_vdpa
118 for (i = 0; i < eni_vdpa->queues; i++) {
164 int queues = eni_vdpa->queues; local
165 int vectors = queues + 1;
177 for (i = 0; i < queues; i++) {
195 irq = pci_irq_vector(pdev, queues);
202 vp_legacy_config_vector(ldev, queues);
500 eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);
502 eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
[all...]
/linux-master/drivers/vdpa/virtio_pci/
vp_vdpa.c
42 int queues; member in struct:vp_vdpa
116 for (i = 0; i < vp_vdpa->queues; i++) {
162 int queues = vp_vdpa->queues; local
166 for (i = 0; i < queues; i++) {
181 for (i = 0; i < queues; i++) {
515 vp_vdpa->queues = vp_modern_get_num_queues(mdev);
539 vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
548 for (i = 0; i < vp_vdpa->queues; i++) {
562 ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
[all...]
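
Both vdpa drivers above share one interrupt layout: one vector per data queue plus a final vector for config changes, so vectors = queues + 1 and the config vector's index equals the queue count. A sketch of the arithmetic, with plain loop indices standing in for MSI-X vectors:

#include <stdio.h>

int main(void)
{
    int queues = 4;
    int vectors = queues + 1;        /* one per queue + one for config */

    for (int i = 0; i < queues; i++)
        printf("queue %d -> vector %d\n", i, i);
    printf("config -> vector %d (of %d)\n", queues, vectors);
    return 0;
}
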
/linux-master/fs/autofs/
waitq.c
28 wq = sbi->queues;
29 sbi->queues = NULL; /* Erase all wait queues */
184 for (wq = sbi->queues; wq; wq = wq->next) {
389 wq->next = sbi->queues;
390 sbi->queues = wq;
493 for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
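
waitq.c pushes new wait entries onto the head of a singly linked list (wq->next = sbi->queues; sbi->queues = wq, lines 389-390) and removes entries with a pointer-to-pointer scan like the one on line 493, which needs no special case for the head. A sketch of both idioms with illustrative types:

#include <stdio.h>

struct waitq {
    int token;
    struct waitq *next;
};

static void push(struct waitq **head, struct waitq *wq)
{
    wq->next = *head;          /* wq->next = sbi->queues; */
    *head = wq;                /* sbi->queues = wq;       */
}

static struct waitq *unlink_token(struct waitq **head, int token)
{
    struct waitq **wql, *wq;

    for (wql = head; (wq = *wql) != NULL; wql = &wq->next) {
        if (wq->token == token) {
            *wql = wq->next;   /* splice out; no head special case */
            return wq;
        }
    }
    return NULL;
}

int main(void)
{
    struct waitq *queues = NULL;
    struct waitq a = { .token = 1 }, b = { .token = 2 };

    push(&queues, &a);
    push(&queues, &b);
    printf("unlinked token %d\n", unlink_token(&queues, 1)->token);
    return 0;
}
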

Completed in 281 milliseconds
