Searched refs:queued (Results 1 - 25 of 109) sorted by relevance


/linux-master/security/integrity/ima/
ima_asymmetric_keys.c
33 bool queued = false; local
43 queued = ima_queue_key(keyring, payload, payload_len);
45 if (queued)
ima_queue_keys.c
18 * right away or should be queued for processing later.
29 * If custom IMA policy is not loaded then keys queued up
40 * queued up in case custom IMA policy was not loaded.
49 * This function sets up a worker to free queued keys in case
107 bool queued = false; local
117 queued = true;
121 if (!queued)
124 return queued;
128 * ima_process_queued_keys() - process keys queued for measurement
130 * This function sets ima_process_keys to true and processes queued key
[all...]
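
The ima_queue_keys.c hits above describe a queue-until-ready pattern: keys seen before a custom IMA policy is loaded are queued, and ima_process_queued_keys() later drains the queue once measurement can proceed. Below is a minimal stand-alone sketch of that pattern; the names and data structures are illustrative only, not the kernel implementation.

/* Toy model of "queue until the policy is loaded, then drain the queue".
 * Names are invented for illustration; this is not the IMA code. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct queued_key {
        struct queued_key *next;
        char payload[32];
};

static bool process_keys;              /* set once the "policy" is loaded */
static struct queued_key *key_queue;   /* simple singly linked queue (LIFO here) */

static bool queue_or_measure(const char *payload)
{
        struct queued_key *k;

        if (process_keys) {
                printf("measure now: %s\n", payload);
                return false;          /* handled immediately, not queued */
        }

        k = calloc(1, sizeof(*k));
        if (!k)
                return false;
        snprintf(k->payload, sizeof(k->payload), "%s", payload);
        k->next = key_queue;
        key_queue = k;
        return true;
}

static void process_queued_keys(void)
{
        process_keys = true;           /* from here on, measure directly */
        while (key_queue) {
                struct queued_key *k = key_queue;

                key_queue = k->next;
                printf("measure queued: %s\n", k->payload);
                free(k);
        }
}

int main(void)
{
        queue_or_measure("keyA");      /* queued: no policy yet */
        queue_or_measure("keyB");      /* queued */
        process_queued_keys();         /* policy loaded: drain the queue */
        queue_or_measure("keyC");      /* measured immediately */
        return 0;
}
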
/linux-master/drivers/net/wireless/mediatek/mt76/
debugfs.c
59 seq_puts(s, " queue | hw-queued | head | tail |\n");
67 i, q->queued, q->head, q->tail);
77 int i, queued; local
79 seq_puts(s, " queue | hw-queued | head | tail |\n");
83 queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;
85 i, queued, q->head, q->tail);
sdio.c
317 q->queued = 0;
372 if (q->queued > 0) {
375 q->queued--;
439 while (q->queued > 0) {
455 if (!q->queued)
527 if (q->queued == q->ndesc)
542 q->queued++;
553 if (q->queued == q->ndesc)
570 q->queued++;
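
The mt76 hits show a descriptor-ring occupancy counter: q->queued is incremented on enqueue, decremented on completion, and the ring is treated as full when queued == ndesc; the debugfs snippet additionally computes ndesc - queued for USB queues, where the field is accounted differently. A stand-alone sketch of that accounting follows; the struct and function names are illustrative, not the driver's.

/* Toy model of mt76-style ring occupancy accounting. */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
        int ndesc;      /* ring size */
        int queued;     /* descriptors currently in flight */
        int head, tail;
};

static bool toy_enqueue(struct toy_queue *q)
{
        if (q->queued == q->ndesc)      /* ring full, caller must back off */
                return false;
        q->head = (q->head + 1) % q->ndesc;
        q->queued++;
        return true;
}

static void toy_complete(struct toy_queue *q)
{
        if (q->queued > 0) {
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;
        }
}

int main(void)
{
        struct toy_queue q = { .ndesc = 4 };

        for (int i = 0; i < 6; i++)
                printf("enqueue %d: %s\n", i, toy_enqueue(&q) ? "ok" : "full");
        toy_complete(&q);
        printf("after one completion: queued=%d free=%d\n",
               q.queued, q.ndesc - q.queued);
        return 0;
}
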
/linux-master/drivers/gpu/drm/
drm_flip_work.c
51 list_add_tail(&task->node, &work->queued);
79 * drm_flip_work_commit - commit queued work
81 * @wq: the work-queue to run the queued work on
83 * Trigger work previously queued by drm_flip_work_queue() to run
86 * prior), and then from vblank irq commit the queued work.
94 list_splice_tail(&work->queued, &work->commited);
95 INIT_LIST_HEAD(&work->queued);
138 INIT_LIST_HEAD(&work->queued);
155 WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
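
The drm_flip_work hits split the work into two phases: drm_flip_work_queue() adds values to the queued list (often from atomic context while programming a flip), and drm_flip_work_commit() moves them to the commit list and runs them on a workqueue, typically from the vblank interrupt. The following is a sketch of how a driver might use this API; it assumes a kernel build context, and the callback and struct names here are invented for illustration rather than taken from any real driver.

/* Sketch of typical drm_flip_work usage (kernel context assumed). */
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
#include <linux/workqueue.h>

static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
        /* Runs later, in process context, once per queued value. */
        drm_framebuffer_put(val);
}

struct toy_crtc {
        struct drm_flip_work unref_work;
        struct workqueue_struct *wq;
};

static void toy_crtc_init(struct toy_crtc *c)
{
        drm_flip_work_init(&c->unref_work, "fb unref", unref_fb_worker);
        c->wq = system_wq;              /* any workqueue will do */
}

/* Called while programming a page flip (possibly from atomic context). */
static void toy_queue_old_fb(struct toy_crtc *c, struct drm_framebuffer *old_fb)
{
        drm_flip_work_queue(&c->unref_work, old_fb);
}

/* Called from the vblank interrupt once the flip has completed. */
static void toy_vblank_irq(struct toy_crtc *c)
{
        drm_flip_work_commit(&c->unref_work, c->wq);
}
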
/linux-master/net/rose/
rose_in.c
105 int queued = 0; local
168 queued = 1;
205 return queued;
266 int queued = 0, frametype, ns, nr, q, d, m; local
275 queued = rose_state1_machine(sk, skb, frametype);
278 queued = rose_state2_machine(sk, skb, frametype);
281 queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
284 queued = rose_state4_machine(sk, skb, frametype);
287 queued = rose_state5_machine(sk, skb, frametype);
293 return queued;
[all...]
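
The ROSE state machines, like the DCCP, AX.25, NetRom and X.25 entries below, follow one convention: each per-state handler returns a queued flag that tells the caller whether the skb was taken over (queued somewhere); if nothing queued it, the caller frees it. A stand-alone sketch of that ownership convention, with a plain heap buffer standing in for an skb and all names invented for illustration:

/* Model of the "return queued, caller frees otherwise" convention. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum toy_state { TOY_STATE1, TOY_STATE2 };

static int toy_state1_machine(char *frame)
{
        /* This state never consumes the frame. */
        return 0;
}

static int toy_state2_machine(char *frame)
{
        /* Pretend the frame was queued to a receive queue;
         * ownership passes to that queue. */
        printf("queued: %s\n", frame);
        return 1;
}

static int toy_process_rx_frame(enum toy_state state, char *frame)
{
        int queued = 0;

        switch (state) {
        case TOY_STATE1:
                queued = toy_state1_machine(frame);
                break;
        case TOY_STATE2:
                queued = toy_state2_machine(frame);
                break;
        }
        return queued;
}

int main(void)
{
        char *frame = strdup("hello");  /* caller owns the frame... */

        if (!toy_process_rx_frame(TOY_STATE1, frame))
                free(frame);            /* ...and frees it if nobody queued it */
        return 0;
}
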
/linux-master/net/dccp/
input.c
45 int queued = 0; local
76 queued = 1;
86 return queued;
91 int queued = 0; local
101 return queued;
113 queued = 1;
120 return queued;
524 int queued = 0; local
562 queued = 1; /* packet was queued
578 int queued = 0; local
[all...]
/linux-master/include/drm/
drm_flip_work.h
46 * @val: value queued via drm_flip_work_queue()
58 * @queued: queued tasks
60 * @lock: lock to access queued and commited lists
66 struct list_head queued; member in struct:drm_flip_work
/linux-master/net/ax25/
ax25_ds_in.c
147 int queued = 0; local
240 queued = ax25_rx_iframe(ax25, skb);
273 return queued;
281 int queued = 0, frametype, ns, nr, pf; local
287 queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);
290 queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);
293 queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);
297 return queued;
ax25_std_in.c
143 int queued = 0; local
225 queued = ax25_rx_iframe(ax25, skb);
258 return queued;
268 int queued = 0; local
380 queued = ax25_rx_iframe(ax25, skb);
413 return queued;
421 int queued = 0, frametype, ns, nr, pf; local
427 queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);
430 queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);
433 queued
[all...]
ax25_in.c
103 int queued = 0; local
145 queued = 1;
151 return queued;
159 int queued = 0; local
167 queued = ax25_std_frame_in(ax25, skb, type);
173 queued = ax25_ds_frame_in(ax25, skb, type);
175 queued = ax25_std_frame_in(ax25, skb, type);
180 return queued;
305 * Process the frame. If it is queued up internally it
/linux-master/net/netrom/
nr_in.c
153 int queued = 0; local
225 queued = 1;
272 return queued;
279 int queued = 0, frametype; local
288 queued = nr_state1_machine(sk, skb, frametype);
291 queued = nr_state2_machine(sk, skb, frametype);
294 queued = nr_state3_machine(sk, skb, frametype);
300 return queued;
/linux-master/net/x25/
x25_in.c
210 int queued = 0; local
277 queued = 1;
315 queued = !sock_queue_rcv_skb(sk, skb);
319 queued = 1;
330 return queued;
418 int queued = 0, frametype, ns, nr, q, d, m; local
427 queued = x25_state1_machine(sk, skb, frametype);
430 queued = x25_state2_machine(sk, skb, frametype);
433 queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
436 queued
450 int queued = x25_process_rx_frame(sk, skb); local
[all...]
x25_dev.c
51 int queued = 1; local
56 queued = x25_process_rx_frame(sk, skb);
58 queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
62 return queued;
/linux-master/tools/testing/selftests/net/mptcp/
mptcp_inq.c
208 int nsd, ret, queued = -1; local
211 ret = ioctl(fd, TIOCOUTQ, &queued);
219 if ((size_t)queued > total)
220 xerror("TIOCOUTQ %u, but only %zu expected\n", queued, total);
221 assert(nsd <= queued);
223 if (queued == 0)
232 xerror("still tx data queued after %u ms\n", timeout);
356 unsigned int queued; local
358 ret = ioctl(fd, FIONREAD, &queued);
361 if (queued > expect_le
[all...]
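
The mptcp_inq.c selftest polls two queue-depth ioctls: TIOCOUTQ for bytes still sitting in the send queue and FIONREAD for bytes waiting in the receive queue. The following stand-alone example issues the same two ioctls on an AF_UNIX socket pair for brevity; TIOCOUTQ support varies by socket family, so the return value is checked rather than assumed.

/* Demonstration of the TIOCOUTQ / FIONREAD queue-depth ioctls. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int sv[2];
        int outq = -1, inq = -1;
        const char msg[] = "hello queue";

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
                perror("socketpair");
                return 1;
        }

        if (write(sv[0], msg, sizeof(msg)) < 0)
                perror("write");

        if (ioctl(sv[0], TIOCOUTQ, &outq) < 0)
                perror("TIOCOUTQ");     /* not supported by every family */
        if (ioctl(sv[1], FIONREAD, &inq) < 0)
                perror("FIONREAD");

        printf("send queue: %d bytes, receive queue: %d bytes\n", outq, inq);

        close(sv[0]);
        close(sv[1]);
        return 0;
}
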
/linux-master/drivers/md/
dm-cache-background-tracker.c
27 struct list_head queued; member in struct:background_tracker
48 INIT_LIST_HEAD(&b->queued);
67 list_for_each_entry_safe (w, tmp, &b->queued, list) {
214 list_add(&w->list, &b->queued);
228 if (list_empty(&b->queued))
231 w = list_first_entry(&b->queued, struct bt_work, list);
/linux-master/sound/firewire/fireworks/
fireworks_hwdep.c
128 bool queued; local
133 queued = efw->push_ptr != efw->pull_ptr;
135 while (!dev_lock_changed && !queued) {
144 queued = efw->push_ptr != efw->pull_ptr;
151 else if (queued)
/linux-master/virt/kvm/
async_pf.c
112 * need to be flushed (but sanity check that the work wasn't queued).
161 vcpu->async_pf.queued = 0;
181 vcpu->async_pf.queued--;
195 if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
219 vcpu->async_pf.queued++;
250 vcpu->async_pf.queued++;
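
The async_pf.c hits show a bounded counter of outstanding work: queued is incremented when async page-fault work is set up, decremented when completed work is reaped, and new work is refused once the per-vCPU cap (ASYNC_PF_PER_VCPU) is reached. A tiny stand-alone model of that admission check, with the names and cap value chosen purely for illustration:

/* Model of a bounded "queued" counter with an admission cap. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_ASYNC_PER_VCPU 64

struct toy_vcpu_async {
        unsigned int queued;
};

static bool toy_setup_async_work(struct toy_vcpu_async *a)
{
        if (a->queued >= TOY_ASYNC_PER_VCPU)
                return false;           /* too much in flight, handle synchronously */
        a->queued++;
        return true;
}

static void toy_reap_completed(struct toy_vcpu_async *a)
{
        if (a->queued)
                a->queued--;
}

int main(void)
{
        struct toy_vcpu_async a = { 0 };
        unsigned int accepted = 0;

        for (int i = 0; i < 100; i++)
                accepted += toy_setup_async_work(&a);
        printf("accepted %u of 100 requests\n", accepted);  /* 64 */
        toy_reap_completed(&a);
        printf("queued now %u\n", a.queued);                /* 63 */
        return 0;
}
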
/linux-master/fs/xfs/
xfs_mru_cache.c
102 unsigned int queued; /* work has been queued */ member in struct:xfs_mru_cache
204 if (!mru->queued) {
205 mru->queued = 1;
280 mru->queued = next;
281 if ((mru->queued > 0)) {
389 if (mru->queued) {
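
These xfs_mru_cache.c hits appear to use queued as a schedule-once flag: the first caller sets it and schedules the reaper work, later callers skip re-scheduling while it is still set, and the worker updates it depending on whether more reaping remains. A short stand-alone model of that flag (single-threaded, names invented; a counter stands in for queue_delayed_work()):

/* Model of an "only schedule the work once" flag. */
#include <stdbool.h>
#include <stdio.h>

struct toy_mru {
        unsigned int queued;           /* reap work already scheduled? */
        unsigned int scheduled_count;  /* how many times we "scheduled" */
};

static void toy_schedule_reap(struct toy_mru *mru)
{
        if (!mru->queued) {
                mru->queued = 1;
                mru->scheduled_count++;  /* stands in for scheduling the work */
        }
}

static void toy_reap_worker(struct toy_mru *mru, bool more_work)
{
        /* The worker re-arms itself while entries remain. */
        mru->queued = more_work ? 1 : 0;
        if (more_work)
                mru->scheduled_count++;
}

int main(void)
{
        struct toy_mru mru = { 0 };

        toy_schedule_reap(&mru);   /* schedules */
        toy_schedule_reap(&mru);   /* no-op, already queued */
        toy_reap_worker(&mru, false);
        toy_schedule_reap(&mru);   /* schedules again */
        printf("work scheduled %u times\n", mru.scheduled_count);  /* 2 */
        return 0;
}
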
/linux-master/fs/btrfs/
discard.c
129 bool queued; local
133 queued = !list_empty(&block_group->discard_list);
146 if (!queued)
158 bool queued = false; local
168 queued = !list_empty(&block_group->discard_list);
176 if (queued && !running)
/linux-master/drivers/dma/qcom/
hidma.h
30 bool queued; /* flag whether this is pending */ member in struct:hidma_tre
102 struct list_head queued; member in struct:hidma_chan
/linux-master/kernel/sched/
stop_task.c
84 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) argument
/linux-master/drivers/media/platform/renesas/vsp1/
vsp1_dl.c
208 * @lock: protects the free, active, queued, and pending lists
211 * @queued: list queued to the hardware (written to the DL registers)
212 * @pending: list waiting to be queued to the hardware
224 struct vsp1_dl_list *queued; member in struct:vsp1_dl_manager
791 * loop on the same list until a new one is queued. In singleshot mode
841 if (!dlm->queued)
871 * If a previous display list has been queued to the hardware but not
873 * case we can't replace the queued list by the new one, as we could
875 * be queued u
[all...]
/linux-master/sound/usb/
card.h
58 int queued; /* queued data bytes by this urb */ member in struct:snd_urb_ctx
96 unsigned int next_packet_queued; /* queued items in the ring buffer */
/linux-master/block/
blk-throttle.h
14 * To avoid such starvation, dispatched bios are queued separately
19 * throtl_qnode is used to keep the queued bios separated by their sources.
20 * Bios are queued to throtl_qnode which in turn is queued to
24 * belongs to a throtl_grp and gets queued on itself or the parent, so
26 * queued and decrementing when dequeued is enough to keep the whole blkg
30 struct list_head node; /* service_queue->queued[] */
31 struct bio_list bios; /* queued bios */
39 * Bios queued directly to this service_queue or dispatched from
42 struct list_head queued[ member in struct:throtl_service_queue
[all...]
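
The blk-throttle.h comment explains the two-level design: to keep one issuer from starving the others, bios are not put on a single list; each source queues its bios on its own throtl_qnode, and the qnodes themselves sit on the service queue's queued[] lists, so dispatch can rotate across sources. The stand-alone sketch below roughly models that idea with fixed arrays and round-robin dispatch; all names are illustrative, not the block layer's.

/* Toy model of per-source queue nodes with round-robin dispatch. */
#include <stdio.h>

#define NSRC 2
#define NBIO 4

struct toy_qnode {
        const char *source;
        const char *bios[NBIO];
        int head, tail;
};

/* "Service queue": rotate over the nodes, taking one bio from each. */
static void toy_dispatch(struct toy_qnode *nodes, int n)
{
        int remaining = 1;

        while (remaining) {
                remaining = 0;
                for (int i = 0; i < n; i++) {
                        struct toy_qnode *qn = &nodes[i];

                        if (qn->head == qn->tail)
                                continue;
                        printf("dispatch %s from %s\n",
                               qn->bios[qn->head++], qn->source);
                        remaining = 1;
                }
        }
}

int main(void)
{
        struct toy_qnode nodes[NSRC] = {
                { "task-A", { "A1", "A2", "A3" }, 0, 3 },
                { "task-B", { "B1" },             0, 1 },
        };

        /* Output: A1 B1 A2 A3, so task-B is not stuck behind all of task-A. */
        toy_dispatch(nodes, NSRC);
        return 0;
}
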

Completed in 530 milliseconds
