Lines Matching defs:list

126 	struct list_head	list;
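
The `struct list_head list;` member at line 126 is the standard kernel pattern for putting an object on an intrusive doubly linked list: the node lives inside the object, and list_entry()/list_for_each_entry() recover the containing structure from the node. A minimal sketch of that embedding (the `struct foo` type and field names are made up for illustration, not taken from io_uring):

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Illustrative only: a made-up object embedding a list node. */
struct foo {
	int			value;
	struct list_head	list;	/* links this foo into a foo list */
};

static LIST_HEAD(foo_list);		/* statically initialized empty list */

static void foo_add(int value)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return;
	f->value = value;
	list_add_tail(&f->list, &foo_list);	/* append at the tail */
}

static void foo_dump(void)
{
	struct foo *f;

	/* list_for_each_entry() recovers 'struct foo' from each embedded node */
	list_for_each_entry(f, &foo_list, list)
		pr_info("foo %d\n", f->value);
}
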
296 * 32 entries per hash list if totally full and uniformly spread, but
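
The comment at line 296 is about sizing a hash table whose buckets are themselves lists: with a fixed bucket count, a completely full and uniformly spread table leaves roughly 32 entries per hash list to walk on lookup. A generic sketch of that bucket-of-lists layout using the kernel hlist API (the table size, key type and entry type here are illustrative stand-ins, not io_uring's actual cancel hash):

#include <linux/hash.h>
#include <linux/list.h>
#include <linux/types.h>

#define CANCEL_HASH_BITS	6	/* 64 buckets, illustrative */

/* Illustrative entry type, not io_uring's. */
struct hash_entry {
	u64			key;
	struct hlist_node	node;
};

static struct hlist_head buckets[1 << CANCEL_HASH_BITS];

static void hash_insert(struct hash_entry *e)
{
	u32 idx = hash_64(e->key, CANCEL_HASH_BITS);

	/* Each bucket is a singly linked hlist; collisions chain here. */
	hlist_add_head(&e->node, &buckets[idx]);
}

static struct hash_entry *hash_find(u64 key)
{
	u32 idx = hash_64(key, CANCEL_HASH_BITS);
	struct hash_entry *e;

	/* Worst case walks one per-bucket list, ~32 entries if the table is full. */
	hlist_for_each_entry(e, &buckets[idx], node)
		if (e->key == key)
			return e;
	return NULL;
}
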
461 req->work.list.next = NULL;
532 struct io_defer_entry, list);
536 list_del_init(&de->list);
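
Lines 532 and 536 are the consumer side of the deferred-request list: the oldest entry sits at the head, so it is peeked with list_first_entry() and unlinked with list_del_init() once it is allowed to run. A hedged sketch of that FIFO-drain shape (the entry type, lock and readiness check are simplified placeholders):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct defer_entry {			/* placeholder, not io_uring's io_defer_entry */
	struct list_head	list;
	u32			seq;
};

static LIST_HEAD(defer_list);
static DEFINE_SPINLOCK(defer_lock);

static bool still_deferred(struct defer_entry *de)
{
	return false;			/* placeholder readiness check */
}

static void queue_deferred(void)
{
	spin_lock(&defer_lock);
	while (!list_empty(&defer_list)) {
		struct defer_entry *de = list_first_entry(&defer_list,
						struct defer_entry, list);

		if (still_deferred(de))		/* head not ready: stop, keep order */
			break;
		list_del_init(&de->list);	/* unlink; node stays in a valid state */
		kfree(de);			/* real code would run the request here */
	}
	spin_unlock(&defer_lock);
}
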
672 LIST_HEAD(list);
675 list_splice_init(&ctx->cq_overflow_list, &list);
679 while (!list_empty(&list)) {
680 ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
681 list_del(&ocqe->list);
704 struct io_overflow_cqe, list);
706 list_del(&ocqe->list);
816 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
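
Lines 672-706 and 816 show both halves of the overflow-CQE list: completions that do not fit in the CQ ring are appended with list_add_tail() under the completion lock, and when the whole list has to be discarded it is first moved onto a private on-stack head with list_splice_init(), so the lock can be dropped before the entries are freed one by one. A minimal sketch of that splice-then-drain idiom (the entry type and lock names are stand-ins):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct overflow_cqe {			/* stand-in for io_overflow_cqe */
	struct list_head	list;
};

static LIST_HEAD(cq_overflow_list);
static DEFINE_SPINLOCK(completion_lock);

static void overflow_add(struct overflow_cqe *ocqe)
{
	spin_lock(&completion_lock);
	list_add_tail(&ocqe->list, &cq_overflow_list);	/* keep completion order */
	spin_unlock(&completion_lock);
}

static void overflow_kill(void)
{
	struct overflow_cqe *ocqe;
	LIST_HEAD(list);			/* private, on-stack list head */

	spin_lock(&completion_lock);
	list_splice_init(&cq_overflow_list, &list);	/* steal everything, leave source empty */
	spin_unlock(&completion_lock);

	/* Drain without holding the lock; nothing else can see 'list'. */
	while (!list_empty(&list)) {
		ocqe = list_first_entry(&list, struct overflow_cqe, list);
		list_del(&ocqe->list);
		kfree(ocqe);
	}
}
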
1177 * is reached and return the rest of the list.
1215 * io_llist_xchg - swap all entries in a lock-less list
1216 * @head: the head of lock-less list to delete all entries
1217 * @new: new entry as the head of the list
1219 * If list is empty, return NULL, otherwise, return the pointer to the first entry.
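
The io_llist_xchg() kerneldoc at lines 1215-1219 describes a helper that detaches every entry from a lock-less llist and installs a new head in one step; the natural implementation is a single atomic xchg() on head->first, as sketched below (a sketch matching the documented semantics, not necessarily the exact io_uring code):

#include <linux/atomic.h>
#include <linux/llist.h>

/*
 * Swap all entries in a lock-less list: install @new as the new head and
 * return the old first entry, or NULL if the list was empty.  The whole
 * operation is one atomic exchange on head->first.
 */
static inline struct llist_node *io_llist_xchg(struct llist_head *head,
					       struct llist_node *new)
{
	return xchg(&head->first, new);
}

After the exchange the caller privately owns the returned chain and can walk it with the llist_for_each_entry() helpers without any further synchronization.
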
1340 * going to sleep will observe the work added to the list, which
1676 * the poll to the issued list. Otherwise we can spin here
1721 * After the iocb has been issued, it's safe to be found on the poll list.
1722 * Adding the kiocb to the list AFTER submission ensures that we don't
1835 /* Still need defer if there is pending req in defer list. */
1864 list_add_tail(&de->list, &ctx->defer_list);
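
Line 1864 is the producer side that pairs with the drain loop sketched earlier: a request that must wait behind earlier I/O is wrapped in a small defer entry and appended to the defer list, so the list stays ordered by submission. A hedged sketch of that wrapping step (same defer_entry shape as above; the payload field is illustrative):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct defer_entry {			/* see the drain sketch above */
	struct list_head	list;
	void			*req;	/* illustrative payload */
	u32			seq;
};

static LIST_HEAD(defer_list);
static DEFINE_SPINLOCK(defer_lock);

static int defer_request(void *req, u32 seq)
{
	struct defer_entry *de = kmalloc(sizeof(*de), GFP_KERNEL);

	if (!de)
		return -ENOMEM;
	de->req = req;
	de->seq = seq;

	spin_lock(&defer_lock);
	list_add_tail(&de->list, &defer_list);	/* tail insert keeps submission order */
	spin_unlock(&defer_lock);
	return 0;
}
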
2995 * Don't flush cqring overflow list here, just do a simple check.
3198 LIST_HEAD(list);
3201 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
3203 list_cut_position(&list, &ctx->defer_list, &de->list);
3208 if (list_empty(&list))
3211 while (!list_empty(&list)) {
3212 de = list_first_entry(&list, struct io_defer_entry, list);
3213 list_del_init(&de->list);
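
Lines 3198-3213 cancel part of the defer list: the walk goes backwards to the last entry that matches the cancellation criteria, list_cut_position() then carves off everything from the head up to and including that entry onto a private list, and the carved-off portion is drained after the lock is dropped. A sketch of that cut-and-drain shape (the match key and entry type are placeholders):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct defer_entry {			/* same shape as the sketches above */
	struct list_head	list;
	int			owner;	/* illustrative match key */
};

static LIST_HEAD(defer_list);
static DEFINE_SPINLOCK(defer_lock);

static bool cancel_deferred(int owner)
{
	struct defer_entry *de;
	LIST_HEAD(list);

	spin_lock(&defer_lock);
	/* Walk backwards to the last entry that matches ... */
	list_for_each_entry_reverse(de, &defer_list, list) {
		if (de->owner == owner) {
			/* ... and move head..de (inclusive) onto the private list. */
			list_cut_position(&list, &defer_list, &de->list);
			break;
		}
	}
	spin_unlock(&defer_lock);
	if (list_empty(&list))
		return false;

	/* Drain the carved-off prefix without the lock held. */
	while (!list_empty(&list)) {
		de = list_first_entry(&list, struct defer_entry, list);
		list_del_init(&de->list);
		kfree(de);		/* the real code would fail/complete the request here */
	}
	return true;
}
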