Searched refs:next (Results 26 - 50 of 3420) sorted by last modified time

/linux-master/drivers/nvme/host/
tcp.c
404 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
pci.c
469 * Write sq tail if we are asked to, or if the next command would wrap.
932 struct request *req, *next, *prev = NULL; local
935 rq_list_for_each_safe(rqlist, req, next) {
947 if (!next || req->mq_hctx != next->mq_hctx) {
951 *rqlist = next;
multipath.c
504 struct bio *bio, *next; local
507 next = bio_list_get(&head->requeue_list);
510 while ((bio = next) != NULL) {
511 next = bio->bi_next;
core.c
2509 * Therefore, when running in any given state, we will enter the next
3948 struct nvme_ns *ns, *next; local
3952 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3958 list_for_each_entry_safe(ns, next, &rm_list, list)
4099 struct nvme_ns *ns, *next; local
4134 list_for_each_entry_safe(ns, next, &ns_list, list)
4243 "Firmware is activated after next Controller Level Reset\n");
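The tcp.c hit at 404 drains a lock-free llist in one atomic grab and then walks the detached chain through ->next. A minimal sketch of that pattern, assuming a hypothetical request struct (the llist API itself is the real <linux/llist.h> one):

    #include <linux/llist.h>

    struct my_req {                         /* hypothetical payload */
            struct llist_node lentry;
            int data;
    };

    static void drain_reqs(struct llist_head *list)
    {
            struct llist_node *node;

            /* Detach the whole chain atomically; producers may keep
             * llist_add()ing to the (now empty) head meanwhile. */
            for (node = llist_del_all(list); node; node = node->next) {
                    struct my_req *req = llist_entry(node, struct my_req, lentry);

                    /* process req->data ... */
            }
    }

The core.c and multipath.c hits are the other two classic shapes: list_for_each_entry_safe() teardown (sketched under the vxlan results below) and a singly linked bio chain advanced via bi_next.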
/linux-master/drivers/net/vxlan/
vxlan_core.c
3331 INIT_LIST_HEAD(&vxlan->next);
3658 list_for_each_entry(tmp, &vn->vxlan_list, next) {
3977 list_add(&vxlan->next, &vn->vxlan_list);
4417 list_del(&vxlan->next);
4623 struct vxlan_dev *vxlan, *next; local
4626 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
4845 struct vxlan_dev *vxlan, *next; local
4847 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next)
[all...]
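Nearly all of the vxlan hits are the standard <linux/list.h> lifecycle, with the list linkage itself named next: INIT_LIST_HEAD() at setup, list_add() onto a per-netns list, and list_for_each_entry_safe() for teardown, since the _safe variant caches the following entry before the loop body can list_del() the current one. A small sketch with a hypothetical device struct:

    #include <linux/list.h>

    struct my_dev {                         /* hypothetical */
            struct list_head next;          /* linkage, named like vxlan's */
            int id;
    };

    static LIST_HEAD(dev_list);

    static void remove_all_devs(void)
    {
            struct my_dev *d, *n;

            /* n already holds the following entry, so deleting d is safe. */
            list_for_each_entry_safe(d, n, &dev_list, next) {
                    list_del(&d->next);
                    /* free d ... */
            }
    }

The same idiom accounts for the nvme core.c, kfd_process.c, i2c-core-base.c, and gmap.c hits elsewhere in this page.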
/linux-master/drivers/net/ethernet/broadcom/genet/
bcmgenet.c
2288 goto next;
2317 goto next;
2325 goto next;
2346 goto next;
2373 next:
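The bcmgenet hits are not a data structure at all: next is a goto label, used so several early-exit paths in the receive loop still fall through to the common per-packet accounting at the bottom. A self-contained sketch of the shape (entirely hypothetical types):

    /* Walk a ring of packets; bad ones are skipped, but every slot
     * still gets the cleanup that lives after the label. */
    struct pkt { int len; int bad; };

    static int process_ring(struct pkt *ring, int n)
    {
            int i, delivered = 0;

            for (i = 0; i < n; i++) {
                    struct pkt *p = &ring[i];

                    if (p->len == 0)
                            goto next;      /* nothing to deliver */
                    if (p->bad)
                            goto next;      /* drop, but still account */
                    delivered++;
    next:
                    p->len = 0;             /* common per-slot cleanup */
            }
            return delivered;
    }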
/linux-master/drivers/gpu/drm/xe/
xe_vm.c
197 struct list_head *link, *next; local
199 list_for_each_safe(link, next, list)
265 link = list->next;
741 struct xe_userptr_vma *uvma, *next; local
750 list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
759 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
813 struct xe_vma *vma, *next; local
820 list_for_each_entry_safe(vma, next, &vm->rebind_list,
1497 struct drm_gpuva *gpuva, *next; local
1523 drm_gpuvm_for_each_va_safe(gpuva, next,
2561 bool next = !!op->remap.next; local
2807 struct xe_vma_op *op, *next; local
[all...]
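xe_vm.c mixes two list idioms: the raw list_for_each_safe() walk, which keeps struct list_head cursors and recovers the container with list_entry(), and a bare head peek, link = list->next. A sketch of the raw safe walk with a hypothetical element type:

    #include <linux/list.h>

    struct elem {                           /* hypothetical */
            struct list_head link;
    };

    static void destroy_list(struct list_head *list)
    {
            struct list_head *pos, *n;

            /* n is fetched before the body runs, so list_del(pos) is fine. */
            list_for_each_safe(pos, n, list) {
                    struct elem *e = list_entry(pos, struct elem, link);

                    list_del(pos);
                    /* free e ... */
            }
    }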
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/
r535.c
750 void *next; local
752 next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
753 if (IS_ERR(next)) {
754 repv = next;
758 memcpy(next, argv, size);
760 repv = r535_gsp_rpc_send(gsp, next, false, 0);
1936 * the next table (for levels 0 and 1) or the bus address of the next page in
1946 * the next page of the firmware image. Since there can be up to 512*512
2004 // Go to the next scatterlist
[all...]
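In the r535.c continuation-record path, next is a message buffer whose allocator reports failure through the pointer itself, hence the IS_ERR(next) test instead of !next. A sketch of that error-pointer convention (the allocator here is hypothetical; ERR_PTR/IS_ERR/PTR_ERR are the real <linux/err.h> helpers):

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static void *get_msg_buf(size_t size)   /* hypothetical allocator */
    {
            void *p = kmalloc(size, GFP_KERNEL);

            return p ? p : ERR_PTR(-ENOMEM); /* errno encoded in the pointer */
    }

    static int send_chunk(const void *payload, size_t size)
    {
            void *next = get_msg_buf(size);

            if (IS_ERR(next))
                    return PTR_ERR(next);    /* decode and propagate */

            memcpy(next, payload, size);
            /* hand next to the transport ... */
            kfree(next);
            return 0;
    }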
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_process.c
110 struct temp_sdma_queue_list *sdma_q, *next; local
221 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
238 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
247 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
2011 * If restore fails, the timestamp will be set again in the next
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_ttm.c
22 * next paragraph) shall be included in all copies or substantial portions
310 struct dma_fence *next; local
327 resv, &next, false, true, tmz);
332 fence = next;
2254 struct dma_fence *next; local
2266 &next, true, delayed);
2271 fence = next;
/linux-master/drivers/firewire/
ohci.c
776 /* Peek at the next descriptor. */
781 * If the next descriptor is still empty, we must stop at this
945 void *next; local
948 next = handle_ar_packet(ctx, p);
949 if (!next)
951 p = next;
1104 desc = list_entry(ctx->buffer_list.next,
1115 * current buffer, advance to the next buffer. */
1118 desc = list_entry(desc->list.next,
1127 /* If we've advanced to the next buffer
[all...]
nosy.c
41 __le32 next; member in struct:pcl
586 lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
587 lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
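In nosy.c, next is not a CPU pointer but a field the PCI-Lynx DMA engine reads: each PCL holds the little-endian bus address of the next PCL, with a sentinel value terminating the chain. A trimmed sketch (the sentinel encoding is assumed, not taken from the driver):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define PCL_NEXT_INVALID 1              /* assumed end-of-chain marker */

    struct pcl {
            __le32 next;                    /* bus address of the next PCL */
            /* ... buffer descriptors ... */
    };

    static void chain_pcls(struct pcl *a, dma_addr_t b_bus, struct pcl *b)
    {
            a->next = cpu_to_le32(b_bus);   /* device follows this link */
            b->next = cpu_to_le32(PCL_NEXT_INVALID);
    }

The ohci.c hits above are the software cousin: peeking at buffer_list.next and advancing desc = desc->list.next by hand.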
/linux-master/drivers/base/regmap/
regmap.c
544 struct rb_node *next; local
547 next = rb_first(&map->range_tree);
548 while (next) {
549 range_node = rb_entry(next, struct regmap_range_node, node);
550 next = rb_next(&range_node->node);
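The regmap.c hits show the canonical rbtree walk: start at rb_first() and fetch rb_next() before handling the node, so the cursor survives even if the body removes the current entry. A sketch with a hypothetical node type (the rb_erase() is added here to show why the successor is cached first):

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct range {                          /* hypothetical */
            struct rb_node node;
    };

    static void free_range_tree(struct rb_root *root)
    {
            struct rb_node *next = rb_first(root);

            while (next) {
                    struct range *r = rb_entry(next, struct range, node);

                    next = rb_next(&r->node);   /* grab successor first */
                    rb_erase(&r->node, root);
                    kfree(r);
            }
    }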
/linux-master/arch/s390/mm/
gmap.c
196 struct page *page, *next; local
202 list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
254 struct gmap *sg, *next; local
260 list_for_each_entry_safe(sg, next, &gmap->children, list) {
1205 rmap->next = radix_tree_deref_slot_protected(slot,
1207 for (temp = rmap->next; temp; temp = temp->next) {
1215 rmap->next = NULL;
2272 struct gmap *gmap, *sg, *next; local
2289 list_for_each_entry_safe(sg, next,
2625 __s390_enable_skey_pte(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) argument
2638 __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) argument
2645 __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr, unsigned long hmask, unsigned long next, struct mm_walk *walk) argument
2704 __s390_reset_cmma(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) argument
2727 unsigned long next; member in struct:reset_walk_state
2732 s390_gather_pages(pte_t *ptep, unsigned long addr, unsigned long next, struct mm_walk *walk) argument
[all...]
/linux-master/kernel/
workqueue.c
811 * here are visible to and precede any updates by the next PENDING
1047 (worker->hentry.next || worker->hentry.pprev)))
1163 * multiple works to the scheduled queue, the next position
1179 * If @nextp is not NULL, it's updated to point to the next work of the last
1856 * to unplug the next oldest one to start its work item execution. Note that
2151 * this destroys work->data needed by the next step, stash it.
2560 * expire is on the closest next tick and delayed_work users depend
3213 * of concurrency management and the next code block will chain
3775 * flag of the previous work while there must be a valid next work
3819 head = worker->scheduled.next;
4030 struct wq_flusher *next, *tmp; local
[all...]
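The workqueue.c hit at 1047 tests hentry.next || hentry.pprev to ask whether a node is currently on an hlist at all; it is a slightly stricter open-coded cousin of hlist_unhashed(), which checks only pprev. Sketch:

    #include <linux/list.h>

    struct worker {                         /* hypothetical, trimmed */
            struct hlist_node hentry;
    };

    static bool worker_on_hash(const struct worker *w)
    {
            /* Open-coded form from the hit above; hlist_unhashed()
             * would test only w->hentry.pprev. */
            return w->hentry.next || w->hentry.pprev;
    }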
/linux-master/fs/nfsd/
nfs4xdr.c
128 tb->next = argp->to_free;
2603 char *str, *end, *next; local
2621 next = end + 1;
2622 if (*end && (!*next || *next == sep)) {
2643 end = next;
3826 * will resolve itself by the client's next attempt.
4952 struct nfsd4_test_stateid_id *stateid, *next; local
4958 list_for_each_entry_safe(stateid, next,
5764 args->to_free = tb->next;
[all...]
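In nfs4xdr.c, next is a plain scan pointer: end runs to a separator, next = end + 1 marks where the following component begins, and the *end && (!*next || *next == sep) test catches empty components. A userspace-style sketch of that tokenizing shape:

    #include <stdio.h>

    /* Print the sep-separated components of str. */
    static void for_each_component(const char *str, char sep)
    {
            const char *end;

            while (*str) {
                    for (end = str; *end && *end != sep; end++)
                            ;
                    printf("component: %.*s\n", (int)(end - str), str);
                    str = *end ? end + 1 : end;   /* start of next component */
            }
    }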
/linux-master/kernel/sched/
fair.c
1078 * such that the next switched_to_fair() has the
2601 * period will be for the next scan window. If local/(local+remote) ratio is
3222 work->next = work;
3257 * the next time around.
3402 /* VMA scan is complete, do not scan until next sequence. */
3427 * would find the !migratable VMA on the next scan but not reset the
3465 p->numa_work.next = &p->numa_work;
3506 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
4184 struct cfs_rq *prev, struct cfs_rq *next)
4203 n_last_update_time = cfs_rq_last_update_time(next);
4183 set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next) argument
11554 unsigned long interval, next; local
[all...]
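The fair.c hits at 3222/3465/3506 use a callback_head whose next points at itself as an "idle, not queued" sentinel: task_work_add() overwrites next when the work is queued, so work->next != work means "already pending, don't re-add". The sentinel idea in a self-contained sketch:

    #include <stdbool.h>

    struct work {
            struct work *next;              /* self-pointer == idle */
    };

    static void work_init(struct work *w)
    {
            w->next = w;                    /* mark as not queued */
    }

    static bool work_is_queued(const struct work *w)
    {
            return w->next != w;            /* anything else: on a list */
    }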
/linux-master/drivers/i2c/
i2c-core-base.c
1296 struct i2c_client *client, *next; local
1316 list_for_each_entry_safe(client, next, &adap->userspace_clients,
1723 struct i2c_client *client, *next; local
1744 list_for_each_entry_safe(client, next, &adap->userspace_clients,
/linux-master/drivers/dma/xilinx/
xilinx_dpdma.c
150 * @next_desc: next descriptor 32 bit address
501 * from @dma_addr. If a previous descriptor is specified in @prev, its next
657 struct xilinx_dpdma_sw_desc *sw_desc, *next; local
665 list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
820 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
823 * Queue the next descriptor, if any, to the hardware. If the channel is
824 * stopped, start it first. Otherwise retrigger it with the next descriptor.
1087 /* If the retrigger raced with vsync, retry at the next frame. */
1099 * descriptor to active, and queue the next transfer, if any.
/linux-master/drivers/dma/
pl330.c
2086 /* Try to submit a req imm. next to the last completed cookie */
2518 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
2581 desc = list_entry(pool->next,
2723 desc = list_entry(first->node.next,
2836 desc = list_entry(first->node.next,
owl-dma.c
130 * @OWL_DMADESC_NEXT_LLI: physical address of the next link list
366 struct owl_dma_lli *next,
370 list_add_tail(&next->node, &txd->lli_list);
373 prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys;
378 return next;
661 * Start the next descriptor (if any),
792 /* Start from the next active node */
1053 struct owl_dma_vchan *next; local
1056 next, &od->dma.channels, vc.chan.device_node) {
364 owl_dma_add_lli(struct owl_dma_txd *txd, struct owl_dma_lli *prev, struct owl_dma_lli *next, bool is_cyclic) argument
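pl330.c's list_entry(pool->next, ...) and list_entry(first->node.next, ...) are open-coded spellings of list_first_entry() and list_next_entry(): a list head's next is the first element, and an element's node.next is the one after it. Equivalent forms (hypothetical descriptor type; both assume the caller knows the entries exist):

    #include <linux/list.h>

    struct desc {                           /* hypothetical */
            struct list_head node;
    };

    static struct desc *first_desc(struct list_head *pool)
    {
            /* same as: list_entry(pool->next, struct desc, node) */
            return list_first_entry(pool, struct desc, node);
    }

    static struct desc *desc_after(struct desc *d)
    {
            /* same as: list_entry(d->node.next, struct desc, node) */
            return list_next_entry(d, node);
    }

owl-dma's next is different again: a bus address written into the hardware link-list item (prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys), the same device-readable chaining as in nosy.c above.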
/linux-master/arch/x86/kernel/
sev-shared.c
1039 hdr = (struct setup_data *)hdr->next;
process_64.c
326 * The next task is using 64-bit TLS, is not using this
343 * next states are fully zeroed, we can skip
361 * The next task is using a real segment. Loading the selector
369 * Store prev's PKRU value and load next's PKRU value if they differ. PKRU
375 struct thread_struct *next)
387 if (prev->pkru != next->pkru)
388 wrpkru(next->pkru);
392 struct thread_struct *next)
396 if (unlikely(prev->fsindex || next->fsindex))
397 loadseg(FS, next
374 x86_pkru_load(struct thread_struct *prev, struct thread_struct *next) argument
391 x86_fsgsbase_load(struct thread_struct *prev, struct thread_struct *next) argument
613 struct thread_struct *next = &next_p->thread; local
[all...]
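In process_64.c the prev/next pairs are the two threads across a context switch, and x86_pkru_load() only pays for the register write when the tasks' PKRU values actually differ. The write-if-changed shape, self-contained with a stub in place of the real wrpkru instruction:

    struct thread_state {                   /* hypothetical, trimmed */
            unsigned int pkru;
    };

    static void hw_write_pkru(unsigned int v)
    {
            (void)v;                        /* stand-in for wrpkru */
    }

    static void pkru_switch(const struct thread_state *prev,
                            const struct thread_state *next)
    {
            /* Skip the comparatively expensive write when nothing changed. */
            if (prev->pkru != next->pkru)
                    hw_write_pkru(next->pkru);
    }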
/linux-master/arch/riscv/mm/
init.c
212 * any allocation to happen between _end and the next pmd aligned page.
1402 unsigned long addr, unsigned long next)
1408 unsigned long addr, unsigned long next)
1410 vmemmap_verify((pte_t *)pmdp, node, addr, next);
1401 vmemmap_set_pmd(pmd_t *pmd, void *p, int node, unsigned long addr, unsigned long next) argument
1407 vmemmap_check_pmd(pmd_t *pmdp, int node, unsigned long addr, unsigned long next) argument
/linux-master/tools/testing/selftests/
kselftest_harness.h
778 item->next = NULL; \
783 item->next = NULL; \
785 item->prev->next = item; \
788 item->next = head; \
789 item->next->prev = item; \
807 struct __fixture_metadata *prev, *next; member in struct:__fixture_metadata
817 struct __test_xfail *prev, *next; member in struct:__test_xfail
859 struct __fixture_variant_metadata *prev, *next; member in struct:__fixture_variant_metadata
886 struct __test_metadata *prev, *next; member in struct:__test_metadata
1041 for (f = __fixture_list; f; f = f->next) {
[all...]
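kselftest_harness.h rolls its own doubly linked lists of fixtures and tests by hand, NULL-terminating the new item and then patching both prev and next around the insertion point. A simplified, self-contained append with the same pointer surgery (hypothetical item type; the harness's real insert also handles ordering):

    struct item {
            struct item *prev, *next;
    };

    /* Append it to the tail of the list headed by *headp. */
    static void list_append(struct item **headp, struct item *it)
    {
            struct item *tail;

            it->next = NULL;
            if (!*headp) {                  /* empty: it becomes the head */
                    it->prev = NULL;
                    *headp = it;
                    return;
            }
            for (tail = *headp; tail->next; tail = tail->next)
                    ;
            tail->next = it;                /* link both directions */
            it->prev = tail;
    }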

Completed in 370 milliseconds
