/linux-master/fs/bcachefs/

util.c
  491: * bch2_ratelimit_delay() - return how long to delay until the next time to do
  500: return time_after64(d->next, now)
  501: ? nsecs_to_jiffies(d->next - now)
  514: d->next += div_u64(done * NSEC_PER_SEC, d->rate);
  516: if (time_before64(now + NSEC_PER_SEC, d->next))
  517: d->next = now + NSEC_PER_SEC;
  519: if (time_after64(now - NSEC_PER_SEC * 2, d->next))
  520: d->next = now - NSEC_PER_SEC * 2;
  562: pd->rate.next + NSEC_PER_MSEC))
  622: prt_printf(out, "next i
  [all...]

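The util.c hits are bcachefs's pacing helpers: d->next holds an absolute deadline in nanoseconds, advanced by done * NSEC_PER_SEC / rate per completed unit of work and clamped so it never runs more than about one second ahead of, or two seconds behind, the current time. A minimal userspace sketch of that scheme follows; the struct and function names are illustrative stand-ins, not the bcachefs API.

    #include <stdint.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Illustrative stand-in for the kernel structure: an absolute pacing deadline. */
    struct pace {
        uint64_t next;  /* time (ns) before which the next unit must wait */
        uint64_t rate;  /* allowed units per second, must be non-zero */
    };

    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
    }

    /* How long (ns) to sleep before doing the next unit of work. */
    static uint64_t pace_delay(const struct pace *d)
    {
        uint64_t now = now_ns();

        return d->next > now ? d->next - now : 0;
    }

    /* Account for `done` completed units and push the deadline forward. */
    static void pace_increment(struct pace *d, uint64_t done)
    {
        uint64_t now = now_ns();

        d->next += done * NSEC_PER_SEC / d->rate;

        /* Never schedule more than one second into the future... */
        if (d->next > now + NSEC_PER_SEC)
            d->next = now + NSEC_PER_SEC;

        /* ...and never bank more than two seconds of unused credit. */
        if (now > 2 * NSEC_PER_SEC && d->next < now - 2 * NSEC_PER_SEC)
            d->next = now - 2 * NSEC_PER_SEC;
    }
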
extents.h
  99: union bch_extent_entry *next = extent_entry_next(entry);   [local]
  104: memmove_u64s_down(entry, next,
  105: (u64 *) bkey_val_end(k) - (u64 *) next);
  106: k.k->u64s -= (u64 *) next - (u64 *) entry;

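These extents.h lines drop one entry from a key's packed value: the tail after the entry is slid down with a memmove and the key's u64 count is shrunk by the entry's size. A generic sketch of the same move on a plain u64 array, with illustrative names rather than the bcachefs types:

    #include <stdint.h>
    #include <string.h>

    /*
     * Sketch: drop `entry_u64s` 64-bit words starting at index `idx` from a
     * packed array of `*nr` words by sliding the tail down over the hole,
     * then shrinking the recorded size, the same shape as the extents.h code.
     */
    static void drop_packed_entry(uint64_t *vals, unsigned int *nr,
                                  unsigned int idx, unsigned int entry_u64s)
    {
        uint64_t *entry = vals + idx;
        uint64_t *next  = entry + entry_u64s;
        uint64_t *end   = vals + *nr;

        memmove(entry, next, (size_t)(end - next) * sizeof(*vals));
        *nr -= entry_u64s;
    }
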
alloc_background.c
  930: struct bpos next;   [local]
  945: next = iter2.pos;
  948: BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
  956: bch2_key_resize(hole, next.offset - iter->pos.offset);
  1423: struct bpos next;   [local]
  1436: next = bpos_nosnap_successor(k.k->p);
  1446: next = k.k->p;
  1450: &next,
  1454: &next,
  1465: bch2_btree_iter_set_pos(&iter, next);
  [all...]

alloc_foreground.c
  430: goto next;
  435: next:

btree_update.c
  312: goto next;
  324: next:
  618: h->next = trans->hooks;

btree_iter.c
  1173: * btree_path_lock_root() comes next and that it can't fail
  1840: * the next child node
  2116: /* Advance to next leaf node: */
  2506: struct bpos next;   [local]
  2539: next = k.k ? bkey_start_pos(k.k) : POS_MAX;
  2541: if (bkey_lt(iter->pos, next)) {
  2548: (next.inode == iter->pos.inode
  2549: ? next.offset

bset.c
  247: struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);   [local]
  270: BUG_ON(next != btree_bkey_last(b, t) &&
  271: bkey_iter_cmp(b, insert, next) > 0);
  273: if (next != btree_bkey_last(b, t) &&
  274: bkey_iter_cmp(b, insert, next) > 0) {
  276: struct bkey k2 = bkey_unpack_key(b, next);
  282: panic("insert > next:\n"
  284: "next key %s\n",
  790: /* round up to next cacheline: */

/linux-master/fs/

aio.c
  1199: struct aio_waiter *curr, *next;   [local]
  1203: list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)

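aio.c, like several hits further down (fc.c, block2mtd.c, xe_gt_tlb_invalidation.c, gpiolib.c, main.c), uses the _safe flavour of the list iterator, which caches the following element so the current one can be unlinked or freed mid-walk. A kernel-context sketch of the idiom, with a made-up element type:

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Illustrative only: a made-up object linked into some list. */
    struct demo_waiter {
        struct list_head entry;
        int id;
    };

    /*
     * The "_safe" iterator caches the next element in `tmp`, so deleting and
     * freeing `curr` cannot break the walk.
     */
    static void demo_drain(struct list_head *head)
    {
        struct demo_waiter *curr, *tmp;

        list_for_each_entry_safe(curr, tmp, head, entry) {
            list_del(&curr->entry);
            kfree(curr);
        }
    }
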
/linux-master/drivers/of/

dynamic.c
  311: struct property *prop, *next;   [local]
  313: for (prop = prop_list; prop != NULL; prop = next) {
  314: next = prop->next;
  352: pr_err("ERROR: next of_node_put() on this node will result in a kobject warning 'refcount_t: underflow; use-after-free.'\n");

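dynamic.c walks a singly linked property list while tearing it down, so prop->next has to be read into next before the current node is released. The same idiom in a self-contained userspace form, with a generic node type standing in for the of_ structures:

    #include <stdlib.h>

    /* Generic singly linked node; stands in for the property list in dynamic.c. */
    struct node {
        struct node *next;
        /* payload ... */
    };

    /* Free a whole list: `next` is read before free(), after which `n` is gone. */
    static void free_list(struct node *head)
    {
        struct node *n, *next;

        for (n = head; n != NULL; n = next) {
            next = n->next;
            free(n);
        }
    }

    int main(void)
    {
        struct node *a = calloc(1, sizeof(*a));
        struct node *b = calloc(1, sizeof(*b));

        a->next = b;
        free_list(a);
        return 0;
    }
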
/linux-master/drivers/nvme/target/

fc.c
  767: /* Re-use the fod for the next pending cmd that was deferred */
  1554: struct nvmet_fc_tgt_assoc *assoc, *next;   [local]
  1559: list_for_each_entry_safe(assoc, next,
  1584: struct nvmet_fc_tgtport *tgtport, *next;   [local]
  1592: list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
  2266: * for next sequence:
  2388: /* transfer the next chunk */
  2419: /* transfer the next chunk */

/linux-master/drivers/net/

xen-netfront.c
  1057: goto next;
  1067: goto next;
  1099: next:

/linux-master/drivers/net/ethernet/intel/i40e/

i40e_txrx.h
  327: struct i40e_ring *next; /* pointer to next ring in q_vector */   [member in struct:i40e_ring]
  344: * and to resume packet building for this ring in the next call to
  437: unsigned long next_update; /* jiffies value of next update */
  447: for (pos = (head).ring; pos != NULL; pos = pos->next)

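i40e_txrx.h chains the rings belonging to one q_vector through an intrusive next pointer and walks them with the small for() helper on line 447. A standalone sketch of that pattern, using illustrative names rather than the i40e types:

    #include <stdio.h>

    /* Illustrative stand-ins for the i40e types. */
    struct demo_ring {
        struct demo_ring *next; /* next ring in the same vector's chain */
        int id;
    };

    struct demo_ring_container {
        struct demo_ring *ring; /* head of the chain */
    };

    /* Walk every ring hanging off a container, in the style of the line 447 helper. */
    #define for_each_demo_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)

    int main(void)
    {
        struct demo_ring r2 = { .next = NULL, .id = 2 };
        struct demo_ring r1 = { .next = &r2, .id = 1 };
        struct demo_ring_container c = { .ring = &r1 };
        struct demo_ring *pos;

        for_each_demo_ring(pos, c)
            printf("ring %d\n", pos->id);
        return 0;
    }
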
i40e_txrx.c
  29: /* grab the next descriptor */
  113: /* grab the next descriptor */
  159: * @l4proto: next level protocol used in data portion of l3
  223: * @l4proto: next level protocol used in data portion of l3
  244: * @l4proto: next level protocol used in data portion of l3
  270: * @l4proto: next level protocol used in data portion of l3
  1002: /* move us one more past the eop_desc for start of next pkt */
  1342: /* next update should occur within next jiffy */
  1369: /* update, and store next t
  2082: u32 next = rx_ring->next_to_clean, i = 0;   [local]
  [all...]

/linux-master/drivers/mtd/devices/

block2mtd.c
  509: struct list_head *pos, *next;   [local]
  512: list_for_each_safe(pos, next, &blkmtd_device_list) {

/linux-master/drivers/gpu/drm/xe/

xe_vm_types.h
  265: * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
  291: /** @next: VMA subsequent part of a split mapping */
  292: struct xe_vma *next;   [member in struct:xe_vma_op_remap]
  299: /** @skip_next: skip next rebind */

xe_pt.c
  390: static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,   [argument]
  399: if (!xe_pt_covers(addr, next, level, &xe_walk->base))
  403: if (next - xe_walk->va_curs_start > xe_walk->curs->size)
  411: size = next - addr;
  422: xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)   [argument]
  429: if (next > xe_walk->l0_end_addr)
  437: for (; addr < next; addr += SZ_64K) {
  444: return addr == next;
  456: xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)   [argument]
  470: unsigned int level, u64 addr, u64 next,
  469: xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset, unsigned int level, u64 addr, u64 next, struct xe_ptw **child, enum page_walk_action *action, struct xe_pt_walk *walk)   [argument]
  722: xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset, unsigned int level, u64 addr, u64 next, struct xe_ptw **child, enum page_walk_action *action, struct xe_pt_walk *walk)   [argument]
  1371: xe_pt_check_kill(u64 addr, u64 next, unsigned int level, const struct xe_pt *child, enum page_walk_action *action, struct xe_pt_walk *walk)   [argument]
  1398: xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset, unsigned int level, u64 addr, u64 next, struct xe_ptw **child, enum page_walk_action *action, struct xe_pt_walk *walk)   [argument]
  1415: xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset, unsigned int level, u64 addr, u64 next, struct xe_ptw **child, enum page_walk_action *action, struct xe_pt_walk *walk)   [argument]
  [all...]

xe_gt_tlb_invalidation.c
  22: struct xe_gt_tlb_invalidation_fence *fence, *next;   [local]
  25: list_for_each_entry_safe(fence, next,
  93: struct xe_gt_tlb_invalidation_fence *fence, *next;   [local]
  119: list_for_each_entry_safe(fence, next,
  364: struct xe_gt_tlb_invalidation_fence *fence, *next;   [local]
  398: list_for_each_entry_safe(fence, next,

/linux-master/drivers/gpu/drm/nouveau/nvkm/engine/gr/

gf100.c
  1085: u32 next = init->addr + init->count * init->pitch;   [local]
  1087: while (addr < next) {
  1105: u32 next = init->addr + init->count * init->pitch;   [local]
  1115: while (addr < next) {
  1144: u32 next = init->addr + init->count * init->pitch;   [local]
  1152: while (addr < next) {

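gf100.c expands register-init packets: next is the first address past a run of count registers spaced pitch bytes apart, and the while (addr < next) loop visits each one. A sketch of that expansion with a simplified packet layout inferred from the snippet, not the real nvkm structures:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified packet layout inferred from the snippet; not the nvkm structs. */
    struct init_packet {
        uint32_t addr;  /* first register offset */
        uint32_t count; /* number of registers in the run */
        uint32_t pitch; /* byte stride between consecutive registers */
        uint32_t data;  /* value to write (simplified) */
    };

    static void run_init(const struct init_packet *init)
    {
        uint32_t addr = init->addr;
        uint32_t next = init->addr + init->count * init->pitch;

        /* `next` is the first offset past the run. */
        while (addr < next) {
            printf("wr32 %#x <- %#x\n", (unsigned int)addr,
                   (unsigned int)init->data);
            addr += init->pitch;
        }
    }

    int main(void)
    {
        struct init_packet p = { .addr = 0x1000, .count = 4, .pitch = 0x20, .data = 0 };

        run_init(&p);
        return 0;
    }
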
/linux-master/drivers/gpu/drm/nouveau/

nouveau_uvmm.c
  56: struct nouveau_uvma *next;   [member in struct:nouveau_uvma_prealloc]
  474: if (r->next)
  475: op_map_prepare_unwind(new->next);
  517: if (r->next)
  520: if (r->prev && r->next)
  668: if (r->next) {
  669: ret = op_map_prepare(uvmm, &new->next, r->next,
  681: if (args && (r->prev && r->next))
  766: * remap.next, bu
  969: struct bind_job_op *op, *next;   [local]
  1580: struct bind_job_op *op, *next;   [local]
  1895: struct drm_gpuva *va, *next;   [local]
  [all...]

/linux-master/drivers/gpio/

gpiolib.c
  388: struct gpio_device *prev, *next;   [local]
  398: next = list_first_entry(&gpio_devices, struct gpio_device, list);
  399: if (gdev->base + gdev->ngpio <= next->base) {
  412: list_for_each_entry_safe(prev, next, &gpio_devices, list) {
  414: if (&next->list == &gpio_devices)
  417: /* add between prev and next */
  419: && gdev->base + gdev->ngpio <= next->base) {
  4835: struct gpio_device *gdev = v, *next;   [local]
  4837: next = list_entry_rcu(gdev->list.next, struc
  [all...]

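gpiolib.c keeps gpio_devices sorted by base and only inserts a new device where its [base, base + ngpio) range clears both neighbours (lines 399 and 419). A hedged sketch of just that fit test; the range struct is a stand-in for struct gpio_device:

    #include <stdbool.h>
    #include <stddef.h>

    /* Stand-in for struct gpio_device: a claimed range of GPIO numbers. */
    struct gpio_range {
        int base;   /* first GPIO number */
        int ngpio;  /* number of GPIOs in the range */
    };

    /*
     * Can `new` be slotted between `prev` and `next` in a list kept sorted by
     * base?  Either neighbour may be NULL at the ends of the list.
     */
    static bool range_fits_between(const struct gpio_range *new,
                                   const struct gpio_range *prev,
                                   const struct gpio_range *next)
    {
        if (prev && prev->base + prev->ngpio > new->base)
            return false;
        if (next && new->base + new->ngpio > next->base)
            return false;
        return true;
    }
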
/linux-master/drivers/block/null_blk/

main.c
  2090: nullb = list_entry(nullb_list.next, struct nullb, list);
  2109: nullb = list_entry(nullb_list.next, struct nullb, list);

/linux-master/arch/x86/mm/pat/

memtype.c
  1125: * between seq_start()/next() and seq_show():
  1185: .next = memtype_seq_next,

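memtype.c (and selinuxfs.c below) shows up here because of the .next callback in a seq_operations table, the step function the seq_file core calls between seq_start() and seq_show(). A minimal kernel-context sketch of such a table over a fixed array; the demo names are made up:

    #include <linux/kernel.h>
    #include <linux/seq_file.h>

    /* Illustrative only: expose a fixed array through the seq_file protocol. */
    static int demo_vals[] = { 1, 2, 3 };

    static void *demo_seq_start(struct seq_file *m, loff_t *pos)
    {
        return *pos < ARRAY_SIZE(demo_vals) ? &demo_vals[*pos] : NULL;
    }

    /* Called repeatedly between start() and stop() to step the iterator. */
    static void *demo_seq_next(struct seq_file *m, void *v, loff_t *pos)
    {
        (*pos)++;
        return *pos < ARRAY_SIZE(demo_vals) ? &demo_vals[*pos] : NULL;
    }

    static void demo_seq_stop(struct seq_file *m, void *v)
    {
    }

    static int demo_seq_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "%d\n", *(int *)v);
        return 0;
    }

    static const struct seq_operations demo_seq_ops = {
        .start = demo_seq_start,
        .next  = demo_seq_next,
        .stop  = demo_seq_stop,
        .show  = demo_seq_show,
    };
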
/linux-master/security/selinux/

selinuxfs.c
  58: SEL_DISABLE, /* disable SELinux until next reboot */
  67: SEL_INO_NEXT, /* The next inode number to use */
  1562: .next = sel_avc_stats_seq_next,

/linux-master/security/

security.c
  277: char *sep, *name, *next;   [local]
  307: next = sep;
  309: while ((name = strsep(&next, ",")) != NULL) {
  4158: * the next call which actually allocates and returns the @secdata.

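security.c splits the comma-separated LSM list with strsep(), which advances next past each delimiter and hands back the token in name. A self-contained userspace sketch of the same parse loop:

    #define _DEFAULT_SOURCE     /* for strsep() on glibc */
    #include <stdio.h>
    #include <string.h>

    /* Split a writable comma-separated string and handle each name in order. */
    static void parse_names(char *list)
    {
        char *next = list;
        char *name;

        while ((name = strsep(&next, ",")) != NULL) {
            if (*name == '\0')
                continue;   /* skip empty fields, e.g. "a,,b" */
            printf("name: %s\n", name);
        }
    }

    int main(void)
    {
        char buf[] = "selinux,bpf,landlock";

        parse_names(buf);
        return 0;
    }
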
/linux-master/arch/x86/kernel/

kvm.c
  179: struct hlist_node *p, *next;   [local]
  182: hlist_for_each_safe(p, next, &b->list) {