Searched refs:cur (Results 1 - 25 of 771) sorted by last modified time


/linux-master/net/mptcp/
protocol.c  2341 struct mptcp_data_frag *cur, *rtx_head; local
2367 list_for_each_entry(cur, &msk->rtx_queue, list) {
2368 if (!cur->already_sent)
2370 cur->already_sent = 0;
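
The protocol.c matches above walk the MPTCP retransmit queue with list_for_each_entry() and clear each fragment's already_sent flag. A minimal kernel-style sketch of that iterator pattern, with an invented element type rather than the real mptcp_data_frag:

    #include <linux/list.h>
    #include <linux/types.h>

    /* Invented element type; the only requirement is an embedded list_head. */
    struct frag {
        struct list_head list;      /* links the frag into a queue */
        bool already_sent;
    };

    /* Clear the sent flag on every queued frag, mirroring the
     * list_for_each_entry() loop over msk->rtx_queue above. */
    static void reset_sent_flags(struct list_head *queue)
    {
        struct frag *cur;

        list_for_each_entry(cur, queue, list)
            cur->already_sent = false;
    }
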
/linux-master/lib/
scatterlist.c  387 struct scatterlist *cur,
394 if (cur) {
395 next_sg = sg_next(cur);
406 if (cur) {
386 get_next_sg(struct sg_append_table *table, struct scatterlist *cur, unsigned long needed_sges, gfp_t gfp_mask) argument
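
get_next_sg() above advances through the append table with sg_next(), which returns the following scatterlist entry or NULL after the last one. A small hedged sketch of walking a terminated scatterlist the same way; the counting helper itself is illustrative:

    #include <linux/scatterlist.h>

    /* Count the entries of a properly terminated scatterlist by following
     * sg_next() until it returns NULL. */
    static unsigned int count_sg_entries(struct scatterlist *sgl)
    {
        struct scatterlist *cur;
        unsigned int n = 0;

        for (cur = sgl; cur; cur = sg_next(cur))
            n++;
        return n;
    }
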
/linux-master/kernel/bpf/
verifier.c  584 struct bpf_verifier_state *cur = env->cur_state; local
586 return cur->frame[reg->frameno];
1465 struct bpf_verifier_state *cur = env->cur_state; local
1466 struct bpf_func_state *state = cur->frame[cur->curframe];
1522 * cur .-> succ | .------...
1525 * succ '-- cur | ... ...
1528 * | succ <- cur
1535 * (A) successor state of cur (B) successor state of cur o
1624 update_loop_entry(struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr) argument
1670 struct bpf_verifier_state *cur = env->cur_state; local
1700 struct bpf_verifier_state *cur = env->cur_state; local
3324 push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur, int insn_flags) argument
4485 struct bpf_func_state *cur; /* state of the current function */ local
4627 struct bpf_func_state *cur; /* state of the current function */ local
7483 struct bpf_verifier_state *cur = env->cur_state; local
7841 find_prev_entry(struct bpf_verifier_env *env, struct bpf_verifier_state *cur, int insn_idx) argument
7881 widen_imprecise_scalars(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) argument
16346 range_within(const struct bpf_reg_state *old, const struct bpf_reg_state *cur) argument
16484 clean_live_states(struct bpf_verifier_env *env, int insn, struct bpf_verifier_state *cur) argument
16679 stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, struct bpf_func_state *cur, struct bpf_idmap *idmap, enum exact_level exact) argument
16803 refsafe(struct bpf_func_state *old, struct bpf_func_state *cur, struct bpf_idmap *idmap) argument
16845 func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, struct bpf_func_state *cur, enum exact_level exact) argument
16873 states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur, enum exact_level exact) argument
17058 states_maybe_looping(struct bpf_verifier_state *old, struct bpf_verifier_state *cur) argument
17139 iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur) argument
17167 struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry; local
[all...]
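
Many of the verifier.c hits index cur->frame[]: each bpf_verifier_state carries a stack of per-call-frame bpf_func_state objects, and the active one is selected by curframe (see the lines at 1465-1466). A hedged sketch of that accessor shape, not a copy of the kernel's own helper:

    #include <linux/bpf_verifier.h>

    /* The function state currently being verified is the top frame of the
     * current verifier state, assuming the frame[]/curframe layout shown
     * in the matches above. */
    static struct bpf_func_state *current_func_state(struct bpf_verifier_env *env)
    {
        struct bpf_verifier_state *cur = env->cur_state;

        return cur->frame[cur->curframe];
    }
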
/linux-master/fs/btrfs/
volumes.c  3016 u32 cur; local
3023 cur = 0;
3025 while (cur < array_size) {
3041 memmove(ptr, ptr + len, array_size - (cur + len));
3046 cur += len;
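
The volumes.c hit steps a cur byte offset through a packed array and deletes one record by shifting the tail down with memmove(). A small userspace sketch of just that delete-in-place step; the array layout is generic, not the btrfs sys_chunk_array format:

    #include <string.h>
    #include <stddef.h>

    /* Remove 'len' bytes starting at offset 'cur' from a packed array of
     * 'array_size' bytes and return the new size, mirroring the
     * memmove(ptr, ptr + len, array_size - (cur + len)) call above. */
    static size_t remove_record(unsigned char *array, size_t array_size,
                                size_t cur, size_t len)
    {
        memmove(array + cur, array + cur + len, array_size - (cur + len));
        return array_size - len;
    }
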
qgroup.c  1486 struct btrfs_qgroup *cur; local
1496 list_for_each_entry(cur, &qgroup_list, iterator) {
4590 struct rb_node **cur; local
4640 cur = &blocks->blocks[level].rb_node;
4641 while (*cur) {
4644 parent = *cur;
4649 cur = &(*cur)->rb_left;
4651 cur = &(*cur)
[all...]
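
The qgroup.c lines at 4640-4651 are the standard kernel rb-tree insertion walk: descend from the root by key while remembering both the parent and the link pointer that will receive the new node, then splice it in and rebalance. A generic hedged sketch of that idiom with an invented keyed node type:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct keyed_node {
        struct rb_node rb_node;
        u64 key;
    };

    /* Walk down while (*cur) keeping the link pointer, exactly as the
     * loop above does, then attach and recolor the new node. */
    static void keyed_insert(struct rb_root *root, struct keyed_node *new)
    {
        struct rb_node **cur = &root->rb_node;
        struct rb_node *parent = NULL;

        while (*cur) {
            struct keyed_node *entry = rb_entry(*cur, struct keyed_node, rb_node);

            parent = *cur;
            if (new->key < entry->key)
                cur = &(*cur)->rb_left;
            else
                cur = &(*cur)->rb_right;
        }
        rb_link_node(&new->rb_node, parent, cur);
        rb_insert_color(&new->rb_node, root);
    }
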
ordered-data.c  410 u64 cur = file_offset; local
417 while (cur < file_offset + num_bytes) {
422 node = ordered_tree_search(inode, cur);
431 * cur
434 if (cur >= entry_end) {
443 cur = entry->file_offset;
448 * cur
451 if (cur < entry->file_offset) {
452 cur = entry->file_offset;
461 * cur
551 struct list_head *cur; local
1001 struct rb_node *cur; local
[all...]
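
The ordered-data.c loop advances a cur byte offset across [file_offset, file_offset + num_bytes), skipping forward over gaps to the start of the next ordered extent and then past each entry it visits. A hedged userspace sketch of that range walk, over a sorted array instead of the btrfs rb-tree:

    #include <stddef.h>

    /* Toy stand-in for an ordered-extent record. */
    struct range_entry {
        unsigned long long file_offset;
        unsigned long long num_bytes;
    };

    /* Visit every entry of a sorted array that overlaps [start, start + len),
     * advancing cur past gaps and past each processed entry, the way the
     * while (cur < file_offset + num_bytes) loop above does. */
    static size_t walk_ranges(const struct range_entry *entries, size_t n,
                              unsigned long long start, unsigned long long len)
    {
        unsigned long long cur = start;
        size_t i = 0, visited = 0;

        while (cur < start + len && i < n) {
            const struct range_entry *e = &entries[i];
            unsigned long long entry_end = e->file_offset + e->num_bytes;

            if (cur >= entry_end) {          /* entry ends before cur */
                i++;
                continue;
            }
            if (cur < e->file_offset)        /* gap before this entry */
                cur = e->file_offset;
            visited++;                       /* per-entry work would go here */
            cur = entry_end;                 /* advance past this entry */
            i++;
        }
        return visited;
    }
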
scrub.c  1747 for (unsigned int cur = 0; cur < nr_sectors; cur++) {
1748 struct page *page = scrub_stripe_get_page(stripe, cur);
1749 unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
inode.c  8019 u64 cur; local
8058 cur = page_start;
8059 while (cur < page_end) {
8065 ordered = btrfs_lookup_first_ordered_range(inode, cur,
8066 page_end + 1 - cur);
8076 if (ordered->file_offset > cur) {
8078 * There is a range between [cur, oe->file_offset) not
8090 ASSERT(range_end + 1 - cur < U32_MAX);
8091 range_len = range_end + 1 - cur;
8092 if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_le
10108 u64 cur; local
[all...]
extent_map.c  487 struct extent_map *cur,
493 WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
494 ASSERT(extent_map_in_tree(cur));
495 if (!(cur->flags & EXTENT_FLAG_LOGGING))
496 list_del_init(&cur->list);
497 rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
498 RB_CLEAR_NODE(&cur->rb_node);
486 replace_extent_mapping(struct extent_map_tree *tree, struct extent_map *cur, struct extent_map *new, int modified) argument
backref.c  2591 u32 cur; local
2626 for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2631 cur, found_key.objectid,
3228 struct btrfs_backref_node *cur)
3240 cur->is_reloc_root = 1;
3243 root = find_reloc_root(cache->fs_info, cur->bytenr);
3246 cur->root = root;
3252 list_add(&cur
3226 handle_direct_tree_backref(struct btrfs_backref_cache *cache, struct btrfs_key *ref_key, struct btrfs_backref_node *cur) argument
3299 handle_indirect_tree_backref(struct btrfs_trans_handle *trans, struct btrfs_backref_cache *cache, struct btrfs_path *path, struct btrfs_key *ref_key, struct btrfs_key *tree_key, struct btrfs_backref_node *cur) argument
3467 btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans, struct btrfs_backref_cache *cache, struct btrfs_path *path, struct btrfs_backref_iter *iter, struct btrfs_key *node_key, struct btrfs_backref_node *cur) argument
[all...]
/linux-master/drivers/nvme/target/
core.c  125 struct nvmet_ns *cur; local
129 xa_for_each(&subsys->namespaces, idx, cur)
130 nsid = cur->nsid;
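
The nvmet hit finds the highest namespace ID by scanning the subsystem's XArray with xa_for_each(), which visits present entries in ascending index order, so the last assignment wins. A minimal sketch of the same scan with an invented entry type:

    #include <linux/xarray.h>
    #include <linux/types.h>

    struct item {
        u32 id;
    };

    /* Return the id of the last (highest-indexed) present entry, or 0 if
     * the array is empty. */
    static u32 highest_id(struct xarray *xa)
    {
        struct item *cur;
        unsigned long idx;
        u32 id = 0;

        xa_for_each(xa, idx, cur)
            id = cur->id;
        return id;
    }
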
/linux-master/drivers/nvme/host/
core.c  1397 struct nvme_ns_id_desc *cur, bool *csi_seen)
1400 void *data = cur;
1402 switch (cur->nidt) {
1404 if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1406 warn_str, cur->nidl);
1411 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1414 if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1416 warn_str, cur->nidl);
1421 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1424 if (cur
1396 nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, struct nvme_ns_id_desc *cur, bool *csi_seen) argument
1479 struct nvme_ns_id_desc *cur = data + pos; local
[all...]
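
nvme_process_ns_desc() parses the Namespace Identification Descriptor list: each descriptor is a small header carrying a type (nidt) and a payload length (nidl), with the payload right behind it, and the caller at 1479 steps through the buffer one descriptor at a time. A hedged userspace sketch of that type-length-value walk; struct tlv is a simplified stand-in, not the NVMe wire format:

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    /* Simplified type/length header; the payload follows immediately. */
    struct tlv {
        uint8_t type;
        uint8_t len;
    };

    /* Walk a buffer of back-to-back TLV records and copy out the first
     * record of the wanted type.  Returns the payload length, or 0. */
    static size_t find_tlv(const uint8_t *buf, size_t buflen,
                           uint8_t wanted, uint8_t *out, size_t outlen)
    {
        size_t pos = 0;

        while (pos + sizeof(struct tlv) <= buflen) {
            const struct tlv *cur = (const void *)(buf + pos);

            if (cur->len == 0 || pos + sizeof(*cur) + cur->len > buflen)
                break;                       /* end marker or malformed */
            if (cur->type == wanted && cur->len <= outlen) {
                memcpy(out, buf + pos + sizeof(*cur), cur->len);
                return cur->len;
            }
            pos += sizeof(*cur) + cur->len;  /* next descriptor */
        }
        return 0;
    }
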
/linux-master/drivers/gpu/drm/xe/
xe_vm.c  452 ktime_t cur; local
457 cur = ktime_get();
458 *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
459 if (!ktime_before(cur, *end))
3332 struct xe_res_cursor cur; local
3335 xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
3336 addr = xe_res_dma(&cur);
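
The xe_vm.c lines at 457-459 implement a lazy retry deadline: the first pass stores ktime_add_ms(now, timeout) in *end, and later passes give up once ktime_get() is no longer before it. A hedged sketch of the same helper shape; the function name and timeout value are illustrative:

    #include <linux/ktime.h>

    #define RETRY_TIMEOUT_MS 1000   /* illustrative value */

    /* Return true while retrying is still allowed.  The deadline is
     * created on the first call (while *end is still 0) and reused on
     * subsequent calls, matching the *end = *end ?: ... line above. */
    static bool retry_allowed(ktime_t *end)
    {
        ktime_t cur = ktime_get();

        if (!*end)
            *end = ktime_add_ms(cur, RETRY_TIMEOUT_MS);
        return ktime_before(cur, *end);
    }
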
/linux-master/kernel/
workqueue.c  7782 int cur, pre, cpu, pod; local
7790 for_each_possible_cpu(cur) {
7792 if (pre >= cur) {
7793 pt->cpu_pod[cur] = pt->nr_pods++;
7796 if (cpus_share_pod(cur, pre)) {
7797 pt->cpu_pod[cur] = pt->cpu_pod[pre];
cpu.c  2886 ssize_t cur, res = 0; local
2894 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2895 buf += cur;
2896 res += cur;
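
The cpu.c hit builds its sysfs output by calling sprintf() in a loop, advancing the buffer pointer and the running total by each call's return value. A userspace sketch of that accumulate pattern (the names array is illustrative); in new code, snprintf() with an explicit remaining length is the safer variant:

    #include <stdio.h>

    /* Append "index: name" lines into buf, advancing by each sprintf()
     * return value exactly as the cur/res bookkeeping above does.
     * Returns the total number of characters written. */
    static int format_states(char *buf, const char *const *names, int n)
    {
        int res = 0;

        for (int i = 0; i < n; i++) {
            int cur = sprintf(buf, "%3d: %s\n", i, names[i]);

            buf += cur;
            res += cur;
        }
        return res;
    }
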
/linux-master/fs/nfsd/
nfs4xdr.c  2680 struct path cur = *path; local
2688 path_get(&cur);
2693 if (path_equal(&cur, root))
2695 if (cur.dentry == cur.mnt->mnt_root) {
2696 if (follow_up(&cur))
2709 components[ncomponents++] = cur.dentry;
2710 cur.dentry = dget_parent(cur.dentry);
2742 path_put(&cur);
[all...]
/linux-master/kernel/sched/
fair.c  2175 struct task_struct *cur; local
2186 cur = rcu_dereference(dst_rq->curr);
2187 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
2188 cur = NULL;
2194 if (cur == env->p) {
2199 if (!cur) {
2207 if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
2216 cur
[all...]
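
The fair.c matches come from NUMA balancing's swap-candidate check: the destination runqueue's current task is read under RCU, an exiting or idle task is treated as if the CPU were free, and a task that cannot run on the source CPU is rejected. A hedged sketch of just that filtering predicate, separated from the runqueue handling:

    #include <linux/sched.h>
    #include <linux/cpumask.h>

    /* Return true if 'cur' (the task currently running on the destination
     * CPU) is worth considering as a swap partner: it must exist, must not
     * be exiting or the idle task, and must be allowed on src_cpu. */
    static bool usable_swap_candidate(struct task_struct *cur, int src_cpu)
    {
        if (!cur || (cur->flags & PF_EXITING) || is_idle_task(cur))
            return false;
        return cpumask_test_cpu(src_cpu, cur->cpus_ptr);
    }
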
sched.h  315 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
/linux-master/net/mac80211/
tx.c  3416 int max_amsdu_len = sta->sta.cur->max_amsdu_len;
3445 if (sta->sta.cur->max_rc_amsdu_len)
3447 sta->sta.cur->max_rc_amsdu_len);
3449 if (sta->sta.cur->max_tid_amsdu_len[tid])
3451 sta->sta.cur->max_tid_amsdu_len[tid]);
/linux-master/include/net/
mac80211.h  2424 * @cur: currently valid data as aggregated from the active links
2459 struct ieee80211_sta_aggregates *cur; member in struct:ieee80211_sta
/linux-master/drivers/video/fbdev/core/
fb_defio.c  46 struct fb_deferred_io_pageref *pageref, *cur; local
74 list_for_each_entry(cur, &fbdefio->pagereflist, list) {
75 if (cur->offset > pageref->offset)
78 pos = &cur->list;
247 struct page *cur = pageref->page; local
248 lock_page(cur);
249 page_mkclean(cur);
250 unlock_page(cur);
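
The fb_defio.c lines at 74-78 keep pagereflist sorted by offset: the loop records the last entry whose offset is not larger, and the new pageref is then linked right after that position. A generic hedged sketch of the sorted insert with an invented element type:

    #include <linux/list.h>

    struct offset_item {
        struct list_head list;
        unsigned long offset;
    };

    /* Insert 'new' so the list stays ordered by ascending offset: remember
     * the last node whose offset is not larger, then link the new node
     * right after it (or after the list head if none qualifies). */
    static void insert_sorted(struct list_head *head, struct offset_item *new)
    {
        struct list_head *pos = head;
        struct offset_item *cur;

        list_for_each_entry(cur, head, list) {
            if (cur->offset > new->offset)
                break;
            pos = &cur->list;
        }
        list_add(&new->list, pos);
    }
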
/linux-master/drivers/net/ethernet/intel/igc/
igc_main.c  3816 struct igc_nfc_rule *pred, *cur; local
3824 list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
3825 if (cur->location >= rule->location)
3827 pred = cur;
/linux-master/drivers/net/ethernet/broadcom/
b44.c  593 u32 cur, cons; local
596 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
597 cur /= sizeof(struct dma_desc);
600 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
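
The b44.c hit turns a DMA status register into a descriptor index (the byte offset divided by the descriptor size) and then reclaims completed TX descriptors from the driver's consumer index up to that point, wrapping with NEXT_TX(). A userspace sketch of that ring-consumption loop; the ring size and completion callback are illustrative:

    #include <stddef.h>

    #define RING_SIZE 512                       /* illustrative, power of two */
    #define NEXT_IDX(i) (((i) + 1) & (RING_SIZE - 1))

    struct desc { unsigned int len; };          /* placeholder descriptor */

    /* Consume every completed descriptor between the software consumer
     * index and the hardware's current position, returning the new
     * consumer index. */
    static unsigned int consume_tx(struct desc *ring, unsigned int cons,
                                   unsigned int hw_byte_off,
                                   void (*complete)(struct desc *))
    {
        unsigned int cur = hw_byte_off / sizeof(struct desc);

        for (; cons != cur; cons = NEXT_IDX(cons))
            complete(&ring[cons]);
        return cons;
    }
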
/linux-master/arch/arc/kernel/
kprobes.c  248 struct kprobe *cur = kprobe_running(); local
251 if (!cur)
254 resume_execution(cur, addr, regs);
257 arch_arm_kprobe(cur);
266 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
268 cur->post_handler(cur, regs, 0);
292 struct kprobe *cur = kprobe_running(); local
304 resume_execution(cur, (unsigned long)cur
[all...]
/linux-master/virt/kvm/
kvm_main.c  3843 ktime_t start, cur, poll_end; local
3853 start = cur = poll_end = ktime_get();
3861 poll_end = cur = ktime_get();
3862 } while (kvm_vcpu_can_poll(cur, stop));
3867 cur = ktime_get();
3870 ktime_to_ns(cur) - ktime_to_ns(poll_end);
3872 ktime_to_ns(cur) - ktime_to_ns(poll_end));
3876 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
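
The kvm_main.c hit times vcpu halt polling with three ktime_t samples (start, the end of the polling phase, and the final wakeup) and reports the phases as nanosecond deltas. A hedged sketch of that measurement structure, not the KVM function itself; the poll predicate is a placeholder:

    #include <linux/ktime.h>

    /* Poll 'done()' until it succeeds or 'stop' passes, then report how
     * long the polling phase lasted and how long the whole wait took,
     * both in nanoseconds. */
    static bool timed_poll(bool (*done)(void), ktime_t stop,
                           s64 *poll_ns, s64 *total_ns)
    {
        ktime_t start, cur, poll_end;
        bool success = false;

        start = cur = poll_end = ktime_get();
        do {
            if (done()) {
                success = true;
                break;
            }
            poll_end = cur = ktime_get();
        } while (ktime_before(cur, stop));

        *poll_ns = ktime_to_ns(poll_end) - ktime_to_ns(start);
        *total_ns = ktime_to_ns(cur) - ktime_to_ns(start);
        return success;
    }
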

Completed in 614 milliseconds
