Searched refs:prev (Results 26 - 50 of 1036) sorted by last modified time

/linux-master/kernel/trace/
ftrace.c
7467 struct task_struct *prev,
7466 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, struct task_struct *prev, struct task_struct *next, unsigned int prev_state) argument
fgraph.c
441 struct task_struct *prev,
457 prev->ftrace_timestamp = timestamp;
440 ftrace_graph_probe_sched_switch(void *ignore, bool preempt, struct task_struct *prev, struct task_struct *next, unsigned int prev_state) argument
/linux-master/kernel/
seccomp.c
200 * and once for the dependent filter (tracked in filter->prev).
210 * @prev: points to a previously installed, or inherited, filter
216 * seccomp_filter objects are organized in a tree linked via the @prev
219 * However, multiple filters may share a @prev node, by way of fork(), which
232 struct seccomp_filter *prev; member in struct:seccomp_filter
425 for (; f; f = f->prev) {
475 for (; child; child = child->prev)
536 orig = orig->prev;
545 orig = orig->prev;
848 sfilter->prev
[all...]
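The seccomp.c comments above describe filters organized as a singly linked chain through @prev, with children created by fork() sharing an ancestor's chain rather than copying it, and evaluation walking the chain with "for (; f; f = f->prev)". A minimal userspace sketch of that layout and walk; the struct filter type, install() and run_filters() helpers are invented for illustration, not the kernel's API:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct seccomp_filter: each node only points
 * back at the filter installed before it, so a forked child can reuse an
 * ancestor's chain without copying it. */
struct filter {
    int id;                 /* placeholder for the attached BPF program */
    struct filter *prev;    /* previously installed, or inherited, filter */
};

static struct filter *install(struct filter *chain, int id)
{
    struct filter *f = malloc(sizeof(*f));

    f->id = id;
    f->prev = chain;        /* new head; the old chain becomes @prev */
    return f;
}

/* Walk from the most recently installed filter back to the oldest,
 * mirroring the "for (; f; f = f->prev)" loop seen in seccomp.c. */
static void run_filters(const struct filter *f)
{
    for (; f; f = f->prev)
        printf("running filter %d\n", f->id);
}

int main(void)
{
    struct filter *task = NULL;

    task = install(task, 1);
    task = install(task, 2);    /* a forked child could also share this chain */
    run_filters(task);          /* prints 2 then 1 */
    return 0;
}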
kprobes.c
2066 struct kprobe *prev = kprobe_running(); local
2071 __this_cpu_write(current_kprobe, prev);
/linux-master/kernel/sched/
rt.c
323 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) argument
326 return rq->online && rq->rt.highest_prio.curr > prev->prio;
1703 * If prev task was rt, put_prev_task() has already updated the
2613 if (rt_se->run_list.prev != rt_se->run_list.next) {
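The requeue test above (run_list.prev != run_list.next) leans on a property of the kernel's circular list_head: for a node hanging off a head sentinel, prev and next are equal only when the node is the sole queued entry, so moving it to the tail would be a no-op. A stand-alone sketch of that check; the miniature list type below is hand-rolled, not <linux/list.h>:

#include <stdbool.h>
#include <stdio.h>

/* Miniature circular doubly linked list in the style of list_head. */
struct node { struct node *prev, *next; };

static void list_init(struct node *head) { head->prev = head->next = head; }

static void list_add_tail(struct node *n, struct node *head)
{
    n->prev = head->prev;
    n->next = head;
    head->prev->next = n;
    head->prev = n;
}

/* prev == next can only happen when both neighbours are the head
 * sentinel, i.e. the node is the only queued entry; rt.c uses the
 * inequality to skip a pointless move-to-tail. */
static bool has_siblings(const struct node *n)
{
    return n->prev != n->next;
}

int main(void)
{
    struct node head, a, b;

    list_init(&head);
    list_add_tail(&a, &head);
    printf("a has siblings: %d\n", has_siblings(&a)); /* 0 */
    list_add_tail(&b, &head);
    printf("a has siblings: %d\n", has_siblings(&a)); /* 1 */
    return 0;
}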
fair.c
386 * to the prev element but it will point to rq->leaf_cfs_rq_list
390 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
4055 struct list_head *prev;
4058 prev = cfs_rq->leaf_cfs_rq_list.prev;
4062 prev = rq->tmp_alone_branch;
4065 prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
4178 struct cfs_rq *prev, struct cfs_rq *next)
4193 if (!(se->avg.last_update_time && prev))
4196 p_last_update_time = cfs_rq_last_update_time(prev);
4054 struct list_head *prev; local
4176 set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next) argument
5486 put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) argument
7540 select_idle_sibling(struct task_struct *p, int prev, int target) argument
8323 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
8460 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
8612 put_prev_task_fair(struct rq *rq, struct task_struct *prev) argument
[all...]
deadline.c
635 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) argument
637 return rq->online && dl_task(prev);
1190 * prev->on_rq = 0;
core.c
4331 * this task as prev, considering queueing p on the remote CPUs wake_list
4354 * this task as prev, wait until it's done referencing the task.
5018 static inline void finish_task(struct task_struct *prev) argument
5022 * This must be the very last reference to @prev from this CPU. After
5027 * In particular, the load of prev->state in finish_task_switch() must
5032 smp_store_release(&prev->on_cpu, 0);
5158 * prev into current:
5196 * @prev: the current task that is being switched out
5207 prepare_task_switch(struct rq *rq, struct task_struct *prev, argument
5210 kcov_prepare_switch(prev);
5276 vtime_task_switch(prev); variable
5278 finish_task(prev); variable
5315 put_task_stack(prev); variable
5317 put_task_struct_rcu_user(prev); variable
5339 finish_task_switch(prev); variable
5352 context_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next, struct rq_flags *rf) argument
5930 __schedule_bug(struct task_struct *prev) argument
5958 schedule_debug(struct task_struct *prev, bool preempt) argument
5989 put_prev_task_balance(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
6015 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
6107 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
6551 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) argument
6617 struct task_struct *prev, *next; local
[all...]
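The finish_task() comments above stress that clearing prev->on_cpu must be the very last access to the task from the outgoing CPU, and that the store is a release so earlier writes are visible before a remote CPU observes on_cpu == 0. A hedged C11 sketch of that release/acquire pairing; the task fields and thread names are invented, and plain atomics stand in for the kernel's smp_store_release()/smp_load_acquire():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Toy task: "state" stands for the fields the outgoing CPU touches last,
 * "on_cpu" for the flag a waking CPU polls before reusing the task. */
struct task {
    int state;
    atomic_int on_cpu;
};

static struct task t = { .state = 0, .on_cpu = 1 };

/* Outgoing CPU: finish with the task, then publish on_cpu = 0 with release
 * semantics so the state write cannot be reordered after it. */
static void *finish_task_cpu(void *arg)
{
    (void)arg;
    t.state = 42;
    atomic_store_explicit(&t.on_cpu, 0, memory_order_release);
    return NULL;
}

/* Waking CPU: spin with acquire loads; once on_cpu reads 0 it is safe to
 * look at everything written before the release store. */
static void *waker_cpu(void *arg)
{
    (void)arg;
    while (atomic_load_explicit(&t.on_cpu, memory_order_acquire))
        ;
    printf("observed state %d\n", t.state); /* guaranteed to print 42 */
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, waker_cpu, NULL);
    pthread_create(&b, NULL, finish_task_cpu, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}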
autogroup.c
159 struct autogroup *prev; local
166 prev = p->signal->autogroup;
167 if (prev == ag) {
175 * this process can already run with task_group() == prev->tg or we can
176 * race with cgroup code which can read autogroup = prev under rq->lock.
188 autogroup_kref_put(prev);
/linux-master/kernel/module/
main.c
1034 char *prev)
1047 if (prev) {
1048 size -= prev - modinfo;
1049 modinfo = module_next_tag_pair(prev, &size);
1033 get_next_modinfo(const struct load_info *info, const char *tag, char *prev) argument
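The get_next_modinfo() snippet above resumes a scan of the .modinfo blob from a previous hit: when prev is given, the remaining size is recomputed and the search continues at the following tag pair. A rough userspace analogue over a buffer of NUL-separated "tag=value" strings; the buffer contents and the get_next() helper are made up, and the real parsing in module/main.c is more involved:

#include <stdio.h>
#include <string.h>

/* Buffer of consecutive "tag=value" strings, each NUL terminated,
 * loosely imitating a .modinfo section. */
static const char modinfo[] = "alias=foo\0param=a\0param=b\0license=GPL\0";
static const size_t modinfo_len = sizeof(modinfo) - 1;

/* Return the value of the next "tag=..." entry, or NULL. Passing the
 * previously returned value as @prev resumes the scan after that entry. */
static const char *get_next(const char *tag, const char *prev)
{
    const char *p = modinfo;
    size_t taglen = strlen(tag);

    if (prev)                          /* skip past the entry prev came from */
        p = prev + strlen(prev) + 1;

    while (p < modinfo + modinfo_len) {
        if (!strncmp(p, tag, taglen) && p[taglen] == '=')
            return p + taglen + 1;
        p += strlen(p) + 1;            /* advance to the next tag pair */
    }
    return NULL;
}

int main(void)
{
    const char *v = NULL;

    while ((v = get_next("param", v)))
        printf("param=%s\n", v);       /* prints a, then b */
    return 0;
}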
/linux-master/kernel/events/
uprobes.c
964 struct map_info *prev = NULL; local
974 if (!prev && !more) {
979 prev = kmalloc(sizeof(struct map_info),
981 if (prev)
982 prev->next = NULL;
984 if (!prev) {
992 info = prev;
993 prev = prev->next;
1005 prev
[all...]
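The uprobes.c snippet above keeps a small free list of preallocated map_info buffers headed by prev: nodes are pushed onto it ahead of time, and inside the locked walk one is popped with "info = prev; prev = prev->next;" so no allocation happens under the lock. A minimal sketch of that preallocate-then-consume pattern; the push()/pop() helpers are illustrative and error handling is omitted:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct map_info. */
struct map_info {
    struct map_info *next;
    int payload;
};

/* Pop one preallocated node off the free list headed by *prevp,
 * mirroring "info = prev; prev = prev->next;" in uprobes.c. */
static struct map_info *pop(struct map_info **prevp)
{
    struct map_info *info = *prevp;

    if (info)
        *prevp = info->next;
    return info;
}

/* Push a node onto the free list. */
static void push(struct map_info **prevp, struct map_info *info)
{
    info->next = *prevp;
    *prevp = info;
}

int main(void)
{
    struct map_info *prev = NULL;
    struct map_info *info;
    int i;

    /* Preallocate outside the "critical section" (failures ignored here)... */
    for (i = 0; i < 3; i++)
        push(&prev, calloc(1, sizeof(struct map_info)));

    /* ...then consume without allocating. */
    while ((info = pop(&prev))) {
        info->payload = 1;
        free(info);
    }
    return 0;
}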
core.c
3641 static void perf_pmu_sched_task(struct task_struct *prev, argument
3649 if (prev == next || cpuctx->task_ctx)
4012 void __perf_event_task_sched_in(struct task_struct *prev, argument
4018 perf_event_switch(task, prev, true);
4021 perf_pmu_sched_task(prev, task, true);
9053 /* Only CPU-wide events are allowed to see next/prev pid/tid */
11153 s64 prev; local
11157 prev = local64_xchg(&event->hw.prev_count, now);
11158 local64_add(now - prev, &event->count);
11231 u64 prev; local
[all...]
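The perf read path above computes a delta by swapping the new timestamp into hw.prev_count and adding now - prev to the event count, so each reader accounts only the time elapsed since the previous read. A C11 sketch of that exchange-and-accumulate idiom; plain atomics replace local64_t here, and the update() helper is illustrative:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for event->hw.prev_count and event->count. */
static _Atomic int64_t prev_count;
static _Atomic int64_t count;

/* Fold the time elapsed since the last update into the running count,
 * mirroring "prev = local64_xchg(&hw.prev_count, now); count += now - prev". */
static void update(int64_t now)
{
    int64_t prev = atomic_exchange(&prev_count, now);

    atomic_fetch_add(&count, now - prev);
}

int main(void)
{
    atomic_store(&prev_count, 100);   /* event "started" at t = 100 */

    update(150);                      /* accounts 50 */
    update(175);                      /* accounts 25 more */
    printf("count = %ld\n", (long)atomic_load(&count)); /* 75 */
    return 0;
}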
/linux-master/include/linux/
skbuff.h
341 struct sk_buff *prev;
752 * @prev: Previous buffer in list
756 * @rbnode: RB tree node, alternative to next/prev for netem/tcp
864 struct sk_buff *prev; member in struct:sk_buff::__anon432::__anon433
1867 return skb->prev == (const struct sk_buff *) list;
1889 * skb_queue_prev - return the prev packet in the queue
1893 * Return the prev packet in @list before @skb. It is only valid to
1903 return skb->prev;
2149 struct sk_buff *skb = READ_ONCE(list_->prev);
2192 list->prev
2223 __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list) argument
2237 __skb_queue_splice(const struct sk_buff_head *list, struct sk_buff *prev, struct sk_buff *next) argument
2325 __skb_queue_after(struct sk_buff_head *list, struct sk_buff *prev, struct sk_buff *newsk) argument
2383 struct sk_buff *next, *prev; local
[all...]
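Several of the skbuff.h hits above rely on the queue head doubling as a sentinel in the ring: a buffer whose prev points back at the head is the first packet, and the head's own prev is the tail (what skb_peek_tail reads). A compact sketch of that sentinel convention; buf and buf_head are minimal stand-ins that share their first two members, like sk_buff and sk_buff_head, and are not the real API:

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins: head and buffers share their leading next/prev
 * pointers, so the head can sit in the ring like any other node. */
struct buf { struct buf *next, *prev; int id; };
struct buf_head { struct buf *next, *prev; };

static void queue_init(struct buf_head *q)
{
    q->next = q->prev = (struct buf *)q;
}

static void queue_tail(struct buf_head *q, struct buf *b)
{
    b->next = (struct buf *)q;
    b->prev = q->prev;
    q->prev->next = b;
    q->prev = b;
}

/* skb_queue_is_first()-style test: prev pointing at the head means
 * nothing comes before this buffer. */
static bool is_first(const struct buf_head *q, const struct buf *b)
{
    return b->prev == (const struct buf *)q;
}

/* skb_peek_tail()-style read: the head's prev is the last buffer. */
static struct buf *peek_tail(struct buf_head *q)
{
    return q->prev == (struct buf *)q ? NULL : q->prev;
}

int main(void)
{
    struct buf_head q;
    struct buf a = { .id = 1 }, b = { .id = 2 };

    queue_init(&q);
    queue_tail(&q, &a);
    queue_tail(&q, &b);
    printf("a first: %d, tail id: %d\n", is_first(&q, &a), peek_tail(&q)->id);
    return 0;
}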
pgtable.h
1386 #define arch_start_context_switch(prev) do {} while (0)
mm.h
225 /* to align the pointer to the (prev) page boundary */
233 return list_entry((head)->prev, struct folio, lru);
3286 struct vm_area_struct *prev,
3331 struct vm_area_struct *prev,
3342 struct vm_area_struct *prev,
3347 return vma_modify(vmi, prev, vma, start, end, new_flags,
3355 struct vm_area_struct *prev,
3362 return vma_modify(vmi, prev, vma, start, end, new_flags,
3369 struct vm_area_struct *prev,
3374 return vma_modify(vmi, prev, vm
3341 vma_modify_flags(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long new_flags) argument
3354 vma_modify_flags_name(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long new_flags, struct anon_vma_name *new_name) argument
3368 vma_modify_policy(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct mempolicy *new_pol) argument
3380 vma_modify_flags_uffd(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long new_flags, struct vm_userfaultfd_ctx new_ctx) argument
[all...]
memcontrol.h
1360 struct mem_cgroup *prev,
1367 struct mem_cgroup *prev)
1359 mem_cgroup_iter(struct mem_cgroup *root, struct mem_cgroup *prev, struct mem_cgroup_reclaim_cookie *reclaim) argument
1366 mem_cgroup_iter_break(struct mem_cgroup *root, struct mem_cgroup *prev) argument
huge_mm.h
146 static inline int next_order(unsigned long *orders, int prev) argument
148 *orders &= ~BIT(prev);
340 struct vm_area_struct **prev,
485 struct vm_area_struct **prev,
484 madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
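next_order() above treats orders as a bitmask of candidate folio orders: given the previously tried order it clears that bit, letting callers walk the allowed orders from largest to smallest. A self-contained approximation; the highest_bit() helper and the specific order values are mine, the "return the next remaining order" behaviour is an assumption hedged from the snippet, and __builtin_clzl is a GCC/Clang builtin:

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Index of the highest set bit, or -1 if the mask is empty. */
static int highest_bit(unsigned long mask)
{
    return mask ? (int)(sizeof(mask) * 8 - 1) - __builtin_clzl(mask) : -1;
}

/* Clear the order we just tried and return the next-largest candidate,
 * in the spirit of next_order() from huge_mm.h. */
static int next_order(unsigned long *orders, int prev)
{
    *orders &= ~BIT(prev);
    return highest_bit(*orders);
}

int main(void)
{
    /* Hypothetical candidate orders 9, 4 and 0, tried largest first. */
    unsigned long orders = BIT(9) | BIT(4) | BIT(0);
    int order;

    for (order = highest_bit(orders); order >= 0;
         order = next_order(&orders, order))
        printf("try order %d\n", order);   /* 9, 4, 0 */
    return 0;
}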
damon.h
640 return container_of(r->list.prev, struct damon_region, list);
700 struct damon_region *prev, struct damon_region *next,
703 __list_add(&r->list, &prev->list, &next->list);
699 damon_insert_region(struct damon_region *r, struct damon_region *prev, struct damon_region *next, struct damon_target *t) argument
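damon_insert_region() above splices a new region between two known neighbours with __list_add(&r->list, &prev->list, &next->list). A tiny stand-alone version of that three-pointer splice; the struct name and list_add_between() are shortened for the sketch, and the kernel's debug checks and WRITE_ONCE are omitted:

#include <stdio.h>

struct list { struct list *prev, *next; };

/* Same wiring as the kernel's __list_add(): link @n in between two
 * consecutive nodes @prev and @next. */
static void list_add_between(struct list *n, struct list *prev, struct list *next)
{
    next->prev = n;
    n->next = next;
    n->prev = prev;
    prev->next = n;
}

int main(void)
{
    struct list head = { &head, &head };   /* empty circular list */
    struct list a, b, c;

    list_add_between(&a, &head, &head);    /* head <-> a */
    list_add_between(&c, &a, &head);       /* head <-> a <-> c */
    list_add_between(&b, &a, &c);          /* insert b between its neighbours */

    for (struct list *p = head.next; p != &head; p = p->next)
        printf("%s", p == &a ? "a " : p == &b ? "b " : "c ");
    printf("\n");                          /* a b c */
    return 0;
}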
dma-fence-chain.h
20 * @prev: previous fence of the chain
27 struct dma_fence __rcu *prev; member in struct:dma_fence_chain
123 struct dma_fence *prev,
/linux-master/include/linux/sched/
mm.h
564 static inline void membarrier_arch_switch_mm(struct mm_struct *prev, argument
/linux-master/fs/
userfaultfd.c
535 * list_del_init() to refile across the two lists, the prev
873 struct vm_area_struct *vma, *prev; local
893 prev = NULL;
899 prev = vma;
907 vma = vma_modify_flags_uffd(&vmi, prev, vma, vma->vm_start,
915 prev = vma;
1296 struct vm_area_struct *vma, *prev, *cur; local
1432 prev = vma_prev(&vmi);
1434 prev = vma;
1458 vma = vma_modify_flags_uffd(&vmi, prev, vm
1519 struct vm_area_struct *vma, *prev, *cur; local
[all...]
/linux-master/fs/jbd2/
commit.c
810 struct buffer_head *bh = list_entry(io_bufs.prev,
856 bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
/linux-master/fs/ext4/
page-io.c
181 "list->prev 0x%p\n",
182 io_end, inode->i_ino, io_end->list.next, io_end->list.prev);
209 before = cur->prev;
214 ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
namei.c
1949 struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base; local
1952 prev = to = de;
1960 prev = to;
1965 return prev;
extents.c
527 ext4_lblk_t prev = 0; local
535 if (prev && (prev != lblk))
536 ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
543 prev = lblk + len;
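The extents.c hit above keeps prev as the logical block just past the previously cached extent; whenever the next extent does not start exactly at prev, the gap [prev, lblk) is cached as a hole, and prev then advances to lblk + len. A small sketch of that gap-detection walk over a sorted extent list; the types and the cache_hole()/cache_extent() callbacks are illustrative, not ext4's:

#include <stdio.h>

/* A mapped extent: logical start block and length, sorted by lblk. */
struct extent { unsigned int lblk, len; };

static void cache_hole(unsigned int start, unsigned int len)
{
    printf("hole   at %u, len %u\n", start, len);
}

static void cache_extent(unsigned int start, unsigned int len)
{
    printf("extent at %u, len %u\n", start, len);
}

/* Walk sorted extents, reporting any gap between the end of the previous
 * extent (tracked in prev) and the start of the current one as a hole. */
static void cache_extents(const struct extent *ex, int nr)
{
    unsigned int prev = 0;

    for (int i = 0; i < nr; i++) {
        unsigned int lblk = ex[i].lblk, len = ex[i].len;

        if (prev && prev != lblk)
            cache_hole(prev, lblk - prev);
        cache_extent(lblk, len);
        prev = lblk + len;
    }
}

int main(void)
{
    const struct extent map[] = { { 0, 8 }, { 8, 4 }, { 20, 2 } };

    cache_extents(map, 3);   /* reports one hole at 12, len 8 */
    return 0;
}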

Completed in 437 milliseconds
