// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/acct.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>
#include <linux/sched/cputime.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family;

static const struct nla_policy taskstats_cmd_get_policy[] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

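/*
 * Allocate a new sk_buff for a taskstats reply and write the genetlink
 * message header: as a reply to @info when one is supplied, or as a
 * standalone message using a per-cpu sequence number when @info is NULL.
 */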
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb back to the requester identified by @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);

	genlmsg_end(skb, reply);

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to every listener registered in @listeners,
 * the per-cpu list of listeners for this CPU's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int delcount = 0;

	genlmsg_end(skb, reply);

	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		int rc;

		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

static void exe_add_tsk(struct taskstats *stats, struct task_struct *tsk)
{
	/* No idea if I'm allowed to access that here, now. */
	struct file *exe_file = get_task_exe_file(tsk);

	if (exe_file) {
		/* Following cp_new_stat64() in stat.c. */
		stats->ac_exe_dev =
			huge_encode_dev(exe_file->f_inode->i_sb->s_dev);
		stats->ac_exe_inode = exe_file->f_inode->i_ino;
		fput(exe_file);
	} else {
		stats->ac_exe_dev = 0;
		stats->ac_exe_inode = 0;
	}
}

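/*
 * Fill @stats with the delay-accounting, basic and extended accounting
 * data for a single task.
 */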
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);

	/* add executable info */
	exe_add_tsk(stats, tsk);
}

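/*
 * Fill @stats for the task with (virtual) pid @pid, taking and dropping a
 * reference on its task_struct.
 */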
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	tsk = find_get_task_by_vpid(pid);
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}

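/*
 * Fill @stats for the thread group led by (virtual) tgid @tgid: start from
 * the accumulated stats of already-dead threads in signal->stats, then add
 * the per-task stats of each live thread under siglock.
 */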
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;
	u64 delta, utime, stime;
	u64 start_time;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	start_time = ktime_get_ns();
	for_each_thread(first, tsk) {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		/* calculate task elapsed time in nsec */
		delta = start_time - tsk->start_time;
		/* Convert to microseconds */
		do_div(delta, NSEC_PER_USEC);
		stats->ac_etime += delta;

		task_cputime(tsk, &utime, &stime);
		stats->ac_utime += div_u64(utime, NSEC_PER_USEC);
		stats->ac_stime += div_u64(stime, NSEC_PER_USEC);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	}

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

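/*
 * Fold the exiting task's per-task stats into its thread group's
 * accumulated stats (tsk->signal->stats), if that structure was allocated.
 */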
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

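/*
 * Register or deregister the listener with netlink port id @pid on the
 * per-cpu listener lists of every CPU in @mask.  Only tasks in the initial
 * user and pid namespaces may (de)register.
 */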
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;
	int ret = 0;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s) {
				ret = -ENOMEM;
				goto cleanup;
			}
			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return ret;
}

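/*
 * Parse the cpulist string carried in @na into @mask.  Returns 1 if the
 * attribute is absent, 0 on success and a negative errno on error.
 */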
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strscpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

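/*
 * Start the nested PID/TGID aggregate attribute in @skb, add the pid/tgid
 * attribute and reserve space for the taskstats payload.  Returns a pointer
 * to the reserved struct taskstats, or NULL if the skb ran out of room.
 */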
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	na = nla_nest_start_noflag(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
				sizeof(struct taskstats), TASKSTATS_TYPE_NULL);
	if (!ret) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

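/*
 * Handle CGROUPSTATS_CMD_GET: look up the cgroup directory given by the
 * CGROUPSTATS_CMD_ATTR_FD file descriptor, build its cgroupstats and send
 * them back to the requester.
 */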
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_path.dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}

static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

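/*
 * Space needed for one pid/tgid reply: the nested aggregate attribute, the
 * u32 pid/tgid attribute and the 64-bit aligned taskstats payload.
 */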
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(sizeof(struct taskstats)) +
		nla_total_size(0);

	return size;
}

static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

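/*
 * Dispatch TASKSTATS_CMD_GET based on which command attribute was supplied:
 * cpumask (de)registration or a one-shot pid/tgid query.
 */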
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

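/*
 * Return the thread group's taskstats structure, allocating it on first use.
 * Returns NULL for a single-threaded task that has no structure yet.
 */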
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats_new, *stats;

	/* Pairs with smp_store_release() below. */
	stats = smp_load_acquire(&sig->stats);
	if (stats || thread_group_empty(tsk))
		return stats;

	/* No problem if kmem_cache_zalloc() fails */
	stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	stats = sig->stats;
	if (!stats) {
		/*
		 * Pairs with the smp_load_acquire() above and orders the
		 * kmem_cache_zalloc().
		 */
		smp_store_release(&sig->stats, stats_new);
		stats = stats_new;
		stats_new = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats_new)
		kmem_cache_free(taskstats_cache, stats_new);

	return stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = raw_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);
	if (group_dead)
		stats->ac_flag |= AGROUP;

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static const struct genl_ops taskstats_ops[] = {
	{
		.cmd		= TASKSTATS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= taskstats_user_cmd,
		.policy		= taskstats_cmd_get_policy,
		.maxattr	= ARRAY_SIZE(taskstats_cmd_get_policy) - 1,
		.flags		= GENL_ADMIN_PERM,
	},
	{
		.cmd		= CGROUPSTATS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= cgroupstats_user_cmd,
		.policy		= cgroupstats_cmd_get_policy,
		.maxattr	= ARRAY_SIZE(cgroupstats_cmd_get_policy) - 1,
	},
};

static struct genl_family family __ro_after_init = {
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.module		= THIS_MODULE,
	.ops		= taskstats_ops,
	.n_ops		= ARRAY_SIZE(taskstats_ops),
	.resv_start_op	= CGROUPSTATS_CMD_GET + 1,
	.netnsok	= true,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);