#include <linux/mm.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
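/*
 * Append a memory usage summary for @mm to @buffer and return the
 * advanced buffer pointer.  The report has the form (values below are
 * purely illustrative):
 *
 *	Mem:	  123456 bytes
 *	Slack:	    4096 bytes
 *	Shared:	   65536 bytes
 *
 * "Slack" is the space kobjsize() says a private mapping occupies
 * beyond what was actually asked for (vm_end - vm_start).
 */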
char *task_mem(struct mm_struct *mm, char *buffer)
{
	struct vm_list_struct *vml;
	unsigned long bytes = 0, sbytes = 0, slack = 0;

	down_read(&mm->mmap_sem);
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		if (!vml->vma)
			continue;

		bytes += kobjsize(vml);
		if (atomic_read(&mm->mm_count) > 1 ||
		    atomic_read(&vml->vma->vm_usage) > 1
		    ) {
			sbytes += kobjsize((void *) vml->vma->vm_start);
			sbytes += kobjsize(vml->vma);
		} else {
			bytes += kobjsize((void *) vml->vma->vm_start);
			bytes += kobjsize(vml->vma);
			slack += kobjsize((void *) vml->vma->vm_start) -
				(vml->vma->vm_end - vml->vma->vm_start);
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && atomic_read(&current->fs->count) > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	buffer += sprintf(buffer,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
	return buffer;
}

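/*
 * Report a task's "virtual size" on nommu: the sum of kobjsize() over
 * every allocation backing a VMA on mm->context.vmlist.
 */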
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_list_struct *tbp;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		if (tbp->vma)
			vsize += kobjsize((void *) tbp->vma->vm_start);
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

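/*
 * Fill in the /proc/<pid>/statm-style counters.  With !MMU everything
 * accounted here is resident, so *resident is simply set to the same
 * total that is returned; note that *shared is left untouched by this
 * variant.
 */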
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	struct vm_list_struct *tbp;
	int size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		size += kobjsize(tbp);
		if (tbp->vma) {
			size += kobjsize(tbp->vma);
			size += kobjsize((void *) tbp->vma->vm_start);
		}
	}

	size += (*text = mm->end_code - mm->start_code);
	size += (*data = mm->start_stack - mm->start_data);
	up_read(&mm->mmap_sem);
	*resident = size;
	return size;
}

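/*
 * Resolve /proc/<pid>/exe: find the first file-backed VMA carrying
 * VM_EXECUTABLE and hand back extra references to its vfsmount and
 * dentry, or -ENOENT if there is no such mapping.
 */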
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_list_struct *vml;
	struct vm_area_struct *vma;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = get_task_mm(task);
	int result = -ENOENT;

	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	vml = mm->context.vmlist;
	vma = NULL;
	while (vml) {
		if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
			vma = vml->vma;
			break;
		}
		vml = vml->next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_path.mnt);
		*dentry = dget(vma->vm_file->f_path.dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_vml)
{
	struct vm_list_struct *vml = _vml;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	return nommu_vma_show(m, vml->vma);
}

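/*
 * seq_file iterator: m_start() pins the task and its mm, takes
 * mmap_sem for reading and returns the *pos'th entry on the VMA list;
 * m_next() steps to the following entry; m_stop() drops the lock and
 * the references taken in m_start().
 */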
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_list_struct *vml;
	struct mm_struct *mm;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = get_task_mm(priv->task);
	if (!mm) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	down_read(&mm->mmap_sem);

	/* start from the Nth VMA */
	for (vml = mm->context.vmlist; vml; vml = vml->next)
		if (n-- == 0)
			return vml;
	return NULL;
}

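/* undo m_start(): release mmap_sem, the mm and the task reference */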
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (priv->task) {
		struct mm_struct *mm = priv->task->mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
		put_task_struct(priv->task);
	}
}

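/* advance the iterator to the next record on the VMA list */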
static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
{
	struct vm_list_struct *vml = _vml;

	(*pos)++;
	return vml ? vml->next : NULL;
}

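/* glue the iterator callbacks into the seq_file machinery */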
static struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

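/*
 * Open /proc/<pid>/maps: allocate the per-open private state, record
 * which pid it refers to and attach it to the seq_file; it is freed
 * again by seq_release_private() when the file is closed.
 */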
static int maps_open(struct inode *inode, struct file *file)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, &proc_pid_maps_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

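/* reads and seeks on /proc/<pid>/maps go through the generic seq_file helpers */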
const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};