• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/powerpc/platforms/pseries/
1/*
2 * Virtual Processor Dispatch Trace Log
3 *
4 * (C) Copyright IBM Corporation 2009
5 *
6 * Author: Jeremy Kerr <jk@ozlabs.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/init.h>
24#include <linux/slab.h>
25#include <linux/debugfs.h>
26#include <asm/smp.h>
27#include <asm/system.h>
28#include <asm/uaccess.h>
29#include <asm/firmware.h>
30
31#include "plpar_wrappers.h"
32
/*
 * Layout of entries in the hypervisor's DTL buffer. Although we don't
 * actually access the internals of an entry (we only need to know the size),
 * we might as well define it here for reference.
 */
struct dtl_entry {
	u8	dispatch_reason;	/* why the vcpu was dispatched */
	u8	preempt_reason;		/* why the vcpu was preempted */
	u16	processor_id;
	u32	enqueue_to_dispatch_time;	/* presumably timebase ticks — not interpreted here */
	u32	ready_to_enqueue_time;
	u32	waiting_to_ready_time;
	u64	timebase;
	u64	fault_addr;
	u64	srr0;
	u64	srr1;
};
50
/*
 * Per-cpu state for one dispatch trace log: the buffer shared with the
 * hypervisor, the debugfs file that exposes it, and the read cursor.
 */
struct dtl {
	struct dtl_entry	*buf;		/* log buffer; NULL while disabled */
	struct dentry		*file;		/* debugfs "cpu-N" file */
	int			cpu;		/* logical cpu this log belongs to */
	int			buf_entries;	/* capacity of buf, in entries */
	u64			last_idx;	/* next hypervisor index to hand to userspace */
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);
59
/*
 * Dispatch trace log event mask:
 * 0x7: 0x1: voluntary virtual processor waits
 *      0x2: time-slice preempts
 *      0x4: virtual partition memory page faults
 *
 * Tunable at runtime via the dtl_event_mask debugfs file; a new value only
 * takes effect the next time logging is enabled (dtl_enable() copies it
 * into the lppaca).
 */
static u8 dtl_event_mask = 0x7;
67
68
69/*
70 * Size of per-cpu log buffers. Default is just under 16 pages worth.
71 */
72static int dtl_buf_entries = (16 * 85);
73
74
75static int dtl_enable(struct dtl *dtl)
76{
77	unsigned long addr;
78	int ret, hwcpu;
79
80	/* only allow one reader */
81	if (dtl->buf)
82		return -EBUSY;
83
84	/* we need to store the original allocation size for use during read */
85	dtl->buf_entries = dtl_buf_entries;
86
87	dtl->buf = kmalloc_node(dtl->buf_entries * sizeof(struct dtl_entry),
88			GFP_KERNEL, cpu_to_node(dtl->cpu));
89	if (!dtl->buf) {
90		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
91				__func__, dtl->cpu);
92		return -ENOMEM;
93	}
94
95	/* Register our dtl buffer with the hypervisor. The HV expects the
96	 * buffer size to be passed in the second word of the buffer */
97	((u32 *)dtl->buf)[1] = dtl->buf_entries * sizeof(struct dtl_entry);
98
99	hwcpu = get_hard_smp_processor_id(dtl->cpu);
100	addr = __pa(dtl->buf);
101	ret = register_dtl(hwcpu, addr);
102	if (ret) {
103		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
104		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
105		kfree(dtl->buf);
106		return -EIO;
107	}
108
109	/* set our initial buffer indices */
110	dtl->last_idx = lppaca[dtl->cpu].dtl_idx = 0;
111
112	/* ensure that our updates to the lppaca fields have occurred before
113	 * we actually enable the logging */
114	smp_wmb();
115
116	/* enable event logging */
117	lppaca[dtl->cpu].dtl_enable_mask = dtl_event_mask;
118
119	return 0;
120}
121
122static void dtl_disable(struct dtl *dtl)
123{
124	int hwcpu = get_hard_smp_processor_id(dtl->cpu);
125
126	lppaca[dtl->cpu].dtl_enable_mask = 0x0;
127
128	unregister_dtl(hwcpu, __pa(dtl->buf));
129
130	kfree(dtl->buf);
131	dtl->buf = NULL;
132	dtl->buf_entries = 0;
133}
134
135/* file interface */
136
137static int dtl_file_open(struct inode *inode, struct file *filp)
138{
139	struct dtl *dtl = inode->i_private;
140	int rc;
141
142	rc = dtl_enable(dtl);
143	if (rc)
144		return rc;
145
146	filp->private_data = dtl;
147	return 0;
148}
149
150static int dtl_file_release(struct inode *inode, struct file *filp)
151{
152	struct dtl *dtl = inode->i_private;
153	dtl_disable(dtl);
154	return 0;
155}
156
157static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
158		loff_t *pos)
159{
160	int rc, cur_idx, last_idx, n_read, n_req, read_size;
161	struct dtl *dtl;
162
163	if ((len % sizeof(struct dtl_entry)) != 0)
164		return -EINVAL;
165
166	dtl = filp->private_data;
167
168	/* requested number of entries to read */
169	n_req = len / sizeof(struct dtl_entry);
170
171	/* actual number of entries read */
172	n_read = 0;
173
174	cur_idx = lppaca[dtl->cpu].dtl_idx;
175	last_idx = dtl->last_idx;
176
177	if (cur_idx - last_idx > dtl->buf_entries) {
178		pr_debug("%s: hv buffer overflow for cpu %d, samples lost\n",
179				__func__, dtl->cpu);
180	}
181
182	cur_idx  %= dtl->buf_entries;
183	last_idx %= dtl->buf_entries;
184
185	/* read the tail of the buffer if we've wrapped */
186	if (last_idx > cur_idx) {
187		read_size = min(n_req, dtl->buf_entries - last_idx);
188
189		rc = copy_to_user(buf, &dtl->buf[last_idx],
190				read_size * sizeof(struct dtl_entry));
191		if (rc)
192			return -EFAULT;
193
194		last_idx = 0;
195		n_req -= read_size;
196		n_read += read_size;
197		buf += read_size * sizeof(struct dtl_entry);
198	}
199
200	/* .. and now the head */
201	read_size = min(n_req, cur_idx - last_idx);
202	rc = copy_to_user(buf, &dtl->buf[last_idx],
203			read_size * sizeof(struct dtl_entry));
204	if (rc)
205		return -EFAULT;
206
207	n_read += read_size;
208	dtl->last_idx += n_read;
209
210	return n_read * sizeof(struct dtl_entry);
211}
212
/* Operations for the per-cpu debugfs files: open enables logging,
 * release disables it, reads drain newly-logged entries. */
static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};

/* debugfs root directory ("dtl") holding the per-cpu files and tunables */
static struct dentry *dtl_dir;
221
222static int dtl_setup_file(struct dtl *dtl)
223{
224	char name[10];
225
226	sprintf(name, "cpu-%d", dtl->cpu);
227
228	dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
229	if (!dtl->file)
230		return -ENOMEM;
231
232	return 0;
233}
234
235static int dtl_init(void)
236{
237	struct dentry *event_mask_file, *buf_entries_file;
238	int rc, i;
239
240	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
241		return -ENODEV;
242
243	/* set up common debugfs structure */
244
245	rc = -ENOMEM;
246	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
247	if (!dtl_dir) {
248		printk(KERN_WARNING "%s: can't create dtl root dir\n",
249				__func__);
250		goto err;
251	}
252
253	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
254				dtl_dir, &dtl_event_mask);
255	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0600,
256				dtl_dir, &dtl_buf_entries);
257
258	if (!event_mask_file || !buf_entries_file) {
259		printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
260		goto err_remove_dir;
261	}
262
263	/* set up the per-cpu log structures */
264	for_each_possible_cpu(i) {
265		struct dtl *dtl = &per_cpu(cpu_dtl, i);
266		dtl->cpu = i;
267
268		rc = dtl_setup_file(dtl);
269		if (rc)
270			goto err_remove_dir;
271	}
272
273	return 0;
274
275err_remove_dir:
276	debugfs_remove_recursive(dtl_dir);
277err:
278	return rc;
279}
280arch_initcall(dtl_init);
281