/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nvme/nvme_sysctl.c 241658 2012-10-18 00:37:11Z jimharris $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/sysctl.h>

#include "nvme_private.h"

/*
 * CTLTYPE_S64 and sysctl_handle_64 were added in r217616.  Define these
 * explicitly here for older kernels that don't include the r217616
 * changeset.
 */
#ifndef CTLTYPE_S64
#define CTLTYPE_S64		CTLTYPE_QUAD
#define sysctl_handle_64	sysctl_handle_quad
#endif

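/*
 * Dump a queue pair's completion and submission rings to the console.
 * Debugging aid only, reached through the per-queue "dump_debug" sysctl
 * registered below.
 */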
static void
nvme_dump_queue(struct nvme_qpair *qpair)
{
	struct nvme_completion *cpl;
	struct nvme_command *cmd;
	int i;

	printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);

	printf("Completion queue:\n");
	for (i = 0; i < qpair->num_entries; i++) {
		cpl = &qpair->cpl[i];
		printf("%05d: ", i);
		nvme_dump_completion(cpl);
	}

	printf("Submission queue:\n");
	for (i = 0; i < qpair->num_entries; i++) {
		cmd = &qpair->cmd[i];
		printf("%05d: ", i);
		nvme_dump_command(cmd);
	}
}

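/*
 * Handler for each queue's "dump_debug" sysctl.  Writing any non-zero
 * value (e.g. "sysctl dev.nvme.0.ioq0.dump_debug=1") dumps that queue
 * pair's rings to the console; reading it just returns 0.
 */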
static int
nvme_sysctl_dump_debug(SYSCTL_HANDLER_ARGS)
{
	struct nvme_qpair	*qpair = arg1;
	uint32_t		val = 0;

	int error = sysctl_handle_int(oidp, &val, 0, req);

	if (error)
		return (error);

	if (val != 0)
		nvme_dump_queue(qpair);

	return (0);
}

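/*
 * Handler for the "int_coal_time" sysctl.  If the value changed, push
 * the new coalescing time (along with the current threshold) down to
 * the controller.
 */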
static int
nvme_sysctl_int_coal_time(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t oldval = ctrlr->int_coal_time;
	int error = sysctl_handle_int(oidp, &ctrlr->int_coal_time, 0,
	    req);

	if (error)
		return (error);

	if (oldval != ctrlr->int_coal_time)
		nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
		    ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
		    NULL);

	return (0);
}

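/*
 * Handler for the "int_coal_threshold" sysctl.  If the value changed,
 * push the new coalescing threshold (along with the current time) down
 * to the controller.
 */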
static int
nvme_sysctl_int_coal_threshold(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t oldval = ctrlr->int_coal_threshold;
	int error = sysctl_handle_int(oidp, &ctrlr->int_coal_threshold, 0,
	    req);

	if (error)
		return (error);

	if (oldval != ctrlr->int_coal_threshold)
		nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
		    ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
		    NULL);

	return (0);
}

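/* Zero a queue pair's statistics counters. */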
static void
nvme_qpair_reset_stats(struct nvme_qpair *qpair)
{

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;
}

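/*
 * Handler for the controller-wide "num_cmds" sysctl: the sum of the
 * commands submitted on the admin queue and on every I/O queue.
 */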
static int
nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller	*ctrlr = arg1;
	int64_t			num_cmds = 0;
	int			i;

	num_cmds = ctrlr->adminq.num_cmds;

	for (i = 0; i < ctrlr->num_io_queues; i++)
		num_cmds += ctrlr->ioq[i].num_cmds;

	return (sysctl_handle_64(oidp, &num_cmds, 0, req));
}

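/*
 * Handler for the controller-wide "num_intr_handler_calls" sysctl: the
 * sum of interrupt handler invocations across the admin queue and every
 * I/O queue.
 */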
static int
nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller	*ctrlr = arg1;
	int64_t			num_intr_handler_calls = 0;
	int			i;

	num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;

	for (i = 0; i < ctrlr->num_io_queues; i++)
		num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;

	return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
}

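/*
 * Handler for the "reset_stats" sysctl.  Writing a non-zero value clears
 * the statistics counters of the admin queue and all I/O queues.
 */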
static int
nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller	*ctrlr = arg1;
	uint32_t		i, val = 0;

	int error = sysctl_handle_int(oidp, &val, 0, req);

	if (error)
		return (error);

	if (val != 0) {
		nvme_qpair_reset_stats(&ctrlr->adminq);

		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_qpair_reset_stats(&ctrlr->ioq[i]);
	}

	return (0);
}

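/*
 * Attach the per-queue sysctls (ring size, tracker count, head/tail
 * indices, statistics counters and the dump_debug hook) under the given
 * queue subtree.
 */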
static void
nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
    struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
{
	struct sysctl_oid_list	*que_list = SYSCTL_CHILDREN(que_tree);

	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
	    CTLFLAG_RD, &qpair->num_entries, 0,
	    "Number of entries in hardware queue");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_tr",
	    CTLFLAG_RD, &qpair->num_tr, 0,
	    "Number of trackers allocated");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head",
	    CTLFLAG_RD, &qpair->sq_head, 0,
	    "Current head of submission queue (as observed by driver)");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail",
	    CTLFLAG_RD, &qpair->sq_tail, 0,
	    "Current tail of submission queue (as observed by driver)");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
	    CTLFLAG_RD, &qpair->cq_head, 0,
	    "Current head of completion queue (as observed by driver)");

	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
	    CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
	    CTLFLAG_RD, &qpair->num_intr_handler_calls,
	    "Number of times interrupt handler was invoked (will typically be "
	    "less than number of actual interrupts generated due to "
	    "coalescing)");

	SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
	    "dump_debug", CTLTYPE_UINT | CTLFLAG_RW, qpair, 0,
	    nvme_sysctl_dump_debug, "IU", "Dump debug data");
}

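/*
 * Build the controller's sysctl tree under its device node
 * (dev.nvme.<unit>): the controller-wide tunables and statistics are
 * registered only once the controller has started, while the "adminq"
 * subtree and one "ioqN" subtree per I/O queue are always created.
 */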
void
nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
{
	struct sysctl_ctx_list	*ctrlr_ctx;
	struct sysctl_oid	*ctrlr_tree, *que_tree;
	struct sysctl_oid_list	*ctrlr_list;
#define QUEUE_NAME_LENGTH	16
	char			queue_name[QUEUE_NAME_LENGTH];
	int			i;

	ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
	ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
	ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);

	if (ctrlr->is_started) {
		SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
		    "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
		    nvme_sysctl_int_coal_time, "IU",
		    "Interrupt coalescing timeout (in microseconds)");

		SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
		    "int_coal_threshold", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
		    nvme_sysctl_int_coal_threshold, "IU",
		    "Interrupt coalescing threshold");

		SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
		    "num_cmds", CTLTYPE_S64 | CTLFLAG_RD,
		    ctrlr, 0, nvme_sysctl_num_cmds, "IU",
		    "Number of commands submitted");

		SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
		    "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD,
		    ctrlr, 0, nvme_sysctl_num_intr_handler_calls, "IU",
		    "Number of times interrupt handler was invoked (will "
		    "typically be less than number of actual interrupts "
		    "generated due to coalescing)");

		SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
		    "reset_stats", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
		    nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");
	}

	que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
	    CTLFLAG_RD, NULL, "Admin Queue");

	nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
		que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
		    queue_name, CTLFLAG_RD, NULL, "IO Queue");
		nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
		    que_tree);
	}
}