nvme_sysctl.c (240616) -> nvme_sysctl.c (241434)
1/*-
2 * Copyright (C) 2012 Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden ---

20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/nvme/nvme_sysctl.c 240616 2012-09-17 19:23:01Z jimharris $");
28__FBSDID("$FreeBSD: head/sys/dev/nvme/nvme_sysctl.c 241434 2012-10-10 23:35:16Z jimharris $");
29
30#include <sys/param.h>
31#include <sys/bus.h>
32#include <sys/sysctl.h>
33
34#include "nvme_private.h"
35
36/*
37 * CTLTYPE_S64 and sysctl_handle_64 were added in r217616. Define these
38 * explicitly here for older kernels that don't include the r217616
39 * changeset.
40 */
41#ifndef CTLTYPE_S64
42#define CTLTYPE_S64 CTLTYPE_QUAD
43#define sysctl_handle_64 sysctl_handle_quad
44#endif
45
46static void
47nvme_dump_queue(struct nvme_qpair *qpair)
48{
49 struct nvme_completion *cpl;
50 struct nvme_command *cmd;
51 int i;
52
53 printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);

--- 65 unchanged lines hidden ---

119 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
120 ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
121 NULL);
122
123 return (0);
124}
125
126static void
127nvme_qpair_reset_stats(struct nvme_qpair *qpair)
128{
129
130 qpair->num_cmds = 0;
131 qpair->num_intr_handler_calls = 0;
132}
133
134static int
135nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
136{
137 struct nvme_controller *ctrlr = arg1;
138 int64_t num_cmds = 0;
139 int i;
140
141 num_cmds = ctrlr->adminq.num_cmds;
142
143 for (i = 0; i < ctrlr->num_io_queues; i++)
144 num_cmds += ctrlr->ioq[i].num_cmds;
145
146 return (sysctl_handle_64(oidp, &num_cmds, 0, req));
147}
148
149static int
150nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
151{
152 struct nvme_controller *ctrlr = arg1;
153 int64_t num_intr_handler_calls = 0;
154 int i;
155
156 num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;
157
158 for (i = 0; i < ctrlr->num_io_queues; i++)
159 num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;
160
161 return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
162}
163
164static int
165nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
166{
167 struct nvme_controller *ctrlr = arg1;
168 uint32_t i, val = 0;
169
170 int error = sysctl_handle_int(oidp, &val, 0, req);
171
172 if (error)
173 return (error);
174
175 if (val != 0) {
176 nvme_qpair_reset_stats(&ctrlr->adminq);
177
178 for (i = 0; i < ctrlr->num_io_queues; i++)
179 nvme_qpair_reset_stats(&ctrlr->ioq[i]);
180 }
181
182 return (0);
183}
184
185
186static void
187nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
188 struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
189{
190 struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);
191
192 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
193 CTLFLAG_RD, &qpair->num_entries, 0,
194 "Number of entries in hardware queue");

--- 10 unchanged lines hidden ---

205 CTLFLAG_RD, &qpair->sq_tail, 0,
206 "Current tail of submission queue (as observed by driver)");
207 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
208 CTLFLAG_RD, &qpair->cq_head, 0,
209 "Current head of completion queue (as observed by driver)");
210
211 SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
212 CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
213 SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
214 CTLFLAG_RD, &qpair->num_intr_handler_calls,
215 "Number of times interrupt handler was invoked (will typically be "
216 "less than number of actual interrupts generated due to "
217 "coalescing)");
218
219 SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
220 "dump_debug", CTLTYPE_UINT | CTLFLAG_RW, qpair, 0,
221 nvme_sysctl_dump_debug, "IU", "Dump debug data");
222}
223
224void
225nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)

--- 14 unchanged lines hidden ---

240 "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
241 nvme_sysctl_int_coal_time, "IU",
242 "Interrupt coalescing timeout (in microseconds)");
243
244 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
245 "int_coal_threshold", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
246 nvme_sysctl_int_coal_threshold, "IU",
247 "Interrupt coalescing threshold");
248
249 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
250 "num_cmds", CTLTYPE_S64 | CTLFLAG_RD,
251 ctrlr, 0, nvme_sysctl_num_cmds, "IU",
252 "Number of commands submitted");
253
254 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
255 "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD,
256 ctrlr, 0, nvme_sysctl_num_intr_handler_calls, "IU",
257 "Number of times interrupt handler was invoked (will "
258 "typically be less than number of actual interrupts "
259 "generated due to coalescing)");
260
261 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
262 "reset_stats", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
263 nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");
264 }
265
266 que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
267 CTLFLAG_RD, NULL, "Admin Queue");
268
269 nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);
270
271 for (i = 0; i < ctrlr->num_io_queues; i++) {
272 snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
273 que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
274 queue_name, CTLFLAG_RD, NULL, "IO Queue");
275 nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
276 que_tree);
277 }
278}
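
The counters added in this revision are ordinary sysctl OIDs, so they can be read (and reset) from userland with the standard sysctl(3)/sysctlbyname(3) interface. The sketch below is a hypothetical illustration, not part of this change: it assumes the controller's sysctl tree is attached at dev.nvme.0 (the usual dev.<driver>.<unit> layout for a device sysctl context) and reads the aggregated num_cmds value served by nvme_sysctl_num_cmds().

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical example, not part of r241434: read the controller-wide
 * "num_cmds" statistic.  The OID path "dev.nvme.0.num_cmds" is an
 * assumption based on the standard dev.<driver>.<unit> sysctl layout;
 * adjust the unit number for the controller of interest.
 */
int
main(void)
{
	int64_t num_cmds;
	size_t len = sizeof(num_cmds);

	if (sysctlbyname("dev.nvme.0.num_cmds", &num_cmds, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("commands submitted: %jd\n", (intmax_t)num_cmds);
	return (0);
}

Writing a non-zero value to the reset_stats node (for example, sysctl dev.nvme.0.reset_stats=1, again assuming that OID path) zeroes the per-queue num_cmds and num_intr_handler_calls counters via nvme_sysctl_reset_stats() above.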