--- stable/9/sys/dev/nvme/nvme_sysctl.c	(revision 252221)
+++ stable/9/sys/dev/nvme/nvme_sysctl.c	(revision 252222)
 /*-
  * Copyright (C) 2012 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright

[... 11 unchanged lines hidden ...]

  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/nvme/nvme_sysctl.c 240616 2012-09-17 19:23:01Z jimharris $");
+__FBSDID("$FreeBSD: stable/9/sys/dev/nvme/nvme_sysctl.c 252222 2013-06-25 23:52:39Z jimharris $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
 #include <sys/sysctl.h>
 
 #include "nvme_private.h"
 
+/*
+ * CTLTYPE_S64 and sysctl_handle_64 were added in r217616. Define these
+ * explicitly here for older kernels that don't include the r217616
+ * changeset.
+ */
+#ifndef CTLTYPE_S64
+#define CTLTYPE_S64 CTLTYPE_QUAD
+#define sysctl_handle_64 sysctl_handle_quad
+#endif
+
 static void
 nvme_dump_queue(struct nvme_qpair *qpair)
 {
 	struct nvme_completion *cpl;
 	struct nvme_command *cmd;
 	int i;
 
 	printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);

[... 64 unchanged lines hidden ...]

 	if (oldval != ctrlr->int_coal_threshold)
 		nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
 		    ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
 		    NULL);
 
 	return (0);
 }
 
+static int
+nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
+{
+	struct nvme_controller *ctrlr = arg1;
+	uint32_t oldval = ctrlr->timeout_period;
+	int error = sysctl_handle_int(oidp, &ctrlr->timeout_period, 0, req);
+
+	if (error)
+		return (error);
+
+	if (ctrlr->timeout_period > NVME_MAX_TIMEOUT_PERIOD ||
+	    ctrlr->timeout_period < NVME_MIN_TIMEOUT_PERIOD) {
+		ctrlr->timeout_period = oldval;
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
 static void
+nvme_qpair_reset_stats(struct nvme_qpair *qpair)
+{
+
+	qpair->num_cmds = 0;
+	qpair->num_intr_handler_calls = 0;
+}
+
+static int
+nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
+{
+	struct nvme_controller *ctrlr = arg1;
+	int64_t num_cmds = 0;
+	int i;
+
+	num_cmds = ctrlr->adminq.num_cmds;
+
+	for (i = 0; i < ctrlr->num_io_queues; i++)
+		num_cmds += ctrlr->ioq[i].num_cmds;
+
+	return (sysctl_handle_64(oidp, &num_cmds, 0, req));
+}
+
+static int
+nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
+{
+	struct nvme_controller *ctrlr = arg1;
+	int64_t num_intr_handler_calls = 0;
+	int i;
+
+	num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;
+
+	for (i = 0; i < ctrlr->num_io_queues; i++)
+		num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;
+
+	return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
+}
+
+static int
+nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
+{
+	struct nvme_controller *ctrlr = arg1;
+	uint32_t i, val = 0;
+
+	int error = sysctl_handle_int(oidp, &val, 0, req);
+
+	if (error)
+		return (error);
+
+	if (val != 0) {
+		nvme_qpair_reset_stats(&ctrlr->adminq);
+
+		for (i = 0; i < ctrlr->num_io_queues; i++)
+			nvme_qpair_reset_stats(&ctrlr->ioq[i]);
+	}
+
+	return (0);
+}
+
+
+static void
 nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
     struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
 {
 	struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);
 
 	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
 	    CTLFLAG_RD, &qpair->num_entries, 0,
 	    "Number of entries in hardware queue");
-	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_tr",
-	    CTLFLAG_RD, &qpair->num_tr, 0,
-	    "Number of trackers allocated");
-	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_prp_list",
-	    CTLFLAG_RD, &qpair->num_prp_list, 0,
-	    "Number of PRP lists allocated");
+	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
+	    CTLFLAG_RD, &qpair->num_trackers, 0,
+	    "Number of trackers pre-allocated for this queue pair");
 	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head",
 	    CTLFLAG_RD, &qpair->sq_head, 0,
 	    "Current head of submission queue (as observed by driver)");
 	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail",
 	    CTLFLAG_RD, &qpair->sq_tail, 0,
 	    "Current tail of submission queue (as observed by driver)");
 	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
 	    CTLFLAG_RD, &qpair->cq_head, 0,
 	    "Current head of completion queue (as observed by driver)");
 
 	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
 	    CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
+	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
+	    CTLFLAG_RD, &qpair->num_intr_handler_calls,
+	    "Number of times interrupt handler was invoked (will typically be "
+	    "less than number of actual interrupts generated due to "
+	    "coalescing)");
 
 	SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
 	    "dump_debug", CTLTYPE_UINT | CTLFLAG_RW, qpair, 0,
 	    nvme_sysctl_dump_debug, "IU", "Dump debug data");
 }
 
 void
 nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)

[... 4 unchanged lines hidden ...]

 #define QUEUE_NAME_LENGTH 16
 	char queue_name[QUEUE_NAME_LENGTH];
 	int i;
 
 	ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
 	ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
 	ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);
 
-	if (ctrlr->is_started) {
-		SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
-		    "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
-		    nvme_sysctl_int_coal_time, "IU",
-		    "Interrupt coalescing timeout (in microseconds)");
+	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+	    "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+	    nvme_sysctl_int_coal_time, "IU",
+	    "Interrupt coalescing timeout (in microseconds)");
 
-		SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
-		    "int_coal_threshold", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
-		    nvme_sysctl_int_coal_threshold, "IU",
-		    "Interrupt coalescing threshold");
-	}
+	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+	    "int_coal_threshold", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+	    nvme_sysctl_int_coal_threshold, "IU",
+	    "Interrupt coalescing threshold");
 
+	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+	    "timeout_period", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+	    nvme_sysctl_timeout_period, "IU",
+	    "Timeout period (in seconds)");
+
+	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+	    "num_cmds", CTLTYPE_S64 | CTLFLAG_RD,
+	    ctrlr, 0, nvme_sysctl_num_cmds, "IU",
+	    "Number of commands submitted");
+
+	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+	    "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD,
+	    ctrlr, 0, nvme_sysctl_num_intr_handler_calls, "IU",
+	    "Number of times interrupt handler was invoked (will "
+	    "typically be less than number of actual interrupts "
+	    "generated due to coalescing)");
+
+	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+	    "reset_stats", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+	    nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");
+
 	que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
 	    CTLFLAG_RD, NULL, "Admin Queue");
 
 	nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);
 
 	for (i = 0; i < ctrlr->num_io_queues; i++) {
 		snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
 		que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
 		    queue_name, CTLFLAG_RD, NULL, "IO Queue");
 		nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
 		    que_tree);
 	}
 }
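
The controller sysctls registered above hang off the device's sysctl tree created by device_get_sysctl_ctx()/device_get_sysctl_tree(), with per-queue nodes named "adminq" and "ioq0", "ioq1", and so on. A minimal userland sketch follows, assuming the tree for the first controller is rooted at dev.nvme.0 (the prefix is an assumption, not stated in this change; verify the exact path with sysctl(8) on a live system). The leaf names and types come directly from the registrations above.

/*
 * Illustrative sketch only, not part of r252222: read the aggregated
 * 64-bit command counter and clear the per-queue statistics through the
 * new controller-level sysctls.  The "dev.nvme.0." prefix is an assumed
 * location of the device sysctl tree; adjust the unit number for the
 * controller of interest.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int64_t num_cmds;
	size_t len = sizeof(num_cmds);
	int one = 1;

	/* "num_cmds" is CTLTYPE_S64 and read-only: fetch it as an int64_t. */
	if (sysctlbyname("dev.nvme.0.num_cmds", &num_cmds, &len, NULL, 0) == -1) {
		perror("dev.nvme.0.num_cmds");
		return (1);
	}
	printf("commands submitted so far: %jd\n", (intmax_t)num_cmds);

	/* Writing any non-zero value to "reset_stats" zeroes the counters. */
	if (sysctlbyname("dev.nvme.0.reset_stats", NULL, NULL, &one,
	    sizeof(one)) == -1) {
		perror("dev.nvme.0.reset_stats");
		return (1);
	}
	return (0);
}

Writing to reset_stats clears num_cmds and num_intr_handler_calls on the admin queue and on every I/O queue pair, matching nvme_sysctl_reset_stats() above.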