nvme_ctrlr_cmd.c revision 330897
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (C) 2012-2013 Intel Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/11/sys/dev/nvme/nvme_ctrlr_cmd.c 330897 2018-03-14 03:19:51Z eadler $");
31
32#include "nvme_private.h"
33
34void
35nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
36	nvme_cb_fn_t cb_fn, void *cb_arg)
37{
38	struct nvme_request *req;
39	struct nvme_command *cmd;
40
41	req = nvme_allocate_request_vaddr(payload,
42	    sizeof(struct nvme_controller_data), cb_fn, cb_arg);
43
44	cmd = &req->cmd;
45	cmd->opc = NVME_OPC_IDENTIFY;
46
47	/*
48	 * TODO: create an identify command data structure, which
49	 *  includes this CNS bit in cdw10.
50	 */
51	cmd->cdw10 = 1;
52
53	nvme_ctrlr_submit_admin_request(ctrlr, req);
54}
55
56void
57nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint32_t nsid,
58	void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
59{
60	struct nvme_request *req;
61	struct nvme_command *cmd;
62
63	req = nvme_allocate_request_vaddr(payload,
64	    sizeof(struct nvme_namespace_data), cb_fn, cb_arg);
65
66	cmd = &req->cmd;
67	cmd->opc = NVME_OPC_IDENTIFY;
68
69	/*
70	 * TODO: create an identify command data structure
71	 */
72	cmd->nsid = nsid;
73
74	nvme_ctrlr_submit_admin_request(ctrlr, req);
75}
76
77void
78nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
79    struct nvme_qpair *io_que, uint16_t vector, nvme_cb_fn_t cb_fn,
80    void *cb_arg)
81{
82	struct nvme_request *req;
83	struct nvme_command *cmd;
84
85	req = nvme_allocate_request_null(cb_fn, cb_arg);
86
87	cmd = &req->cmd;
88	cmd->opc = NVME_OPC_CREATE_IO_CQ;
89
90	/*
91	 * TODO: create a create io completion queue command data
92	 *  structure.
93	 */
94	cmd->cdw10 = ((io_que->num_entries-1) << 16) | io_que->id;
95	/* 0x3 = interrupts enabled | physically contiguous */
96	cmd->cdw11 = (vector << 16) | 0x3;
97	cmd->prp1 = io_que->cpl_bus_addr;
98
99	nvme_ctrlr_submit_admin_request(ctrlr, req);
100}
101
102void
103nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
104    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
105{
106	struct nvme_request *req;
107	struct nvme_command *cmd;
108
109	req = nvme_allocate_request_null(cb_fn, cb_arg);
110
111	cmd = &req->cmd;
112	cmd->opc = NVME_OPC_CREATE_IO_SQ;
113
114	/*
115	 * TODO: create a create io submission queue command data
116	 *  structure.
117	 */
118	cmd->cdw10 = ((io_que->num_entries-1) << 16) | io_que->id;
119	/* 0x1 = physically contiguous */
120	cmd->cdw11 = (io_que->id << 16) | 0x1;
121	cmd->prp1 = io_que->cmd_bus_addr;
122
123	nvme_ctrlr_submit_admin_request(ctrlr, req);
124}
125
126void
127nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
128    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
129{
130	struct nvme_request *req;
131	struct nvme_command *cmd;
132
133	req = nvme_allocate_request_null(cb_fn, cb_arg);
134
135	cmd = &req->cmd;
136	cmd->opc = NVME_OPC_DELETE_IO_CQ;
137
138	/*
139	 * TODO: create a delete io completion queue command data
140	 *  structure.
141	 */
142	cmd->cdw10 = io_que->id;
143
144	nvme_ctrlr_submit_admin_request(ctrlr, req);
145}
146
147void
148nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
149    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
150{
151	struct nvme_request *req;
152	struct nvme_command *cmd;
153
154	req = nvme_allocate_request_null(cb_fn, cb_arg);
155
156	cmd = &req->cmd;
157	cmd->opc = NVME_OPC_DELETE_IO_SQ;
158
159	/*
160	 * TODO: create a delete io submission queue command data
161	 *  structure.
162	 */
163	cmd->cdw10 = io_que->id;
164
165	nvme_ctrlr_submit_admin_request(ctrlr, req);
166}
167
168void
169nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
170    uint32_t cdw11, void *payload, uint32_t payload_size,
171    nvme_cb_fn_t cb_fn, void *cb_arg)
172{
173	struct nvme_request *req;
174	struct nvme_command *cmd;
175
176	req = nvme_allocate_request_null(cb_fn, cb_arg);
177
178	cmd = &req->cmd;
179	cmd->opc = NVME_OPC_SET_FEATURES;
180	cmd->cdw10 = feature;
181	cmd->cdw11 = cdw11;
182
183	nvme_ctrlr_submit_admin_request(ctrlr, req);
184}
185
186void
187nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
188    uint32_t cdw11, void *payload, uint32_t payload_size,
189    nvme_cb_fn_t cb_fn, void *cb_arg)
190{
191	struct nvme_request *req;
192	struct nvme_command *cmd;
193
194	req = nvme_allocate_request_null(cb_fn, cb_arg);
195
196	cmd = &req->cmd;
197	cmd->opc = NVME_OPC_GET_FEATURES;
198	cmd->cdw10 = feature;
199	cmd->cdw11 = cdw11;
200
201	nvme_ctrlr_submit_admin_request(ctrlr, req);
202}
203
204void
205nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
206    uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg)
207{
208	uint32_t cdw11;
209
210	cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);
211	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11,
212	    NULL, 0, cb_fn, cb_arg);
213}
214
215void
216nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
217    union nvme_critical_warning_state state, nvme_cb_fn_t cb_fn,
218    void *cb_arg)
219{
220	uint32_t cdw11;
221
222	cdw11 = state.raw;
223	nvme_ctrlr_cmd_set_feature(ctrlr,
224	    NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, NULL, 0, cb_fn,
225	    cb_arg);
226}
227
228void
229nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
230    uint32_t microseconds, uint32_t threshold, nvme_cb_fn_t cb_fn, void *cb_arg)
231{
232	uint32_t cdw11;
233
234	if ((microseconds/100) >= 0x100) {
235		nvme_printf(ctrlr, "invalid coal time %d, disabling\n",
236		    microseconds);
237		microseconds = 0;
238		threshold = 0;
239	}
240
241	if (threshold >= 0x100) {
242		nvme_printf(ctrlr, "invalid threshold %d, disabling\n",
243		    threshold);
244		threshold = 0;
245		microseconds = 0;
246	}
247
248	cdw11 = ((microseconds/100) << 8) | threshold;
249	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_INTERRUPT_COALESCING, cdw11,
250	    NULL, 0, cb_fn, cb_arg);
251}
252
253void
254nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
255    uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
256    void *cb_arg)
257{
258	struct nvme_request *req;
259	struct nvme_command *cmd;
260
261	req = nvme_allocate_request_vaddr(payload, payload_size, cb_fn, cb_arg);
262
263	cmd = &req->cmd;
264	cmd->opc = NVME_OPC_GET_LOG_PAGE;
265	cmd->nsid = nsid;
266	cmd->cdw10 = ((payload_size/sizeof(uint32_t)) - 1) << 16;
267	cmd->cdw10 |= log_page;
268
269	nvme_ctrlr_submit_admin_request(ctrlr, req);
270}
271
272void
273nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
274    struct nvme_error_information_entry *payload, uint32_t num_entries,
275    nvme_cb_fn_t cb_fn, void *cb_arg)
276{
277
278	KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__));
279
280	/* Controller's error log page entries is 0-based. */
281	KASSERT(num_entries <= (ctrlr->cdata.elpe + 1),
282	    ("%s called with num_entries=%d but (elpe+1)=%d\n", __func__,
283	    num_entries, ctrlr->cdata.elpe + 1));
284
285	if (num_entries > (ctrlr->cdata.elpe + 1))
286		num_entries = ctrlr->cdata.elpe + 1;
287
288	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_ERROR,
289	    NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload) * num_entries,
290	    cb_fn, cb_arg);
291}
292
/*
 * Read the SMART / Health Information log page for the given nsid into
 * payload.  Completion (success or failure) is reported through cb_fn.
 */
void
nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
    uint32_t nsid, struct nvme_health_information_page *payload,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
	    nsid, payload, sizeof(*payload), cb_fn, cb_arg);
}
302
/*
 * Read the Firmware Slot Information log page (a controller-global page,
 * hence the global namespace tag) into payload.  Completion is reported
 * through cb_fn.
 */
void
nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
    struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
	    NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
	    cb_arg);
}
312
313void
314nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
315    uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg)
316{
317	struct nvme_request *req;
318	struct nvme_command *cmd;
319
320	req = nvme_allocate_request_null(cb_fn, cb_arg);
321
322	cmd = &req->cmd;
323	cmd->opc = NVME_OPC_ABORT;
324	cmd->cdw10 = (cid << 16) | sqid;
325
326	nvme_ctrlr_submit_admin_request(ctrlr, req);
327}
328