1/*-
2 * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26/* $FreeBSD$ */
27
28/*
29 * Management interface for smartpqi driver
30 */
31
32#include "smartpqi_includes.h"
33
34/*
35 * Wrapper function to copy to user from kernel
36 */
37int
38os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
39		void *src_buf, int size, int mode)
40{
41	return(copyout(src_buf, dest_buf, size));
42}
43
44/*
45 * Wrapper function to copy from user to kernel
46 */
47int
48os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
49		void *src_buf, int size, int mode)
50{
51	return(copyin(src_buf, dest_buf, size));
52}
53
54/*
55 * Device open function for ioctl entry
56 */
57static int
58smartpqi_open(struct cdev *cdev, int flags, int devtype,
59		struct thread *td)
60{
61	return BSD_SUCCESS;
62}
63
64/*
65 * Device close function for ioctl entry
66 */
67static int
68smartpqi_close(struct cdev *cdev, int flags, int devtype,
69		struct thread *td)
70{
71	return BSD_SUCCESS;
72}
73
74/*
75 * ioctl for getting driver info
76 */
77static void
78smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
79{
80	struct pqisrc_softstate *softs = cdev->si_drv1;
81	pdriver_info driver_info = (pdriver_info)udata;
82
83	DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
84
85	driver_info->major_version = PQISRC_OS_VERSION;
86	driver_info->minor_version = PQISRC_FEATURE_VERSION;
87	driver_info->release_version = PQISRC_PATCH_VERSION;
88	driver_info->build_revision = PQISRC_BUILD_VERSION;
89	driver_info->max_targets = PQI_MAX_DEVICES - 1;
90	driver_info->max_io = softs->max_io_for_scsi_ml;
91	driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
92
93	DBG_FUNC("OUT\n");
94}
95
96/*
97 * ioctl for getting controller info
98 */
99static void
100smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
101{
102	struct pqisrc_softstate *softs = cdev->si_drv1;
103	device_t dev = softs->os_specific.pqi_dev;
104	pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata;
105	uint32_t sub_vendor = 0;
106	uint32_t sub_device = 0;
107	uint32_t vendor = 0;
108	uint32_t device = 0;
109
110	DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
111
112	pci_info->bus = pci_get_bus(dev);
113	pci_info->dev_fn = pci_get_function(dev);
114	pci_info->domain = pci_get_domain(dev);
115	sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
116	sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2);
117	pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor;
118	vendor = pci_get_vendor(dev);
119	device =  pci_get_device(dev);
120	pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
121	DBG_FUNC("OUT\n");
122}
123
124static inline int
125pqi_status_to_bsd_ioctl_status(int pqi_status)
126{
127	if (PQI_STATUS_SUCCESS == pqi_status)
128		return BSD_SUCCESS;
129	else
130		return EIO;
131}
132
133/*
134 * ioctl entry point for user
135 */
136static int
137smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
138		int flags, struct thread *td)
139{
140	int bsd_status, pqi_status;
141	struct pqisrc_softstate *softs = cdev->si_drv1;
142
143	DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
144
145	if (!udata) {
146		DBG_ERR("udata is null !!\n");
147		return EINVAL;
148	}
149
150	if (pqisrc_ctrl_offline(softs)){
151		return ENOTTY;
152	}
153
154	switch (cmd) {
155		case CCISS_GETDRIVVER:
156			smartpqi_get_driver_info_ioctl(udata, cdev);
157			bsd_status = BSD_SUCCESS;
158			break;
159		case CCISS_GETPCIINFO:
160			smartpqi_get_pci_info_ioctl(udata, cdev);
161			bsd_status = BSD_SUCCESS;
162			break;
163		case SMARTPQI_PASS_THRU:
164		case CCISS_PASSTHRU:
165			pqi_status = pqisrc_passthru_ioctl(softs, udata, 0);
166			bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
167			break;
168		case CCISS_REGNEWD:
169			pqi_status = pqisrc_scan_devices(softs);
170			bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
171			break;
172		default:
173			DBG_WARN( "!IOCTL cmd 0x%lx not supported\n", cmd);
174			bsd_status = ENOTTY;
175			break;
176	}
177
178	DBG_FUNC("OUT error = %d\n", bsd_status);
179	return bsd_status;
180}
181
/*
 * Character device switch table for the smartpqi management node
 * (/dev/smartpqiN); wires the open/close/ioctl entry points above.
 */
static struct cdevsw smartpqi_cdevsw =
{
	.d_version = D_VERSION,
	.d_open    = smartpqi_open,
	.d_close   = smartpqi_close,
	.d_ioctl   = smartpqi_ioctl,
	.d_name    = "smartpqi",
};
190
191/*
192 * Function to create device node for ioctl
193 */
194int
195create_char_dev(struct pqisrc_softstate *softs, int card_index)
196{
197	int error = BSD_SUCCESS;
198
199	DBG_FUNC("IN idx = %d\n", card_index);
200
201	softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
202				UID_ROOT, GID_OPERATOR, 0640,
203				"smartpqi%u", card_index);
204	if(softs->os_specific.cdev) {
205		softs->os_specific.cdev->si_drv1 = softs;
206	} else {
207		error = ENXIO;
208	}
209
210	DBG_FUNC("OUT error = %d\n", error);
211
212	return error;
213}
214
215/*
216 * Function to destroy device node for ioctl
217 */
218void
219destroy_char_dev(struct pqisrc_softstate *softs)
220{
221	DBG_FUNC("IN\n");
222	if (softs->os_specific.cdev) {
223		destroy_dev(softs->os_specific.cdev);
224		softs->os_specific.cdev = NULL;
225	}
226	DBG_FUNC("OUT\n");
227}
228
229/*
230 * Function used to send passthru commands to adapter
231 * to support management tools. For eg. ssacli, sscon.
232 */
233int
234pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
235{
236	int ret = PQI_STATUS_SUCCESS;
237	char *drv_buf = NULL;
238	uint32_t tag = 0;
239	IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
240	dma_mem_t ioctl_dma_buf;
241	pqisrc_raid_req_t request;
242	raid_path_error_info_elem_t error_info;
243	ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
244	ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
245	rcb_t *rcb = NULL;
246
247	memset(&request, 0, sizeof(request));
248	memset(&error_info, 0, sizeof(error_info));
249
250	DBG_FUNC("IN");
251
252	if (pqisrc_ctrl_offline(softs))
253		return PQI_STATUS_FAILURE;
254
255	if (!arg)
256		return (PQI_STATUS_FAILURE);
257
258	if (iocommand->buf_size < 1 &&
259		iocommand->Request.Type.Direction != PQIIOCTL_NONE)
260		return PQI_STATUS_FAILURE;
261	if (iocommand->Request.CDBLen > sizeof(request.cdb))
262		return PQI_STATUS_FAILURE;
263
264	switch (iocommand->Request.Type.Direction) {
265		case PQIIOCTL_NONE:
266		case PQIIOCTL_WRITE:
267		case PQIIOCTL_READ:
268		case PQIIOCTL_BIDIRECTIONAL:
269			break;
270		default:
271			return PQI_STATUS_FAILURE;
272	}
273
274	if (iocommand->buf_size > 0) {
275		memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
276		ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer";
277		ioctl_dma_buf.size = iocommand->buf_size;
278		ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
279		/* allocate memory */
280		ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
281		if (ret) {
282			DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
283			ret = PQI_STATUS_FAILURE;
284			goto out;
285		}
286
287		DBG_INFO("ioctl_dma_buf.dma_addr  = %p\n",(void*)ioctl_dma_buf.dma_addr);
288		DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);
289
290		drv_buf = (char *)ioctl_dma_buf.virt_addr;
291		if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
292			if ((ret = os_copy_from_user(softs, (void *)drv_buf,
293					(void *)iocommand->buf,
294					iocommand->buf_size, mode)) != 0) {
295				ret = PQI_STATUS_FAILURE;
296				goto free_mem;
297			}
298		}
299	}
300
301	request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
302	request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
303									PQI_REQUEST_HEADER_LENGTH;
304	memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
305		sizeof(request.lun_number));
306	memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
307	request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
308
309	switch (iocommand->Request.Type.Direction) {
310	case PQIIOCTL_NONE:
311		request.data_direction = SOP_DATA_DIR_NONE;
312		break;
313	case PQIIOCTL_WRITE:
314		request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
315		break;
316	case PQIIOCTL_READ:
317		request.data_direction = SOP_DATA_DIR_TO_DEVICE;
318		break;
319	case PQIIOCTL_BIDIRECTIONAL:
320		request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
321		break;
322	}
323
324	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
325	if (iocommand->buf_size > 0) {
326		request.buffer_length = iocommand->buf_size;
327		request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
328		request.sg_descriptors[0].len = iocommand->buf_size;
329		request.sg_descriptors[0].flags =  SG_FLAG_LAST;
330	}
331	tag = pqisrc_get_tag(&softs->taglist);
332	if (INVALID_ELEM == tag) {
333		DBG_ERR("Tag not available\n");
334		ret = PQI_STATUS_FAILURE;
335		goto free_mem;
336	}
337	request.request_id = tag;
338	request.response_queue_id = ob_q->q_id;
339	request.error_index = request.request_id;
340	if (softs->timeout_in_passthrough) {
341		request.timeout_in_sec = iocommand->Request.Timeout;
342	}
343
344	rcb = &softs->rcb[tag];
345	rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
346	rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
347	rcb->tag = tag;
348	rcb->req_pending = true;
349	/* Submit Command */
350	ret = pqisrc_submit_cmnd(softs, ib_q, &request);
351	if (ret != PQI_STATUS_SUCCESS) {
352		DBG_ERR("Unable to submit command\n");
353		goto err_out;
354	}
355
356	ret = pqisrc_wait_on_condition(softs, rcb,
357			PQISRC_PASSTHROUGH_CMD_TIMEOUT);
358	if (ret != PQI_STATUS_SUCCESS) {
359		DBG_ERR("Passthru IOCTL cmd timed out !!\n");
360		goto err_out;
361	}
362
363	memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));
364
365
366	if (rcb->status) {
367		size_t sense_data_length;
368
369		memcpy(&error_info, rcb->error_info, sizeof(error_info));
370		iocommand->error_info.ScsiStatus = error_info.status;
371		sense_data_length = error_info.sense_data_len;
372
373		if (!sense_data_length)
374			sense_data_length = error_info.resp_data_len;
375
376		if (sense_data_length &&
377			(sense_data_length > sizeof(error_info.data)))
378				sense_data_length = sizeof(error_info.data);
379
380		if (sense_data_length) {
381			if (sense_data_length >
382				sizeof(iocommand->error_info.SenseInfo))
383				sense_data_length =
384					sizeof(iocommand->error_info.SenseInfo);
385			memcpy (iocommand->error_info.SenseInfo,
386					error_info.data, sense_data_length);
387			iocommand->error_info.SenseLen = sense_data_length;
388		}
389
390		if (error_info.data_out_result ==
391				PQI_RAID_DATA_IN_OUT_UNDERFLOW){
392			rcb->status = REQUEST_SUCCESS;
393		}
394	}
395
396	if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
397		(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {
398
399		if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
400			(void*)drv_buf, iocommand->buf_size, mode)) != 0) {
401				DBG_ERR("Failed to copy the response\n");
402				goto err_out;
403		}
404	}
405
406	os_reset_rcb(rcb);
407	pqisrc_put_tag(&softs->taglist, request.request_id);
408	if (iocommand->buf_size > 0)
409			os_dma_mem_free(softs,&ioctl_dma_buf);
410
411	DBG_FUNC("OUT\n");
412	return ret;
413err_out:
414	os_reset_rcb(rcb);
415	pqisrc_put_tag(&softs->taglist, request.request_id);
416
417free_mem:
418	if (iocommand->buf_size > 0)
419		os_dma_mem_free(softs, &ioctl_dma_buf);
420
421out:
422	DBG_FUNC("Failed OUT\n");
423	return PQI_STATUS_FAILURE;
424}
425