// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus SPI library
 *
 * Copyright 2014-2016 Google Inc.
 * Copyright 2014-2016 Linaro Ltd.
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/greybus.h>
#include <linux/spi/spi.h>

#include "spilib.h"

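/*
 * Per-connection SPI state: the current transfer window (first/last
 * transfer and the tx/rx offsets into a partially sent transfer) plus the
 * controller configuration reported by the remote SPI master.
 */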
struct gb_spilib {
	struct gb_connection	*connection;
	struct device		*parent;
	struct spi_transfer	*first_xfer;
	struct spi_transfer	*last_xfer;
	struct spilib_ops	*ops;
	u32			rx_xfer_offset;
	u32			tx_xfer_offset;
	u32			last_xfer_size;
	unsigned int		op_timeout;
	u16			mode;
	u16			flags;
	u32			bits_per_word_mask;
	u8			num_chipselect;
	u32			min_speed_hz;
	u32			max_speed_hz;
};

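/*
 * Message progress is tracked through these cookie values stored in
 * msg->state, so a single spi_message can span several Greybus operations.
 */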
#define GB_SPI_STATE_MSG_DONE		((void *)0)
#define GB_SPI_STATE_MSG_IDLE		((void *)1)
#define GB_SPI_STATE_MSG_RUNNING	((void *)2)
#define GB_SPI_STATE_OP_READY		((void *)3)
#define GB_SPI_STATE_OP_DONE		((void *)4)
#define GB_SPI_STATE_MSG_ERROR		((void *)-1)

#define XFER_TIMEOUT_TOLERANCE		200

static struct spi_controller *get_controller_from_spi(struct gb_spilib *spi)
{
	return gb_connection_get_data(spi->connection);
}

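/*
 * Check whether the accumulated tx data plus one more transfer header
 * still fit in an operation request; returns 1 if they fit, 0 otherwise.
 */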
static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
{
	size_t headers_size;

	data_max -= sizeof(struct gb_spi_transfer_request);
	headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

	return tx_size + headers_size > data_max ? 0 : 1;
}

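/*
 * Compute how much of the remaining rx data (len) fits in the response
 * payload. For a write_read transfer the rx and tx sizes are forced to
 * match so that both directions advance in lockstep.
 */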
static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
				size_t data_max)
{
	size_t rx_xfer_size;

	data_max -= sizeof(struct gb_spi_transfer_response);

	if (rx_size + len > data_max)
		rx_xfer_size = data_max - rx_size;
	else
		rx_xfer_size = len;

	/* if this is a write_read, for symmetry read the same as write */
	if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
		rx_xfer_size = *tx_xfer_size;
	if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
		*tx_xfer_size = rx_xfer_size;

	return rx_xfer_size;
}

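/*
 * Compute how much of the remaining tx data (len) fits in the request
 * payload once the transfer headers are accounted for.
 */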
static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
				size_t data_max)
{
	size_t headers_size;

	data_max -= sizeof(struct gb_spi_transfer_request);
	headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

	if (tx_size + headers_size + len > data_max)
		return data_max - (tx_size + sizeof(struct gb_spi_transfer));

	return len;
}

static void clean_xfer_state(struct gb_spilib *spi)
{
	spi->first_xfer = NULL;
	spi->last_xfer = NULL;
	spi->rx_xfer_offset = 0;
	spi->tx_xfer_offset = 0;
	spi->last_xfer_size = 0;
	spi->op_timeout = 0;
}

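/* Check whether the last packed transfer was consumed to its full length. */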
static bool is_last_xfer_done(struct gb_spilib *spi)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
	    (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
		return true;

	return false;
}

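/*
 * After an operation completes, either advance to the next transfer in
 * the message or bump the tx/rx offsets so that the unfinished transfer
 * is resumed by the next operation.
 */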
static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	if (msg->state != GB_SPI_STATE_OP_DONE)
		return 0;

	/*
	 * if we transferred all content of the last transfer, reset values and
	 * check if this was the last transfer in the message
	 */
	if (is_last_xfer_done(spi)) {
		spi->tx_xfer_offset = 0;
		spi->rx_xfer_offset = 0;
		spi->op_timeout = 0;
		if (last_xfer == list_last_entry(&msg->transfers,
						 struct spi_transfer,
						 transfer_list))
			msg->state = GB_SPI_STATE_MSG_DONE;
		else
			spi->first_xfer = list_next_entry(last_xfer,
							  transfer_list);
		return 0;
	}

	spi->first_xfer = last_xfer;
	if (last_xfer->tx_buf)
		spi->tx_xfer_offset += spi->last_xfer_size;

	if (last_xfer->rx_buf)
		spi->rx_xfer_offset += spi->last_xfer_size;

	return 0;
}

static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
					  struct spi_message *msg)
{
	if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
				    transfer_list))
		return NULL;

	return list_next_entry(xfer, transfer_list);
}

/* Routines to transfer data */
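/*
 * Pack as many (possibly partial) transfers from @msg as fit into a
 * single Greybus operation, starting at spi->first_xfer, and return the
 * operation (or NULL on error).
 */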
static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
		struct gb_connection *connection, struct spi_message *msg)
{
	struct gb_spi_transfer_request *request;
	struct spi_device *dev = msg->spi;
	struct spi_transfer *xfer;
	struct gb_spi_transfer *gb_xfer;
	struct gb_operation *operation;
	u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
	u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
	u32 total_len = 0;
	unsigned int xfer_timeout;
	size_t data_max;
	void *tx_data;

	data_max = gb_operation_get_payload_size_max(connection);
	xfer = spi->first_xfer;

	/* Find number of transfers queued and tx/rx length in the message */

	while (msg->state != GB_SPI_STATE_OP_READY) {
		msg->state = GB_SPI_STATE_MSG_RUNNING;
		spi->last_xfer = xfer;

		if (!xfer->tx_buf && !xfer->rx_buf) {
			dev_err(spi->parent,
				"bufferless transfer, length %u\n", xfer->len);
			msg->state = GB_SPI_STATE_MSG_ERROR;
			return NULL;
		}

		tx_xfer_size = 0;
		rx_xfer_size = 0;

		if (xfer->tx_buf) {
			len = xfer->len - spi->tx_xfer_offset;
			if (!tx_header_fit_operation(tx_size, count, data_max))
				break;
			tx_xfer_size = calc_tx_xfer_size(tx_size, count,
							 len, data_max);
			spi->last_xfer_size = tx_xfer_size;
		}

		if (xfer->rx_buf) {
			len = xfer->len - spi->rx_xfer_offset;
			rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
							 len, data_max);
			spi->last_xfer_size = rx_xfer_size;
		}

		tx_size += tx_xfer_size;
		rx_size += rx_xfer_size;

		total_len += spi->last_xfer_size;
		count++;

		xfer = get_next_xfer(xfer, msg);
		if (!xfer || total_len >= data_max)
			msg->state = GB_SPI_STATE_OP_READY;
	}

	/*
	 * In addition to space for all message descriptors we need
	 * to have enough to hold all tx data.
	 */
	request_size = sizeof(*request);
	request_size += count * sizeof(*gb_xfer);
	request_size += tx_size;

	/* Response consists only of incoming data */
	operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
					request_size, rx_size, GFP_KERNEL);
	if (!operation)
		return NULL;

	request = operation->request->payload;
	request->count = cpu_to_le16(count);
	request->mode = dev->mode;
	request->chip_select = spi_get_chipselect(dev, 0);

	gb_xfer = &request->transfers[0];
	tx_data = gb_xfer + count;	/* place tx data after last gb_xfer */

	/* Fill in the transfers array */
	xfer = spi->first_xfer;
	while (msg->state != GB_SPI_STATE_OP_DONE) {
		int xfer_delay;

		if (xfer == spi->last_xfer)
			xfer_len = spi->last_xfer_size;
		else
			xfer_len = xfer->len;

		/* make sure we do not time out in a slow transfer */
		xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
		xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;

		if (xfer_timeout > spi->op_timeout)
			spi->op_timeout = xfer_timeout;

		gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
		gb_xfer->len = cpu_to_le32(xfer_len);
		xfer_delay = spi_delay_to_ns(&xfer->delay, xfer) / 1000;
		xfer_delay = clamp_t(u16, xfer_delay, 0, U16_MAX);
		gb_xfer->delay_usecs = cpu_to_le16(xfer_delay);
		gb_xfer->cs_change = xfer->cs_change;
		gb_xfer->bits_per_word = xfer->bits_per_word;

		/* Copy tx data */
		if (xfer->tx_buf) {
			gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
			memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
			       xfer_len);
			tx_data += xfer_len;
		}

		if (xfer->rx_buf)
			gb_xfer->xfer_flags |= GB_SPI_XFER_READ;

		if (xfer == spi->last_xfer) {
			if (!is_last_xfer_done(spi))
				gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
			msg->state = GB_SPI_STATE_OP_DONE;
			continue;
		}

		gb_xfer++;
		xfer = get_next_xfer(xfer, msg);
	}

	msg->actual_length += total_len;

	return operation;
}

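/*
 * Copy the rx payload of a completed operation back into the rx buffers
 * of the transfers it covered.
 */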
static void gb_spi_decode_response(struct gb_spilib *spi,
				   struct spi_message *msg,
				   struct gb_spi_transfer_response *response)
{
	struct spi_transfer *xfer = spi->first_xfer;
	void *rx_data = response->data;
	u32 xfer_len;

	while (xfer) {
		/* Copy rx data */
		if (xfer->rx_buf) {
			if (xfer == spi->first_xfer)
				xfer_len = xfer->len - spi->rx_xfer_offset;
			else if (xfer == spi->last_xfer)
				xfer_len = spi->last_xfer_size;
			else
				xfer_len = xfer->len;

			memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
			       xfer_len);
			rx_data += xfer_len;
		}

		if (xfer == spi->last_xfer)
			break;

		xfer = list_next_entry(xfer, transfer_list);
	}
}

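/*
 * transfer_one_message() callback: keep creating and synchronously
 * sending transfer operations until the whole message is done or an
 * error occurs.
 */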
static int gb_spi_transfer_one_message(struct spi_controller *ctlr,
				       struct spi_message *msg)
{
	struct gb_spilib *spi = spi_controller_get_devdata(ctlr);
	struct gb_connection *connection = spi->connection;
	struct gb_spi_transfer_response *response;
	struct gb_operation *operation;
	int ret = 0;

	spi->first_xfer = list_first_entry_or_null(&msg->transfers,
						   struct spi_transfer,
						   transfer_list);
	if (!spi->first_xfer) {
		ret = -ENOMEM;
		goto out;
	}

	msg->state = GB_SPI_STATE_MSG_IDLE;

	while (msg->state != GB_SPI_STATE_MSG_DONE &&
	       msg->state != GB_SPI_STATE_MSG_ERROR) {
		operation = gb_spi_operation_create(spi, connection, msg);
		if (!operation) {
			msg->state = GB_SPI_STATE_MSG_ERROR;
			ret = -EINVAL;
			continue;
		}

		ret = gb_operation_request_send_sync_timeout(operation,
							     spi->op_timeout);
		if (!ret) {
			response = operation->response->payload;
			if (response)
				gb_spi_decode_response(spi, msg, response);
		} else {
			dev_err(spi->parent,
				"transfer operation failed: %d\n", ret);
			msg->state = GB_SPI_STATE_MSG_ERROR;
		}

		gb_operation_put(operation);
		setup_next_xfer(spi, msg);
	}

out:
	msg->status = ret;
	clean_xfer_state(spi);
	spi_finalize_current_message(ctlr);

	return ret;
}

378
379static int gb_spi_prepare_transfer_hardware(struct spi_controller *ctlr)
380{
381	struct gb_spilib *spi = spi_controller_get_devdata(ctlr);
382
383	return spi->ops->prepare_transfer_hardware(spi->parent);
384}
385
386static int gb_spi_unprepare_transfer_hardware(struct spi_controller *ctlr)
387{
388	struct gb_spilib *spi = spi_controller_get_devdata(ctlr);
389
390	spi->ops->unprepare_transfer_hardware(spi->parent);
391
392	return 0;
393}
394
395static int gb_spi_setup(struct spi_device *spi)
396{
397	/* Nothing to do for now */
398	return 0;
399}
400
401static void gb_spi_cleanup(struct spi_device *spi)
402{
403	/* Nothing to do for now */
404}
405
/* Routines to get controller information */

/*
 * Map Greybus SPI mode bits/flags/bpw onto the Linux ones.
 * All bits are the same for now, so these macros return the same values.
 */
#define gb_spi_mode_map(mode) mode
#define gb_spi_flags_map(flags) flags

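/* Fetch and cache the controller configuration reported by the module. */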
static int gb_spi_get_master_config(struct gb_spilib *spi)
{
	struct gb_spi_master_config_response response;
	u16 mode, flags;
	int ret;

	ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;

	mode = le16_to_cpu(response.mode);
	spi->mode = gb_spi_mode_map(mode);

	flags = le16_to_cpu(response.flags);
	spi->flags = gb_spi_flags_map(flags);

	spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
	spi->num_chipselect = response.num_chipselect;

	spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
	spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);

	return 0;
}

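/*
 * Query the device configuration for chip select @cs and register a
 * matching spi_device on the controller.
 */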
static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
{
	struct spi_controller *ctlr = get_controller_from_spi(spi);
	struct gb_spi_device_config_request request;
	struct gb_spi_device_config_response response;
	struct spi_board_info spi_board = { {0} };
	struct spi_device *spidev;
	int ret;
	u8 dev_type;

	request.chip_select = cs;

	ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	dev_type = response.device_type;

	if (dev_type == GB_SPI_SPI_DEV)
		strscpy(spi_board.modalias, "spidev",
			sizeof(spi_board.modalias));
	else if (dev_type == GB_SPI_SPI_NOR)
		strscpy(spi_board.modalias, "spi-nor",
			sizeof(spi_board.modalias));
	else if (dev_type == GB_SPI_SPI_MODALIAS)
		memcpy(spi_board.modalias, response.name,
		       sizeof(spi_board.modalias));
	else
		return -EINVAL;

	spi_board.mode		= le16_to_cpu(response.mode);
	spi_board.bus_num	= ctlr->bus_num;
	spi_board.chip_select	= cs;
	spi_board.max_speed_hz	= le32_to_cpu(response.max_speed_hz);

	spidev = spi_new_device(ctlr, &spi_board);
	if (!spidev)
		return -EINVAL;

	return 0;
}

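/*
 * Allocate and register a SPI controller for @connection and instantiate
 * one spi_device per reported chip select.
 */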
int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
			  struct spilib_ops *ops)
{
	struct gb_spilib *spi;
	struct spi_controller *ctlr;
	int ret;
	u8 i;

	/* Allocate master with space for data */
	ctlr = spi_alloc_master(dev, sizeof(*spi));
	if (!ctlr) {
		dev_err(dev, "cannot alloc SPI master\n");
		return -ENOMEM;
	}

	spi = spi_controller_get_devdata(ctlr);
	spi->connection = connection;
	gb_connection_set_data(connection, ctlr);
	spi->parent = dev;
	spi->ops = ops;

	/* get controller configuration */
	ret = gb_spi_get_master_config(spi);
	if (ret)
		goto exit_spi_put;

	ctlr->bus_num = -1; /* Allow spi-core to allocate it dynamically */
	ctlr->num_chipselect = spi->num_chipselect;
	ctlr->mode_bits = spi->mode;
	ctlr->flags = spi->flags;
	ctlr->bits_per_word_mask = spi->bits_per_word_mask;

	/* Attach methods */
	ctlr->cleanup = gb_spi_cleanup;
	ctlr->setup = gb_spi_setup;
	ctlr->transfer_one_message = gb_spi_transfer_one_message;

	if (ops && ops->prepare_transfer_hardware) {
		ctlr->prepare_transfer_hardware =
			gb_spi_prepare_transfer_hardware;
	}

	if (ops && ops->unprepare_transfer_hardware) {
		ctlr->unprepare_transfer_hardware =
			gb_spi_unprepare_transfer_hardware;
	}

	ctlr->auto_runtime_pm = true;

	ret = spi_register_controller(ctlr);
	if (ret < 0)
		goto exit_spi_put;

	/* Now fetch the configuration of each device */
	for (i = 0; i < spi->num_chipselect; i++) {
		ret = gb_spi_setup_device(spi, i);
		if (ret < 0) {
			dev_err(dev, "failed to allocate spi device %d: %d\n",
				i, ret);
			goto exit_spi_unregister;
		}
	}

	return 0;

exit_spi_put:
	spi_controller_put(ctlr);

	return ret;

exit_spi_unregister:
	spi_unregister_controller(ctlr);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_spilib_master_init);

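/* Unregister the SPI controller associated with @connection. */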
void gb_spilib_master_exit(struct gb_connection *connection)
{
	struct spi_controller *ctlr = gb_connection_get_data(connection);

	spi_unregister_controller(ctlr);
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);

MODULE_LICENSE("GPL v2");