// SPDX-License-Identifier: GPL-2.0-only
/*
 * (c) Copyright 2002-2010, Ralink Technology, Inc.
 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/usb.h>
#include <linux/skbuff.h>

#include "mt7601u.h"
#include "dma.h"
#include "mcu.h"
#include "usb.h"
#include "trace.h"

#define MCU_FW_URB_MAX_PAYLOAD		0x3800
#define MCU_FW_URB_SIZE			(MCU_FW_URB_MAX_PAYLOAD + 12)
#define MCU_RESP_URB_SIZE		1024

static inline int firmware_running(struct mt7601u_dev *dev)
{
	return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1;
}

static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
	put_unaligned_le32(val, skb_put(skb, 4));
}

static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
					    u8 seq, enum mcu_cmd cmd)
{
	WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
				     FIELD_PREP(MT_TXD_CMD_INFO_SEQ, seq) |
				     FIELD_PREP(MT_TXD_CMD_INFO_TYPE, cmd)));
}

static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
					    struct sk_buff *skb, bool need_resp)
{
	u32 i, csum = 0;

	for (i = 0; i < skb->len / 4; i++)
		csum ^= get_unaligned_le32(skb->data + i * 4);

	trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
}

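/*
 * Allocate an skb for an MCU command payload, reserving headroom for the
 * DMA header and tailroom for the zero padding added when the message is
 * wrapped. The payload length is expected to be a multiple of 4.
 */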
static struct sk_buff *mt7601u_mcu_msg_alloc(const void *data, int len)
{
	struct sk_buff *skb;

	WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */

	skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (skb) {
		skb_reserve(skb, MT_DMA_HDR_LEN);
		skb_put_data(skb, data, len);
	}

	return skb;
}

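/*
 * Wait for a CMD_DONE event carrying sequence number @seq on the command
 * response endpoint. The response URB is resubmitted after every
 * completion; at most five completions are consumed before giving up.
 */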
static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
{
	struct urb *urb = dev->mcu.resp.urb;
	u32 rxfce;
	int urb_status, ret, i = 5;

	while (i--) {
		if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
						 msecs_to_jiffies(300))) {
			dev_warn(dev->dev, "Warning: %s retrying\n", __func__);
			continue;
		}

		/* Make copies of important data before reusing the urb */
		rxfce = get_unaligned_le32(dev->mcu.resp.buf);
		urb_status = urb->status * mt7601u_urb_has_error(urb);

		ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
					     &dev->mcu.resp, GFP_KERNEL,
					     mt7601u_complete_urb,
					     &dev->mcu.resp_cmpl);
		if (ret)
			return ret;

		if (urb_status)
			dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
				urb_status);

		if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
		    FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
			return 0;

		dev_err(dev->dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
			FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
	}

	dev_err(dev->dev, "Error: %s timed out\n", __func__);
	return -ETIMEDOUT;
}

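/*
 * Send an MCU command over the in-band command endpoint. The skb is
 * wrapped with a DMA command header and is always consumed. If
 * @wait_resp is set, a non-zero sequence number is assigned and the call
 * blocks until the matching CMD_DONE event arrives.
 */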
static int
mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
		     enum mcu_cmd cmd, bool wait_resp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
					    dev->out_eps[MT_EP_OUT_INBAND_CMD]);
	int sent, ret;
	u8 seq = 0;

	if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
		consume_skb(skb);
		return 0;
	}

	mutex_lock(&dev->mcu.mutex);

	if (wait_resp)
		while (!seq)
			seq = ++dev->mcu.msg_seq & 0xf;

	mt7601u_dma_skb_wrap_cmd(skb, seq, cmd);

	if (dev->mcu.resp_cmpl.done)
		dev_err(dev->dev, "Error: MCU response pre-completed!\n");

	trace_mt_mcu_msg_send_cs(dev, skb, wait_resp);
	trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len);
	ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
	if (ret) {
		dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret);
		goto out;
	}
	if (sent != skb->len)
		dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__);

	if (wait_resp)
		ret = mt7601u_mcu_wait_resp(dev, seq);
out:
	mutex_unlock(&dev->mcu.mutex);

	consume_skb(skb);

	return ret;
}

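/*
 * Issue a CMD_FUN_SET_OP request with the given function id and value.
 * Only function 5 requests a completion event from the MCU.
 */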
static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
				       enum mcu_function func, u32 val)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(func),
		.value = cpu_to_le32(val),
	};

	skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
}

int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga)
{
	int ret;

	if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state))
		return 0;

	ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING,
					  use_hvga);
	if (ret) {
		dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n");
		return ret;
	}

	dev->tssi_read_trig = true;

	return 0;
}

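/*
 * Ask the MCU to run the given calibration with @val as its argument
 * and wait for the completion event.
 */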
int
mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(cal),
		.value = cpu_to_le32(val),
	};

	skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
}

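/*
 * Write a list of register/value pairs via CMD_RANDOM_WRITE. Requests are
 * split into chunks of at most INBAND_PACKET_MAX_LEN / 8 pairs; only the
 * final chunk waits for the MCU's completion event.
 */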
int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
			    const struct mt76_reg_pair *data, int n)
{
	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
	if (ret)
		return ret;

	return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt);
}

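/*
 * Write a contiguous block of registers starting at @offset (relative to
 * MT_MCU_MEMMAP_WLAN) via CMD_BURST_WRITE, chunking as needed; only the
 * final chunk waits for the completion event.
 */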
int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
			     const u32 *data, int n)
{
	const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_regs_per_cmd, n);

	skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
	for (i = 0; i < cnt; i++)
		skb_put_le32(skb, data[i]);

	ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
	if (ret)
		return ret;

	return mt7601u_burst_write_regs(dev, offset + cnt * 4,
					data + cnt, n - cnt);
}

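/*
 * Layout of the firmware image: header, interrupt vector block (IVB),
 * then the ILM and DLM sections. Note that hdr.ilm_len includes the IVB.
 */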
struct mt76_fw_header {
	__le32 ilm_len;
	__le32 dlm_len;
	__le16 build_ver;
	__le16 fw_ver;
	u8 pad[4];
	char build_time[16];
};

struct mt76_fw {
	struct mt76_fw_header hdr;
	u8 ivb[MT_MCU_IVB_SIZE];
	u8 ilm[];
};

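/*
 * Push a single chunk of firmware to the device: build a DMA_PACKET
 * header in the preallocated buffer, program the FCE destination address
 * and length, submit the URB and bump the FCE TX descriptor index once
 * the transfer completes.
 */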
static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
			    const struct mt7601u_dma_buf *dma_buf,
			    const void *data, u32 len, u32 dst_addr)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */
	__le32 reg;
	u32 val;
	int ret;

	reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_PACKET) |
			  FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
			  FIELD_PREP(MT_TXD_INFO_LEN, len));
	memcpy(buf.buf, &reg, sizeof(reg));
	memcpy(buf.buf + sizeof(reg), data, len);
	memset(buf.buf + sizeof(reg) + len, 0, 8);

	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
				       MT_FCE_DMA_ADDR, dst_addr);
	if (ret)
		return ret;
	len = roundup(len, 4);
	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
				       MT_FCE_DMA_LEN, len << 16);
	if (ret)
		return ret;

	buf.len = MT_DMA_HDR_LEN + len + 4;
	ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
				     &buf, GFP_KERNEL,
				     mt7601u_complete_urb, &cmpl);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
		dev_err(dev->dev, "Error: firmware upload timed out\n");
		usb_kill_urb(buf.urb);
		return -ETIMEDOUT;
	}
	if (mt7601u_urb_has_error(buf.urb)) {
		dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
			buf.urb->status);
		return buf.urb->status;
	}

	val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}

static int
mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf,
	       const void *data, int len, u32 dst_addr)
{
	int n, ret;

	if (len == 0)
		return 0;

	n = min(MCU_FW_URB_MAX_PAYLOAD, len);
	ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr);
	if (ret)
		return ret;

	if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
		return -ETIMEDOUT;

	return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
}

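/*
 * Upload the ILM and DLM sections through the in-band command endpoint,
 * then send the IVB with a vendor request and poll until the firmware
 * reports itself as running.
 */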
static int
mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
{
	struct mt7601u_dma_buf dma_buf;
	void *ivb;
	u32 ilm_len, dlm_len;
	int i, ret;

	ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
	if (!ivb)
		return -ENOMEM;
	if (mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
		ret = -ENOMEM;
		goto error;
	}

	ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
	dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n",
		ilm_len, sizeof(fw->ivb));
	ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
	if (ret)
		goto error;

	dlm_len = le32_to_cpu(fw->hdr.dlm_len);
	dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len);
	ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
			     dlm_len, MT_MCU_DLM_OFFSET);
	if (ret)
		goto error;

	ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
				     0x12, 0, ivb, sizeof(fw->ivb));
	if (ret < 0)
		goto error;
	ret = 0;

	for (i = 100; i && !firmware_running(dev); i--)
		msleep(10);
	if (!i) {
		ret = -ETIMEDOUT;
		goto error;
	}

	dev_dbg(dev->dev, "Firmware running!\n");
error:
	kfree(ivb);
	mt7601u_usb_free_buf(dev, &dma_buf);

	return ret;
}

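/*
 * Request the firmware image, validate its header, reset and program the
 * DMA/FCE engines and upload the image. If the firmware is already
 * running, only ask for the image to be cached for later use (e.g. on
 * resume).
 */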
static int mt7601u_load_firmware(struct mt7601u_dev *dev)
{
	const struct firmware *fw;
	const struct mt76_fw_header *hdr;
	int len, ret;
	u32 val;

	mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
					 MT_USB_DMA_CFG_TX_BULK_EN));

	if (firmware_running(dev))
		return firmware_request_cache(dev->dev, MT7601U_FIRMWARE);

	ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr))
		goto err_inv_fw;

	hdr = (const struct mt76_fw_header *) fw->data;

	if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
		goto err_inv_fw;

	len = sizeof(*hdr);
	len += le32_to_cpu(hdr->ilm_len);
	len += le32_to_cpu(hdr->dlm_len);

	if (fw->size != len)
		goto err_inv_fw;

	val = le16_to_cpu(hdr->fw_ver);
	dev_info(dev->dev,
		 "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
		 le16_to_cpu(hdr->build_ver), hdr->build_time);

	len = le32_to_cpu(hdr->ilm_len);

	mt7601u_wr(dev, 0x94c, 0);
	mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0);

	mt7601u_vendor_reset(dev);
	msleep(5);

	mt7601u_wr(dev, 0xa44, 0);
	mt7601u_wr(dev, 0x230, 0x84210);
	mt7601u_wr(dev, 0x400, 0x80c00);
	mt7601u_wr(dev, 0x800, 1);

	mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
					 MT_PBF_CFG_TX1Q_EN |
					 MT_PBF_CFG_TX2Q_EN |
					 MT_PBF_CFG_TX3Q_EN));

	mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1);

	mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
					 MT_USB_DMA_CFG_TX_BULK_EN));
	val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR);
	val &= ~MT_USB_DMA_CFG_TX_CLR;
	mt7601u_wr(dev, MT_USB_DMA_CFG, val);

	/* FCE tx_fs_base_ptr */
	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
	/* FCE pdma enable */
	mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt7601u_wr(dev, MT_FCE_SKIP_FS, 3);

	ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data);

	release_firmware(fw);

	return ret;

err_inv_fw:
	dev_err(dev->dev, "Invalid firmware image\n");
	release_firmware(fw);
	return -ENOENT;
}

int mt7601u_mcu_init(struct mt7601u_dev *dev)
{
	int ret;

	mutex_init(&dev->mcu.mutex);

	ret = mt7601u_load_firmware(dev);
	if (ret)
		return ret;

	set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state);

	return 0;
}

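/*
 * Select the command queue and set up the response buffer and URB used
 * to receive MCU completion events.
 */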
int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev)
{
	int ret;

	ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1);
	if (ret)
		return ret;

	init_completion(&dev->mcu.resp_cmpl);
	if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
		mt7601u_usb_free_buf(dev, &dev->mcu.resp);
		return -ENOMEM;
	}

	ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
				     &dev->mcu.resp, GFP_KERNEL,
				     mt7601u_complete_urb, &dev->mcu.resp_cmpl);
	if (ret) {
		mt7601u_usb_free_buf(dev, &dev->mcu.resp);
		return ret;
	}

	return 0;
}

void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev)
{
	usb_kill_urb(dev->mcu.resp.urb);
	mt7601u_usb_free_buf(dev, &dev->mcu.resp);
}