// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020 Intel Corporation
 */
#ifdef CONFIG_INET
#include <net/tso.h>
#endif
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
#include "queue/tx.h"

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: the transport context
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
			       struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, cmd_pos, idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd;
	unsigned long flags;

	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);
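	/*
	 * Both sizes start with the wide command header. The first pass
	 * below grows copy_size by everything that must live in the
	 * copied part of the command buffer, and cmd_size by the full
	 * logical command length including NOCOPY/DUP chunks.
	 */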

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

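		/*
		 * NOCOPY chunks are DMA-mapped straight from the caller's
		 * buffer (which must stay valid until the command
		 * completes); DUP chunks are duplicated into dup_buf and
		 * mapped from there. Neither lands in the copied part,
		 * so plain copied chunks must not follow them.
		 */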
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_irqsave(&txq->lock, flags);

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	memset(tfd, 0, sizeof(*tfd));

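	/*
	 * An ASYNC command needs two free slots rather than one,
	 * presumably so that a synchronous command can still be enqueued
	 * even when async traffic has nearly filled the queue.
	 */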
	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&txq->lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
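	/*
	 * The sequence field encodes the command queue and the write
	 * index; the firmware echoes it in its response so the driver
	 * can match the reply back to this queue entry.
	 */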
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
					 INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* keep copy_size accurate, though; it is needed below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

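	/*
	 * TB0 always points into txq->first_tb_bufs, a per-queue host
	 * buffer that is already DMA-visible (an assumption about how it
	 * is allocated elsewhere), so no streaming mapping is needed for
	 * the first IWL_FIRST_TB_SIZE bytes.
	 */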
	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
	iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
			    tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   (u8 *)out_cmd + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
				    copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
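		/* the casts drop const; DMA_TO_DEVICE never writes back */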
		void *data = (void *)(uintptr_t)cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

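	/*
	 * out_meta->tbs is a bitmap with one bit per transfer buffer;
	 * make sure it is wide enough to cover every TB a TFD can hold.
	 */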
	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
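	/*
	 * Hand dup_buf over to the queue entry; the completion path
	 * (assumed, as elsewhere in this driver) frees it with
	 * kfree_sensitive() once the command has been processed.
	 */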
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

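	/*
	 * Bumping the write pointer touches a device register, so it is
	 * serialized against other register access via reg_lock.
	 */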
	spin_lock(&trans_pcie->reg_lock);
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	spin_unlock(&trans_pcie->reg_lock);

out:
	spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
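	/* on failure dup_buf was never handed to free_buf; free it here */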
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
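
/*
 * Usage sketch (illustrative only, not taken from this file): callers
 * normally build a struct iwl_host_cmd and submit it through the
 * transport layer, which ends up here for gen2 devices, e.g.:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = WIDE_ID(group_id, opcode),
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *	};
 *
 *	ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 * With no dataflags set, the payload is copied into the command buffer
 * above; IWL_HCMD_DFL_NOCOPY instead maps the caller's buffer directly,
 * which requires it to stay valid until the command completes.
 */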