/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2020 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"

static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type)
{
	return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL;
}

/**
 * __hwrm_req_init() - Initialize an HWRM request.
 * @bp: The driver context.
 * @req: A pointer to the request pointer to initialize.
 * @req_type: The request type. This will be converted to little endian
 *	before being written to the req_type field of the returned request.
 * @req_len: The length of the request to be allocated.
 *
 * Allocate DMA resources and initialize a new HWRM request object of the
 * given type. The response address field in the request is configured with
 * the DMA bus address that has been mapped for the response and the passed
 * request is pointed to kernel virtual memory mapped for the request (such
 * that short_input indirection can be accomplished without copying). The
 * request's target and completion ring are initialized to default values and
 * can be overridden by writing to the returned request object directly.
 *
 * The initialized request can be further customized by writing to its fields
 * directly, taking care to convert such fields to little endian. The request
 * object will be consumed (and all its associated resources released) upon
 * passing it to hwrm_req_send() unless ownership of the request has been
 * claimed by the caller via a call to hwrm_req_hold(). If the request is not
 * consumed, either because it is never sent or because ownership has been
 * claimed, then it must be released by a call to hwrm_req_drop().
 *
 * Return: zero on success, negative error code otherwise:
 *	E2BIG: the request length is too large to fit in the allocated buffer.
 *	ENOMEM: an allocation failure occurred.
 */
int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len)
{
	struct bnxt_hwrm_ctx *ctx;
	dma_addr_t dma_handle;
	u8 *req_addr;

	if (req_len > BNXT_HWRM_CTX_OFFSET)
		return -E2BIG;

	req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
				  &dma_handle);
	if (!req_addr)
		return -ENOMEM;

	ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET);
	/* safety first, sentinel used to check for invalid requests */
	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
	ctx->req_len = req_len;
	ctx->req = (struct input *)req_addr;
	ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET);
	ctx->dma_handle = dma_handle;
	ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
	ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT;
	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
	ctx->gfp = GFP_KERNEL;
	ctx->slice_addr = NULL;

	/* initialize common request fields */
	ctx->req->req_type = cpu_to_le16(req_type);
	ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET);
	ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING);
	ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET);
	*req = ctx->req;

	return 0;
}
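
/* Usage sketch (illustrative only, not taken from this file): callers
 * normally go through the hwrm_req_init() wrapper macro in bnxt_hwrm.h,
 * which derives req_len from the pointed-to type; the request structure
 * comes from bnxt_hsi.h:
 *
 *	struct hwrm_func_reset_input *req;
 *	int rc;
 *
 *	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
 *	if (rc)
 *		return rc;
 *	req->enables = 0;
 *	rc = hwrm_req_send(bp, req);	(consumes req unless it is held)
 */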

static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr)
{
	void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET;
	struct input *req = (struct input *)req_addr;
	struct bnxt_hwrm_ctx *ctx = ctx_addr;
	u64 sentinel;

	if (!req) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "null HWRM request\n");
		dump_stack();
		return NULL;
	}

	/* HWRM API has no type safety, verify sentinel to validate address */
	sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type));
	if (ctx->sentinel != sentinel) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n",
			   (u32)le16_to_cpu(req->req_type));
		dump_stack();
		return NULL;
	}

	return ctx;
}

/**
 * hwrm_req_timeout() - Set the completion timeout for the request.
 * @bp: The driver context.
 * @req: The request for which to set the timeout.
 * @timeout: The timeout in milliseconds.
 *
 * Set the timeout associated with the request for subsequent calls to
 * hwrm_req_send(). Some requests are long running and require a different
 * timeout than the default.
 */
void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->timeout = timeout;
}
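
/* Sketch (illustrative): stretch the timeout for a long-running command,
 * e.g. a flash or coredump operation, before sending:
 *
 *	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
 *	rc = hwrm_req_send(bp, req);
 */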

/**
 * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices.
 * @bp: The driver context.
 * @req: The request for which calls to hwrm_req_dma_slice() will have altered
 *	allocation flags.
 * @gfp: A bitmask of GFP flags. These flags are passed to dma_alloc_coherent()
 *	whenever it is used to allocate backing memory for slices. Note that
 *	calls to hwrm_req_dma_slice() will not always result in new allocations;
 *	however, memory suballocated from the request buffer is already
 *	__GFP_ZERO.
 *
 * Sets the GFP allocation flags associated with the request for subsequent
 * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO
 * for slice allocations.
 */
void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->gfp = gfp;
}
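
/* Sketch (illustrative): request zeroed backing memory for any slice that
 * ends up being satisfied by dma_alloc_coherent(); suballocations carved
 * from the request buffer are zeroed already:
 *
 *	hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO);
 *	buf = hwrm_req_dma_slice(bp, req, buf_len, &dma_handle);
 */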

/**
 * hwrm_req_replace() - Replace request data.
 * @bp: The driver context.
 * @req: The request to modify. A call to hwrm_req_replace() is conceptually
 *	an assignment of new_req to req. Subsequent calls to HWRM API functions,
 *	such as hwrm_req_send(), should thus use req and not new_req (in fact,
 *	calls to HWRM API functions will fail if non-managed request objects
 *	are passed).
 * @len: The length of new_req.
 * @new_req: The pre-built request to copy or reference.
 *
 * Replaces the request data in req with that of new_req. This is useful in
 * scenarios where a request object has already been constructed by a third
 * party prior to creating a resource managed request using hwrm_req_init().
 * Depending on the length, hwrm_req_replace() will either copy the new
 * request data into the DMA memory allocated for req, or it will simply
 * reference the new request and use it in lieu of req during subsequent
 * calls to hwrm_req_send(). The resource management is associated with
 * req and is independent of and does not apply to new_req. The caller must
 * ensure that the lifetime of new_req is at least as long as that of req.
 * Any slices that may have been associated with the original request are
 * released.
 *
 * Return: zero on success, negative error code otherwise:
 *	E2BIG: Request is too large.
 *	EINVAL: Invalid request to modify.
 */
int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	struct input *internal_req = req;
	u16 req_type;

	if (!ctx)
		return -EINVAL;

	if (len > BNXT_HWRM_CTX_OFFSET)
		return -E2BIG;

	/* free any existing slices */
	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
	if (ctx->slice_addr) {
		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
				  ctx->slice_addr, ctx->slice_handle);
		ctx->slice_addr = NULL;
	}
	ctx->gfp = GFP_KERNEL;

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) {
		memcpy(internal_req, new_req, len);
	} else {
		internal_req->req_type = ((struct input *)new_req)->req_type;
		ctx->req = new_req;
	}

	ctx->req_len = len;
	ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
					  BNXT_HWRM_RESP_OFFSET);

	/* update sentinel for potentially new request type */
	req_type = le16_to_cpu(internal_req->req_type);
	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);

	return 0;
}
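
/* Sketch, modeled loosely on bnxt_send_msg() in bnxt_sriov.c: forward a
 * pre-built command (here a hypothetical fw_msg buffer) through a managed
 * request:
 *
 *	rc = hwrm_req_init(bp, req, 0);
 *	if (rc)
 *		return rc;
 *	rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
 *	if (rc) {
 *		hwrm_req_drop(bp, req);
 *		return rc;
 *	}
 *	rc = hwrm_req_send(bp, req);
 */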

/**
 * hwrm_req_flags() - Set non-internal flags of the ctx.
 * @bp: The driver context.
 * @req: The request containing the HWRM command.
 * @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set.
 *
 * ctx flags can be used by callers to control how a subsequent
 * hwrm_req_send() behaves. For example, callers can pass BNXT_HWRM_CTX_SILENT
 * to suppress kernel error logging from hwrm_req_send(), or
 * BNXT_HWRM_FULL_WAIT to force hwrm_req_send() to wait for the full timeout
 * even if the firmware is not responding.
 * This generic function can be used to set any flag that is not internal to
 * the HWRM module.
 */
void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->flags |= (flags & HWRM_API_FLAGS);
}
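
/* Sketch: probe an optional command without polluting the kernel log and
 * wait out the full timeout even if the firmware appears unresponsive:
 *
 *	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
 *	rc = hwrm_req_send(bp, req);
 */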

/**
 * hwrm_req_hold() - Claim ownership of the request's resources.
 * @bp: The driver context.
 * @req: A pointer to the request to own. The request will no longer be
 *	consumed by calls to hwrm_req_send().
 *
 * Take ownership of the request. Ownership places responsibility on the
 * caller to free the resources associated with the request via a call to
 * hwrm_req_drop(). The caller taking ownership implies that a subsequent
 * call to hwrm_req_send() will not consume the request (i.e. sending will
 * not free the associated resources if the request is owned by the caller).
 * Taking ownership returns a reference to the response. Retaining and
 * accessing the response data is the most common reason to take ownership
 * of the request. Ownership can also be acquired in order to reuse the same
 * request object across multiple invocations of hwrm_req_send().
 *
 * Return: A pointer to the response object.
 *
 * The resources associated with the response will remain available to the
 * caller until ownership of the request is relinquished via a call to
 * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if
 * a valid request is provided. A returned NULL value would imply a driver
 * bug and the implementation will complain loudly in the logs to aid in
 * detection. It should not be necessary to check the result for NULL.
 */
void *hwrm_req_hold(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	struct input *input = (struct input *)req;

	if (!ctx)
		return NULL;

	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n",
			   (u32)le16_to_cpu(input->req_type));
		dump_stack();
		return NULL;
	}

	ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED;
	return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET;
}
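
/* Sketch: hold the request in order to read the response after sending,
 * a common pattern throughout the driver (structures from bnxt_hsi.h):
 *
 *	struct hwrm_ver_get_output *resp;
 *	struct hwrm_ver_get_input *req;
 *	int rc;
 *
 *	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
 *	if (rc)
 *		return rc;
 *	resp = hwrm_req_hold(bp, req);
 *	rc = hwrm_req_send(bp, req);
 *	if (!rc)
 *		... read resp fields ...
 *	hwrm_req_drop(bp, req);
 */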

static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
	void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET;
	dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */

	/* unmap any auxiliary DMA slice */
	if (ctx->slice_addr)
		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
				  ctx->slice_addr, ctx->slice_handle);

	/* invalidate, ensure ownership, sentinel and dma_handle are cleared */
	memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx));

	/* return the buffer to the DMA pool */
	if (dma_handle)
		dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle);
}

/**
 * hwrm_req_drop() - Release all resources associated with the request.
 * @bp: The driver context.
 * @req: The request to consume, releasing the associated resources. The
 *	request object, any slices, and its associated response are no
 *	longer valid.
 *
 * It is legal to call hwrm_req_drop() on an unowned request, provided it
 * has not already been consumed by hwrm_req_send() (for example, to release
 * an aborted request). A given request should not be dropped more than once,
 * nor should it be dropped after having been consumed by hwrm_req_send(). To
 * do so is an error (the context will not be found and a stack trace will be
 * rendered in the kernel log).
 */
void hwrm_req_drop(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		__hwrm_ctx_drop(bp, ctx);
}
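
/* Sketch: release an aborted request that was never sent; the condition
 * shown is hypothetical:
 *
 *	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
 *	if (rc)
 *		return rc;
 *	if (validation_failed) {
 *		hwrm_req_drop(bp, req);
 *		return -EINVAL;
 *	}
 */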

static int __hwrm_to_stderr(u32 hwrm_err)
{
	switch (hwrm_err) {
	case HWRM_ERR_CODE_SUCCESS:
		return 0;
	case HWRM_ERR_CODE_RESOURCE_LOCKED:
		return -EROFS;
	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
		return -EACCES;
	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
		return -ENOSPC;
	case HWRM_ERR_CODE_INVALID_PARAMS:
	case HWRM_ERR_CODE_INVALID_FLAGS:
	case HWRM_ERR_CODE_INVALID_ENABLES:
	case HWRM_ERR_CODE_UNSUPPORTED_TLV:
	case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
		return -EINVAL;
	case HWRM_ERR_CODE_NO_BUFFER:
		return -ENOMEM;
	case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
	case HWRM_ERR_CODE_BUSY:
		return -EAGAIN;
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HWRM_ERR_CODE_PF_UNAVAILABLE:
		return -ENODEV;
	default:
		return -EIO;
	}
}

static struct bnxt_hwrm_wait_token *
__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst)
{
	struct bnxt_hwrm_wait_token *token;

	token = kzalloc(sizeof(*token), GFP_KERNEL);
	if (!token)
		return NULL;

	mutex_lock(&bp->hwrm_cmd_lock);

	token->dst = dst;
	token->state = BNXT_HWRM_PENDING;
	if (dst == BNXT_HWRM_CHNL_CHIMP) {
		token->seq_id = bp->hwrm_cmd_seq++;
		hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list);
	} else {
		token->seq_id = bp->hwrm_cmd_kong_seq++;
	}

	return token;
}

static void
__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token)
{
	if (token->dst == BNXT_HWRM_CHNL_CHIMP) {
		hlist_del_rcu(&token->node);
		kfree_rcu(token, rcu);
	} else {
		kfree(token);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

void
hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
{
	struct bnxt_hwrm_wait_token *token;

	rcu_read_lock();
	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) {
		if (token->seq_id == seq_id) {
			WRITE_ONCE(token->state, state);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}

static void hwrm_req_dbg(struct bnxt *bp, struct input *req)
{
	u32 ring = le16_to_cpu(req->cmpl_ring);
	u32 type = le16_to_cpu(req->req_type);
	u32 tgt = le16_to_cpu(req->target_id);
	u32 seq = le16_to_cpu(req->seq_id);
	char opt[32] = "\n";

	if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING))
		snprintf(opt, 16, " ring %d\n", ring);

	if (unlikely(tgt != BNXT_HWRM_TARGET))
		snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);

	netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
		   type, seq, opt);
}

#define hwrm_err(bp, ctx, fmt, ...)				       \
	do {							       \
		if ((ctx)->flags & BNXT_HWRM_CTX_SILENT)	       \
			netdev_dbg((bp)->dev, fmt, __VA_ARGS__);       \
		else						       \
			netdev_err((bp)->dev, fmt, __VA_ARGS__);       \
	} while (0)

static bool hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status)
{
	if (req_type == HWRM_VER_GET)
		return false;

	if (!bp->fw_health || !bp->fw_health->status_reliable)
		return false;

	*fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
	return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status);
}

static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
	u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
	enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP;
	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
	struct bnxt_hwrm_wait_token *token = NULL;
	struct hwrm_short_input short_input = {0};
	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
	unsigned int i, timeout, tmo_count;
	u32 *data = (u32 *)ctx->req;
	u32 msg_len = ctx->req_len;
	u32 req_type, sts;
	int rc = -EBUSY;
	u16 len = 0;
	u8 *valid;

	if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY)
		memset(ctx->resp, 0, PAGE_SIZE);

	req_type = le16_to_cpu(ctx->req->req_type);
	if (BNXT_NO_FW_ACCESS(bp) &&
	    (req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) {
		netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
			   req_type);
		goto exit;
	}

	if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
	    msg_len > bp->hwrm_max_ext_req_len) {
		netdev_warn(bp->dev, "oversized hwrm request, req_type 0x%x",
			    req_type);
		rc = -E2BIG;
		goto exit;
	}

	if (bnxt_kong_hwrm_message(bp, ctx->req)) {
		dst = BNXT_HWRM_CHNL_KONG;
		bar_offset = BNXT_GRCPF_REG_KONG_COMM;
		doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
		if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n",
				   req_type);
			rc = -EINVAL;
			goto exit;
		}
	}

	token = __hwrm_acquire_token(bp, dst);
	if (!token) {
		rc = -ENOMEM;
		goto exit;
	}
	ctx->req->seq_id = cpu_to_le16(token->seq_id);

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
		short_input.req_type = ctx->req->req_type;
		short_input.signature =
				cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = cpu_to_le16(msg_len);
		short_input.req_addr = cpu_to_le64(ctx->dma_handle);

		data = (u32 *)&short_input;
		msg_len = sizeof(short_input);

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Ensure any associated DMA buffers are written before doorbell */
	wmb();

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);

	for (i = msg_len; i < max_req_len; i += 4)
		writel(0, bp->bar0 + bar_offset + i);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + doorbell_offset);

	hwrm_req_dbg(bp, ctx->req);

	if (!pci_is_enabled(bp->pdev)) {
		rc = -ENODEV;
		goto exit;
	}
	/* Clamp the timeout to the driver's configured upper limit */
	timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT);
	/* convert timeout to usec */
	timeout *= 1000;

	i = 0;
	/* Short timeout for the first few iterations:
	 * number of loops = number of loops for short timeout +
	 * number of loops for standard timeout.
	 */
	tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
	timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
	tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);

	if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE &&
		       i++ < tmo_count) {
			/* Abort the wait for completion if the FW health
			 * check has failed.
			 */
			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
				goto exit;
			/* on first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			} else {
				if (hwrm_wait_must_abort(bp, req_type, &sts)) {
					hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n",
						 req_type, sts);
					goto exit;
				}
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
			}
		}

		if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
			hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n",
				 req_type);
			goto exit;
		}
		len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
		valid = ((u8 *)ctx->resp) + len - 1;
	} else {
		__le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
		int j;

		/* Check if response len is updated */
		for (i = 0; i < tmo_count; i++) {
			/* Abort the wait for completion if the FW health
			 * check has failed.
			 */
			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
				goto exit;

			if (token &&
			    READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) {
				__hwrm_release_token(bp, token);
				token = NULL;
			}

			len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
			if (len) {
				__le16 resp_seq = READ_ONCE(ctx->resp->seq_id);

				if (resp_seq == ctx->req->seq_id)
					break;
				if (resp_seq != seen_out_of_seq) {
					netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
						    le16_to_cpu(resp_seq),
						    req_type,
						    le16_to_cpu(ctx->req->seq_id));
					seen_out_of_seq = resp_seq;
				}
			}

			/* on first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			} else {
				if (hwrm_wait_must_abort(bp, req_type, &sts)) {
					hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n",
						 req_type,
						 le16_to_cpu(ctx->req->seq_id),
						 len, sts);
					goto exit;
				}
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
			}
		}

		if (i >= tmo_count) {
			hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
				 hwrm_total_timeout(i), req_type,
				 le16_to_cpu(ctx->req->seq_id), len);
			goto exit;
		}

		/* Last byte of resp contains valid bit */
		valid = ((u8 *)ctx->resp) + len - 1;
		for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
			/* make sure we read from updated DMA memory */
			dma_rmb();
			if (*valid)
				break;
			if (j < 10) {
				udelay(1);
				j++;
			} else {
				usleep_range(20, 30);
				j += 20;
			}
		}

		if (j >= HWRM_VALID_BIT_DELAY_USEC) {
			hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
				 hwrm_total_timeout(i) + j, req_type,
				 le16_to_cpu(ctx->req->seq_id), len, *valid);
			goto exit;
		}
	}

	/* Zero valid bit for compatibility.  Valid bit in an older spec
	 * may become a new field in a newer spec.  We must make sure that
	 * a new field not implemented by old spec will read zero.
	 */
	*valid = 0;
	rc = le16_to_cpu(ctx->resp->error_code);
	if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
		netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
			    req_type);
	else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
		hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			 req_type, token->seq_id, rc);
	rc = __hwrm_to_stderr(rc);
exit:
	if (token)
		__hwrm_release_token(bp, token);
	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
		ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
	else
		__hwrm_ctx_drop(bp, ctx);
	return rc;
}

/**
 * hwrm_req_send() - Execute an HWRM command.
 * @bp: The driver context.
 * @req: A pointer to the request to send. The DMA resources associated with
 *	the request will be released (i.e. the request will be consumed) unless
 *	ownership of the request has been assumed by the caller via a call to
 *	hwrm_req_hold().
 *
 * Send an HWRM request to the device and wait for a response. The request is
 * consumed if it is not owned by the caller. This function will block until
 * the request has either completed or timed out due to an error.
 *
 * Return: A result code.
 *
 * The result is zero on success, otherwise the negative error code indicates
 * one of the following errors:
 *	E2BIG: The request was too large.
 *	EBUSY: The firmware is in a fatal state or the request timed out.
 *	EACCES: HWRM access denied.
 *	ENOSPC: HWRM resource allocation error.
 *	EINVAL: Request parameters are invalid.
 *	ENOMEM: HWRM has no buffers.
 *	EAGAIN: HWRM busy or reset in progress.
 *	EOPNOTSUPP: Invalid request type.
 *	EIO: Any other error.
 * Error handling is orthogonal to request ownership. An unowned request will
 * still be consumed on error. If the caller owns the request, then the caller
 * is responsible for releasing the resources. Otherwise, hwrm_req_send() will
 * always consume the request.
 */
int hwrm_req_send(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (!ctx)
		return -EINVAL;

	return __hwrm_send(bp, ctx);
}
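
/* Sketch: a held request may be sent repeatedly; the indexed field below is
 * hypothetical and stands in for whatever the request iterates over:
 *
 *	resp = hwrm_req_hold(bp, req);
 *	for (i = 0; i < n; i++) {
 *		req->index = cpu_to_le16(i);
 *		rc = hwrm_req_send(bp, req);
 *		if (rc)
 *			break;
 *		... consume resp ...
 *	}
 *	hwrm_req_drop(bp, req);
 */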

/**
 * hwrm_req_send_silent() - A silent version of hwrm_req_send().
 * @bp: The driver context.
 * @req: The request to send without logging.
 *
 * The same as hwrm_req_send(), except that the request is silenced using
 * hwrm_req_flags() with BNXT_HWRM_CTX_SILENT prior to the call. This version
 * of the function is provided solely to preserve the legacy API's flavor for
 * this functionality.
 *
 * Return: A result code, see hwrm_req_send().
 */
int hwrm_req_send_silent(struct bnxt *bp, void *req)
{
	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
	return hwrm_req_send(bp, req);
}
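
/* Sketch: probe for a command that older firmware may not implement; the
 * failure is expected and should not be logged as an error:
 *
 *	rc = hwrm_req_send_silent(bp, req);
 *	if (rc == -EOPNOTSUPP)
 *		... fall back gracefully ...
 */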

/**
 * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
 * @bp: The driver context.
 * @req: The request for which indirect data will be associated.
 * @size: The size of the allocation.
 * @dma_handle: The bus address associated with the allocation. The HWRM API has
 *	no knowledge about the type of the request and so cannot infer how the
 *	caller intends to use the indirect data. Thus, the caller is
 *	responsible for configuring the request object appropriately to
 *	point to the associated indirect memory. Note, DMA handle has the
 *	same definition as it does in dma_alloc_coherent(); the caller is
 *	responsible for endian conversions via cpu_to_le64() before assigning
 *	this address.
 *
 * Allocates DMA mapped memory for indirect data related to a request. The
 * lifetime of the DMA resources will be bound to that of the request (i.e.
 * they will be automatically released when the request is either consumed by
 * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are
 * efficiently suballocated out of the request buffer space, hence the name
 * slice, while larger requests are satisfied via an underlying call to
 * dma_alloc_coherent(). Multiple suballocations are supported; however, only
 * one externally mapped region is.
 *
 * Return: The kernel virtual address of the DMA mapping.
 */
void *
hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE;
	struct input *input = req;
	u8 *addr, *req_addr = req;
	u32 max_offset, offset;

	if (!ctx)
		return NULL;

	max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated;
	offset = max_offset - size;
	offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN);
	addr = req_addr + offset;

	if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
		ctx->allocated = end - addr;
		*dma_handle = ctx->dma_handle + offset;
		return addr;
	}

	/* could not suballocate from ctx buffer, try to create a new mapping */
	if (ctx->slice_addr) {
		/* if one exists, can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
			   (u32)le16_to_cpu(input->req_type));
		dump_stack();
		return NULL;
	}

	addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp);

	if (!addr)
		return NULL;

	ctx->slice_addr = addr;
	ctx->slice_size = size;
	ctx->slice_handle = *dma_handle;

	return addr;
}
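
/* Sketch, modeled loosely on the NVM/flash flows in bnxt_ethtool.c (e.g.
 * bnxt_flash_nvram()): carve out DMA-visible memory for indirect data,
 * point the request at it with cpu_to_le64(), then send:
 *
 *	dma_addr_t dma_handle;
 *	u8 *kmem;
 *
 *	kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
 *	if (!kmem) {
 *		hwrm_req_drop(bp, req);
 *		return -ENOMEM;
 *	}
 *	memcpy(kmem, data, data_len);
 *	req->host_src_addr = cpu_to_le64(dma_handle);
 *	rc = hwrm_req_send(bp, req);
 */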