/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */
#ifndef _VNIC_WQ_COPY_H_
#define _VNIC_WQ_COPY_H_

#include <linux/pci.h>
#include "vnic_wq.h"
#include "fcpio.h"

#define	VNIC_WQ_COPY_MAX 1

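/* One instance per copy (FCP request) work queue.  The driver fills
 * fcpio_host_req descriptors at to_use_index and posts them to the
 * device through the memory-mapped control block; completed slots
 * are reclaimed starting at to_clean_index.
 */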
struct vnic_wq_copy {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	unsigned int to_use_index;	/* next slot the driver fills */
	unsigned int to_clean_index;	/* next slot awaiting completion */
};

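/* Ring accounting helpers.  The ring keeps one slot unused (note the
 * desc_count - 1 below), the usual ring-buffer trick that lets a full
 * ring be distinguished from an empty one.
 */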
static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
{
	return wq->ring.desc_avail;
}

static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
{
	return wq->ring.desc_count - 1 - wq->ring.desc_avail;
}

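/* Return the descriptor at to_use_index for the caller to fill in.
 * The slot does not belong to the device until vnic_wq_copy_post()
 * is called.
 */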
static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
{
	struct fcpio_host_req *desc = wq->ring.descs;

	return &desc[wq->to_use_index];
}

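/* Publish the descriptor obtained from vnic_wq_copy_next_desc():
 * advance to_use_index with wrap-around, then write the new posted
 * index to the queue control registers so hardware picks it up.
 *
 * A minimal caller sketch (illustrative only; fill_fcpio_req() is a
 * hypothetical helper, real callers build the request in place):
 *
 *	struct fcpio_host_req *req = vnic_wq_copy_next_desc(wq);
 *
 *	fill_fcpio_req(req);	(initialize every field first)
 *	vnic_wq_copy_post(wq);	(hardware may now read it)
 */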
static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
{
	if (++wq->to_use_index == wq->ring.desc_count)
		wq->to_use_index = 0;
	wq->ring.desc_avail--;

	/* A write memory barrier prevents compiler and/or CPU
	 * reordering, so the descriptor is fully initialized before
	 * it is posted.  Otherwise, hardware could read stale
	 * descriptor fields.
	 */
	wmb();

	iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
}

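/* Reclaim every descriptor up to and including the completed @index,
 * crediting the slots back to desc_avail.  Unlike
 * vnic_wq_copy_service(), no per-descriptor callback is invoked.
 */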
static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
{
	unsigned int cnt;

	if (wq->to_clean_index <= index)
		cnt = (index - wq->to_clean_index) + 1;
	else	/* completed index wrapped around the end of the ring */
		cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;

	wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
	wq->ring.desc_avail += cnt;
}

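/* Walk the ring from to_clean_index through @completed_index, calling
 * @q_service (if non-NULL) on each completed descriptor and returning
 * the slots to desc_avail.  A @completed_index of (u16)-1 means clean
 * everything outstanding, i.e. stop only when to_clean_index catches
 * up with to_use_index.
 */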
static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
	u16 completed_index,
	void (*q_service)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc))
{
	struct fcpio_host_req *wq_desc = wq->ring.descs;
	unsigned int curr_index;

	while (1) {
		if (q_service)
			(*q_service)(wq, &wq_desc[wq->to_clean_index]);

		wq->ring.desc_avail++;

		curr_index = wq->to_clean_index;

		/* increment the to-clean index so that we start
		 * with an unprocessed index next time we enter the loop
		 */
		if (++wq->to_clean_index == wq->ring.desc_count)
			wq->to_clean_index = 0;

		if (curr_index == completed_index)
			break;

		/* we have cleaned all the entries */
		if ((completed_index == (u16)-1) &&
		    (wq->to_clean_index == wq->to_use_index))
			break;
	}
}

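/* Queue setup and teardown entry points, implemented in the
 * companion vnic_wq_copy.c (assumed source file name).
 */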
void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
void vnic_wq_copy_free(struct vnic_wq_copy *wq);
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size);
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
	void (*q_clean)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc));

#endif /* _VNIC_WQ_COPY_H_ */