// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/pci.h>
#include <misc/cxl.h>
#include "cxl.h"

static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}

static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	return -ENODEV;
}

static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set, but we still need to provide this
	 * callback.
	 */
}

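/*
 * Called when a driver enables a device on the vPHB: check that the
 * adapter link is up, then allocate a default cxl context for the
 * device before enabling the AFU.
 */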
static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	struct cxl_context *ctx;

	phb = pci_bus_to_host(dev->bus);
	afu = (struct cxl_afu *)phb->private_data;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
		return false;
	}

	dev->dev.archdata.dma_offset = PAGE_OFFSET;

	/*
	 * Allocate a context to do cxl things with. If we eventually do real
	 * DMA ops, we'll need a default context to attach them to.
	 */
	ctx = cxl_dev_context_init(dev);
	if (IS_ERR(ctx))
		return false;
	dev->dev.archdata.cxl_ctx = ctx;

	return (cxl_ops->afu_check_and_enable(afu) == 0);
}

static void cxl_pci_disable_device(struct pci_dev *dev)
{
	struct cxl_context *ctx = cxl_get_context(dev);

	if (ctx) {
		if (ctx->status == STARTED) {
			dev_err(&dev->dev, "Default context started\n");
			return;
		}
		dev->dev.archdata.cxl_ctx = NULL;
		cxl_release_context(ctx);
	}
}

static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here? */
}

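/*
 * Config space accesses on the vPHB are not real PCI config cycles;
 * they are serviced from the AFU configuration records. The record
 * number is encoded from the virtual bus number and devfn.
 */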
static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
{
	return (bus << 8) + devfn;
}

static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
{
	struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;

	return phb ? phb->private_data : NULL;
}

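/*
 * configured_state acts as a lightweight reader lock around config
 * accesses: cxl_afu_configured_get() takes a reference and fails once
 * the counter has been parked at a negative value by the AFU teardown
 * path; cxl_afu_configured_put() drops the reference.
 */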
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
	atomic_dec_if_positive(&afu->configured_state);
}

static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
	return atomic_inc_unless_negative(&afu->configured_state);
}

static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
				       struct cxl_afu *afu, int *_record)
{
	int record;

	record = cxl_pcie_cfg_record(bus->number, devfn);
	if (record > afu->crs_num)
		return PCIBIOS_DEVICE_NOT_FOUND;

	*_record = record;
	return 0;
}

static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	int rc, record;
	struct cxl_afu *afu;
	u8 val8;
	u16 val16;
	u32 val32;

	afu = pci_bus_to_afu(bus);
	/* Grab a reader lock on afu. */
	if (afu == NULL || !cxl_afu_configured_get(afu))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = cxl_pcie_config_info(bus, devfn, afu, &record);
	if (rc)
		goto out;

	switch (len) {
	case 1:
		rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
		*val = val8;
		break;
	case 2:
		rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
		*val = val16;
		break;
	case 4:
		rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
		*val = val32;
		break;
	default:
		WARN_ON(1);
	}

out:
	cxl_afu_configured_put(afu);
	return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}

static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	int rc, record;
	struct cxl_afu *afu;

	afu = pci_bus_to_afu(bus);
	/* Grab a reader lock on afu. */
	if (afu == NULL || !cxl_afu_configured_get(afu))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = cxl_pcie_config_info(bus, devfn, afu, &record);
	if (rc)
		goto out;

	switch (len) {
	case 1:
		rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
		break;
	case 2:
		rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
		break;
	case 4:
		rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
		break;
	default:
		WARN_ON(1);
	}

out:
	cxl_afu_configured_put(afu);
	return rc ? PCIBIOS_SET_FAILED : 0;
}

static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};

static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
};

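/*
 * Create a virtual PHB whose "config space" is backed by the AFU's
 * configuration records, then scan it so those records show up as PCI
 * devices that ordinary drivers can bind to.
 */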
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
	struct pci_controller *phb;
	struct device_node *vphb_dn;
	struct device *parent;

	/*
	 * If there are no AFU configuration records we won't have anything to
	 * expose under the vPHB, so skip creating one, returning success since
	 * this is still a valid case. This will also opt us out of EEH
	 * handling since we won't have anything special to do if there are no
	 * kernel drivers attached to the vPHB, and EEH handling is not yet
	 * supported in the peer model.
	 */
	if (!afu->crs_num)
		return 0;

	/*
	 * The parent device is the adapter. Reuse the device node of
	 * the adapter.
	 * We don't seem to care what device node is used for the vPHB,
	 * but tools such as lsvpd walk up the device parents looking
	 * for a valid location code, so we might as well show devices
	 * attached to the adapter as being located on that adapter.
	 */
	parent = afu->adapter->dev.parent;
	vphb_dn = parent->of_node;

	/* Alloc and setup PHB data structure */
	phb = pcibios_alloc_controller(vphb_dn);
	if (!phb)
		return -ENODEV;

	/* Setup parent in sysfs */
	phb->parent = parent;

	/* Setup the PHB using arch provided callback */
	phb->ops = &cxl_pcie_pci_ops;
	phb->cfg_addr = NULL;
	phb->cfg_data = NULL;
	phb->private_data = afu;
	phb->controller_ops = cxl_pci_controller_ops;

	/* Scan the bus */
	pcibios_scan_phb(phb);
	if (phb->bus == NULL)
		return -ENXIO;

	/* Set release hook on root bus */
	pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
				    pcibios_free_controller_deferred,
				    (void *) phb);

	/*
	 * Claim resources. This might need some rework as well, depending on
	 * whether we are doing probe-only or not, like assigning unassigned
	 * resources etc...
	 */
	pcibios_claim_one_bus(phb->bus);

	/* Add probed PCI devices to the device model */
	pci_bus_add_devices(phb->bus);

	afu->phb = phb;

	return 0;
}

void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
	struct pci_controller *phb;

	/* If there is no configuration record we won't have one of these */
	if (!afu || !afu->phb)
		return;

	phb = afu->phb;
	afu->phb = NULL;

	pci_remove_root_bus(phb->bus);
	/*
	 * We don't free phb here - that's handled by
	 * pcibios_free_controller_deferred()
	 */
}

bool cxl_pci_is_vphb_device(struct pci_dev *dev)
{
	struct pci_controller *phb;

	phb = pci_bus_to_host(dev->bus);

	return (phb->ops == &cxl_pcie_pci_ops);
}

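/*
 * The exported helpers below let cxl-aware drivers bound to devices on
 * the vPHB get back at the underlying AFU and configuration record.
 */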
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
{
	struct pci_controller *phb;

	phb = pci_bus_to_host(dev->bus);

	return (struct cxl_afu *)phb->private_data;
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);

unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
	return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);