/* $NetBSD: acpi_pci_layerscape_gen4.c,v 1.5 2022/10/15 11:07:38 jmcneill Exp $ */

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill@invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NXP Layerscape PCIe Gen4 controller (not ECAM compliant)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_pci_layerscape_gen4.c,v 1.5 2022/10/15 11:07:38 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pciconf.h>

#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#include <dev/acpi/acpi_mcfg.h>

#include <arm/acpi/acpi_pci_machdep.h>

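/*
 * Controller (CCSR) register definitions.  Register offsets at or above
 * INDIRECT_ADDR_BOUNDARY are not directly addressable; they are reached
 * through a 1KB window at INDIRECT_ADDR_BOUNDARY, with the page selected
 * via the PAB_CTRL page select field (see the REG_TO_PAGE_* macros).
 */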
#define	PAB_CTRL			0x808
#define	 PAB_CTRL_PAGE_SEL		__BITS(18,13)
#define	PAB_AXI_AMAP_PEX_WIN_L(x)	(0xba8 + 0x10 * (x))
#define	PAB_AXI_AMAP_PEX_WIN_H(x)	(0xbac + 0x10 * (x))
#define	INDIRECT_ADDR_BOUNDARY		0xc00

#define	LUT_BASE			0x80000
#define	LUT_GCR				0x28
#define	 LUT_GCR_RRE			__BIT(0)

#define	REG_TO_PAGE_INDEX(reg)	(((reg) >> 10) & 0x3ff)
#define	REG_TO_PAGE_ADDR(reg)	(((reg) & 0x3ff) | INDIRECT_ADDR_BOUNDARY)

#define	PAB_TARGET_BUS(b)		((b) << 24)
#define	PAB_TARGET_DEV(d)		((d) << 19)
#define	PAB_TARGET_FUNC(f)		((f) << 16)

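/*
 * Per-controller state: bus space tag/handle for the controller (CCSR)
 * registers, a separate handle for the configuration access window, the
 * controller revision (used for the rev 0x10 read workaround below) and
 * a mutex serialising configuration space accesses.
 */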
struct acpi_pci_layerscape_gen4 {
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	bus_space_handle_t win_bsh;
	uint8_t rev;
	kmutex_t lock;
};

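/*
 * Select which page of CCSR register space is visible through the
 * indirect access window by updating the page select field in PAB_CTRL.
 */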
static void
acpi_pci_layerscape_gen4_ccsr_setpage(struct acpi_pci_layerscape_gen4 *pcie, u_int page_index)
{
	uint32_t val;

	val = bus_space_read_4(pcie->bst, pcie->bsh, PAB_CTRL);
	val &= ~PAB_CTRL_PAGE_SEL;
	val |= __SHIFTIN(page_index, PAB_CTRL_PAGE_SEL);
	bus_space_write_4(pcie->bst, pcie->bsh, PAB_CTRL, val);
}

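/*
 * Read a CCSR register.  Offsets below INDIRECT_ADDR_BOUNDARY are read
 * directly; higher offsets go through the paged window.
 */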
static uint32_t
acpi_pci_layerscape_gen4_ccsr_read4(struct acpi_pci_layerscape_gen4 *pcie, bus_size_t reg)
{
	const bool indirect = reg >= INDIRECT_ADDR_BOUNDARY;
	const u_int page_index = indirect ? REG_TO_PAGE_INDEX(reg) : 0;
	const bus_size_t page_addr = indirect ? REG_TO_PAGE_ADDR(reg) : reg;

	acpi_pci_layerscape_gen4_ccsr_setpage(pcie, page_index);
	return bus_space_read_4(pcie->bst, pcie->bsh, page_addr);
}

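/*
 * Write a CCSR register, using the paged window for offsets at or above
 * INDIRECT_ADDR_BOUNDARY.
 */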
static void
acpi_pci_layerscape_gen4_ccsr_write4(struct acpi_pci_layerscape_gen4 *pcie,
    bus_size_t reg, pcireg_t data)
{
	const bool indirect = reg >= INDIRECT_ADDR_BOUNDARY;
	const u_int page_index = indirect ? REG_TO_PAGE_INDEX(reg) : 0;
	const bus_size_t page_addr = indirect ? REG_TO_PAGE_ADDR(reg) : reg;

	acpi_pci_layerscape_gen4_ccsr_setpage(pcie, page_index);
	bus_space_write_4(pcie->bst, pcie->bsh, page_addr, data);
}

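/*
 * Program AXI AMAP window 0 with the target bus/device/function so that
 * subsequent accesses through the configuration window reach that device.
 */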
static void
acpi_pci_layerscape_gen4_select_target(struct acpi_pci_layerscape_gen4 *pcie,
    pci_chipset_tag_t pc, pcitag_t tag)
{
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	const uint32_t target = PAB_TARGET_BUS(b) |
	    PAB_TARGET_DEV(d) | PAB_TARGET_FUNC(f);

	acpi_pci_layerscape_gen4_ccsr_write4(pcie, PAB_AXI_AMAP_PEX_WIN_L(0), target);
	acpi_pci_layerscape_gen4_ccsr_write4(pcie, PAB_AXI_AMAP_PEX_WIN_H(0), 0);
}

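/*
 * Reject configuration accesses to devices that cannot exist: only
 * device 0 is valid on the root bus and the first downstream bus, and
 * accesses beyond the root bus must also pass the generic MCFG check.
 */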
static bool
acpi_pci_layerscape_gen4_is_tag_okay(pci_chipset_tag_t pc, pcitag_t tag, int reg)
{
	struct acpi_pci_context *ap = pc->pc_conf_v;
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	if (b <= ap->ap_bus + 1 && d > 0)
		return false;

	if (b != ap->ap_bus)
		return acpimcfg_conf_valid(pc, tag, reg);

	return true;
}

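/*
 * Read PCI configuration space.  Accesses to the root bus go to the
 * controller's own CCSR registers; all others go through the AXI
 * configuration window after selecting the target B/D/F.  On rev 0x10
 * controllers, read response error forwarding (LUT_GCR_RRE) is disabled
 * around ID register reads, apparently so that probing absent devices
 * does not fault.
 */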
static int
acpi_pci_layerscape_gen4_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t *data)
{
	struct acpi_pci_context *ap = pc->pc_conf_v;
	struct acpi_pci_layerscape_gen4 *pcie = ap->ap_conf_priv;
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	if (!acpi_pci_layerscape_gen4_is_tag_okay(pc, tag, reg)) {
		*data = -1;
		return EINVAL;
	}

	mutex_enter(&pcie->lock);

	if (pcie->rev == 0x10 && reg == PCI_ID_REG)
		bus_space_write_4(pcie->bst, pcie->bsh, LUT_BASE + LUT_GCR, 0);

	if (b == ap->ap_bus) {
		*data = acpi_pci_layerscape_gen4_ccsr_read4(pcie, reg);
	} else {
		acpi_pci_layerscape_gen4_select_target(pcie, pc, tag);
		*data = bus_space_read_4(pcie->bst, pcie->win_bsh, reg);
	}

	if (pcie->rev == 0x10 && reg == PCI_ID_REG)
		bus_space_write_4(pcie->bst, pcie->bsh, LUT_BASE + LUT_GCR, LUT_GCR_RRE);

	mutex_exit(&pcie->lock);

	return 0;
}

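/*
 * Write PCI configuration space; same routing as the read path, but
 * without the ID register workaround.
 */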
static int
acpi_pci_layerscape_gen4_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data)
{
	struct acpi_pci_context *ap = pc->pc_conf_v;
	struct acpi_pci_layerscape_gen4 *pcie = ap->ap_conf_priv;
	int b, d, f;

	pci_decompose_tag(pc, tag, &b, &d, &f);

	if (!acpi_pci_layerscape_gen4_is_tag_okay(pc, tag, reg))
		return EINVAL;

	mutex_enter(&pcie->lock);

	if (b == ap->ap_bus) {
		acpi_pci_layerscape_gen4_ccsr_write4(pcie, reg, data);
	} else {
		acpi_pci_layerscape_gen4_select_target(pcie, pc, tag);
		bus_space_write_4(pcie->bst, pcie->win_bsh, reg, data);
	}

	mutex_exit(&pcie->lock);

	return 0;
}

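/*
 * Find the configuration window base address for a PCI segment by
 * walking the MCFG table allocations.  Returns 0 if no entry matches.
 */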
static UINT64
acpi_pci_layerscape_win_base(ACPI_INTEGER seg)
{
	ACPI_TABLE_MCFG *mcfg;
	ACPI_MCFG_ALLOCATION *ama;
	ACPI_STATUS rv;
	uint32_t off;
	int i;

	rv = AcpiGetTable(ACPI_SIG_MCFG, 0, (ACPI_TABLE_HEADER **)&mcfg);
	if (ACPI_FAILURE(rv))
		return 0;

	off = sizeof(ACPI_TABLE_MCFG);
	ama = ACPI_ADD_PTR(ACPI_MCFG_ALLOCATION, mcfg, off);
	for (i = 0; off + sizeof(ACPI_MCFG_ALLOCATION) <= mcfg->Header.Length; i++) {
		if (ama->PciSegment == seg)
			return ama->Address;
		off += sizeof(ACPI_MCFG_ALLOCATION);
		ama = ACPI_ADD_PTR(ACPI_MCFG_ALLOCATION, mcfg, off);
	}

	return 0;	/* not found */
}

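/*
 * Called for each NXP0016 device in the ACPI namespace.  For the
 * controller matching this segment, map the CCSR registers (first _CRS
 * memory resource) and the MCFG configuration window, then install the
 * non-ECAM configuration space accessors.
 */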
static ACPI_STATUS
acpi_pci_layerscape_gen4_map(ACPI_HANDLE handle, UINT32 level, void *ctx, void **retval)
{
	struct acpi_pci_context *ap = ctx;
	struct acpi_resources res;
	struct acpi_mem *mem;
	struct acpi_pci_layerscape_gen4 *pcie;
	bus_space_handle_t bsh;
	ACPI_HANDLE parent;
	ACPI_INTEGER seg;
	ACPI_STATUS rv;
	UINT64 win_base;
	int error;

	rv = AcpiGetParent(handle, &parent);
	if (ACPI_FAILURE(rv))
		return rv;
	rv = acpi_eval_integer(parent, "_SEG", &seg);
	if (ACPI_FAILURE(rv))
		seg = 0;
	if (ap->ap_seg != seg)
		return AE_OK;

	rv = acpi_resource_parse(ap->ap_dev, handle, "_CRS", &res, &acpi_resource_parse_ops_quiet);
	if (ACPI_FAILURE(rv))
		return rv;

	mem = acpi_res_mem(&res, 0);
	if (mem == NULL) {
		acpi_resource_cleanup(&res);
		return AE_NOT_FOUND;
	}

	win_base = acpi_pci_layerscape_win_base(seg);
	if (win_base == 0) {
		aprint_error_dev(ap->ap_dev, "couldn't find MCFG entry for segment %lu\n", seg);
		acpi_resource_cleanup(&res);
		return AE_NOT_FOUND;
	}

	error = bus_space_map(ap->ap_bst, mem->ar_base, mem->ar_length,
	    BUS_SPACE_MAP_NONPOSTED, &bsh);
	if (error != 0) {
		acpi_resource_cleanup(&res);
		return AE_NO_MEMORY;
	}

	pcie = kmem_alloc(sizeof(*pcie), KM_SLEEP);
	pcie->bst = ap->ap_bst;
	pcie->bsh = bsh;
	mutex_init(&pcie->lock, MUTEX_DEFAULT, IPL_HIGH);

	error = bus_space_map(ap->ap_bst, win_base, PCI_EXTCONF_SIZE,
	    BUS_SPACE_MAP_NONPOSTED, &pcie->win_bsh);
	if (error != 0) {
		mutex_destroy(&pcie->lock);
		kmem_free(pcie, sizeof(*pcie));
		bus_space_unmap(ap->ap_bst, bsh, mem->ar_length);
		acpi_resource_cleanup(&res);
		return AE_NO_MEMORY;
	}

	const pcireg_t cr = bus_space_read_4(pcie->bst, pcie->bsh, PCI_CLASS_REG);
	pcie->rev = PCI_REVISION(cr);

	ap->ap_conf_read = acpi_pci_layerscape_gen4_conf_read;
	ap->ap_conf_write = acpi_pci_layerscape_gen4_conf_write;
	ap->ap_conf_priv = pcie;

	aprint_verbose_dev(ap->ap_dev,
	    "PCIe segment %lu: Layerscape Gen4 rev. %#x found at %#lx-%#lx\n",
	    seg, pcie->rev, mem->ar_base, mem->ar_base + mem->ar_length - 1);

	return AE_CTRL_TERMINATE;
}

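/*
 * Entry point from the ACPI PCI machdep code: search the namespace for
 * Layerscape PCIe Gen4 (NXP0016) controllers and hook up the quirked
 * configuration space accessors for this segment.
 */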
void
acpi_pci_layerscape_gen4_init(struct acpi_pci_context *ap)
{
	ACPI_STATUS rv;

	rv = AcpiGetDevices(__UNCONST("NXP0016"), acpi_pci_layerscape_gen4_map, ap, NULL);
	if (ACPI_FAILURE(rv))
		return;
}