/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2012 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright (c) 2011
 *	Ben Gray <ben.r.gray@gmail.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/arm/pl310.c 330897 2018-03-14 03:19:51Z eadler $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/intr.h>

#include <machine/bus.h>
#include <machine/pl310.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

/*
 * Set the hw.pl310.enabled tunable to 0 if you need to disable the PL310
 * for debugging purposes.
 * Spec:
 * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0246e/DDI0246E_l2c310_r3p1_trm.pdf
 */

/*
 * Hardcode errata for now
 * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246b/pr01s02s02.html
 */
#define	PL310_ERRATA_588369
#define	PL310_ERRATA_753970
#define	PL310_ERRATA_727915
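
/*
 * Quick summary of the errata workarounds implemented below:
 *   588369 (r0p0-r1p0): Clean+Invalidate by PA may leave a clean line
 *     valid, so pl310_wbinv_range() issues a separate clean followed by
 *     an invalidate instead.
 *   727915 (r2p0, r3p0): background Clean+Invalidate by Way is unsafe,
 *     so pl310_wbinv_all() walks every set/way by index on r2p0 and
 *     wraps the operation in debug-register writes on r3p0.
 *   753970 (r3p0): the regular Cache Sync register is unsafe, so
 *     pl310_cache_sync() writes the alternate sync register at 0x740.
 */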

#define	PL310_LOCK(sc) do {		\
	mtx_lock_spin(&(sc)->sc_mtx);	\
} while (0)

#define	PL310_UNLOCK(sc) do {		\
	mtx_unlock_spin(&(sc)->sc_mtx);	\
} while (0)

static int pl310_enabled = 1;
TUNABLE_INT("hw.pl310.enabled", &pl310_enabled);

static uint32_t g_l2cache_way_mask;

static const uint32_t g_l2cache_line_size = 32;
static const uint32_t g_l2cache_align_mask = (32 - 1);

static uint32_t g_l2cache_size;
static uint32_t g_way_size;
static uint32_t g_ways_assoc;

static struct pl310_softc *pl310_softc;

static struct ofw_compat_data compat_data[] = {
	{"arm,pl310",		true}, /* Non-standard, FreeBSD. */
	{"arm,pl310-cache",	true},
	{NULL,			false}
};

static void
pl310_print_config(struct pl310_softc *sc)
{
	uint32_t aux, prefetch;
	const char *dis = "disabled";
	const char *ena = "enabled";

	aux = pl310_read4(sc, PL310_AUX_CTRL);
	prefetch = pl310_read4(sc, PL310_PREFETCH_CTRL);

	device_printf(sc->sc_dev, "Early BRESP response: %s\n",
		(aux & AUX_CTRL_EARLY_BRESP) ? ena : dis);
	device_printf(sc->sc_dev, "Instruction prefetch: %s\n",
		(aux & AUX_CTRL_INSTR_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Data prefetch: %s\n",
		(aux & AUX_CTRL_DATA_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Non-secure interrupt control: %s\n",
		(aux & AUX_CTRL_NS_INT_CTRL) ? ena : dis);
	device_printf(sc->sc_dev, "Non-secure lockdown: %s\n",
		(aux & AUX_CTRL_NS_LOCKDOWN) ? ena : dis);
	device_printf(sc->sc_dev, "Share override: %s\n",
		(aux & AUX_CTRL_SHARE_OVERRIDE) ? ena : dis);

	device_printf(sc->sc_dev, "Double linefill: %s\n",
		(prefetch & PREFETCH_CTRL_DL) ? ena : dis);
	device_printf(sc->sc_dev, "Instruction prefetch: %s\n",
		(prefetch & PREFETCH_CTRL_INSTR_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Data prefetch: %s\n",
		(prefetch & PREFETCH_CTRL_DATA_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Double linefill on WRAP request: %s\n",
		(prefetch & PREFETCH_CTRL_DL_ON_WRAP) ? ena : dis);
	device_printf(sc->sc_dev, "Prefetch drop: %s\n",
		(prefetch & PREFETCH_CTRL_PREFETCH_DROP) ? ena : dis);
	device_printf(sc->sc_dev, "Incr double Linefill: %s\n",
		(prefetch & PREFETCH_CTRL_INCR_DL) ? ena : dis);
	device_printf(sc->sc_dev, "Not same ID on exclusive sequence: %s\n",
		(prefetch & PREFETCH_CTRL_NOTSAMEID) ? ena : dis);
	device_printf(sc->sc_dev, "Prefetch offset: %d\n",
		(prefetch & PREFETCH_CTRL_OFFSET_MASK));
}

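/*
 * Program the tag or data RAM latencies.  A zero value for read, write, or
 * setup leaves that field unchanged; otherwise the argument is the desired
 * latency in cycles (1-8), stored in the register as (cycles - 1).
 */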
void
pl310_set_ram_latency(struct pl310_softc *sc, uint32_t which_reg,
   uint32_t read, uint32_t write, uint32_t setup)
{
	uint32_t v;

	KASSERT(which_reg == PL310_TAG_RAM_CTRL ||
	    which_reg == PL310_DATA_RAM_CTRL,
	    ("bad pl310 ram latency register address"));

	v = pl310_read4(sc, which_reg);
	if (setup != 0) {
		KASSERT(setup <= 8, ("bad pl310 setup latency: %d", setup));
		v &= ~RAM_CTRL_SETUP_MASK;
		v |= (setup - 1) << RAM_CTRL_SETUP_SHIFT;
	}
	if (read != 0) {
		KASSERT(read <= 8, ("bad pl310 read latency: %d", read));
		v &= ~RAM_CTRL_READ_MASK;
		v |= (read - 1) << RAM_CTRL_READ_SHIFT;
	}
	if (write != 0) {
		KASSERT(write <= 8, ("bad pl310 write latency: %d", write));
		v &= ~RAM_CTRL_WRITE_MASK;
		v |= (write - 1) << RAM_CTRL_WRITE_SHIFT;
	}
	pl310_write4(sc, which_reg, v);
}

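/*
 * Interrupt filter.  Interrupts are only set up (by pl310_config_intr())
 * when the controller is left disabled for debugging, so an event-counter
 * interrupt with the cache supposedly off means the L2 is not really off.
 */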
static int
pl310_filter(void *arg)
{
	struct pl310_softc *sc = arg;
	uint32_t intr;

	intr = pl310_read4(sc, PL310_INTR_MASK);

	if (!sc->sc_enabled && (intr & INTR_MASK_ECNTR)) {
		/*
		 * This is for debugging purposes, so be blunt about it.
		 * We disable the PL310 only when something fishy is going
		 * on and we need to make sure the L2 cache is 100% disabled.
		 */
		panic("pl310: caches disabled but cache event detected\n");
	}

	return (FILTER_HANDLED);
}

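/*
 * Background operations (invalidate/clean+invalidate by way) leave the
 * requested way bits set in the register until the hardware finishes, so
 * spin until the bits we care about read back as zero.
 */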
static __inline void
pl310_wait_background_op(uint32_t off, uint32_t mask)
{

	while (pl310_read4(pl310_softc, off) & mask)
		continue;
}


/**
 *	pl310_cache_sync - performs a cache sync operation
 *
 *	According to the TRM:
 *
 *  "Before writing to any other register you must perform an explicit
 *   Cache Sync operation. This is particularly important when the cache is
 *   enabled and changes to how the cache allocates new lines are to be made."
 *
 *
 */
static __inline void
pl310_cache_sync(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

#ifdef PL310_ERRATA_753970
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		/* Write uncached PL310 register */
		pl310_write4(pl310_softc, 0x740, 0xffffffff);
	else
#endif
		pl310_write4(pl310_softc, PL310_CACHE_SYNC, 0xffffffff);
}


static void
pl310_wbinv_all(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r2p0) {
		int i, j;

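		/*
		 * Clean and invalidate one line at a time by set/way index:
		 * the way number is shifted into the top bits (<< 28) and
		 * the set index is scaled by the 32-byte line size (<< 5).
		 */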
		for (i = 0; i < g_ways_assoc; i++) {
			for (j = 0; j < g_way_size / g_l2cache_line_size; j++) {
				pl310_write4(pl310_softc,
				    PL310_CLEAN_INV_LINE_IDX,
				    (i << 28 | j << 5));
			}
		}
		pl310_cache_sync();
		PL310_UNLOCK(pl310_softc);
		return;
	}
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		platform_pl310_write_debug(pl310_softc, 3);
#endif
	pl310_write4(pl310_softc, PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
	pl310_wait_background_op(PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
	pl310_cache_sync();
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		platform_pl310_write_debug(pl310_softc, 0);
#endif
	PL310_UNLOCK(pl310_softc);
}

static void
pl310_wbinv_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
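	/*
	 * Round the start address down to a line boundary and the size up to
	 * a whole number of lines; maintenance by PA always operates on full
	 * 32-byte cache lines.
	 */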
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}
	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}

#ifdef PL310_ERRATA_727915
	platform_pl310_write_debug(pl310_softc, 3);
#endif
	while (size > 0) {
#ifdef PL310_ERRATA_588369
		if (pl310_softc->sc_rtl_revision <= CACHE_ID_RELEASE_r1p0) {
			/*
			 * Errata 588369 says that clean + inv may keep the
			 * cache line if it was clean; the recommended
			 * workaround is to clean then invalidate the cache
			 * line, with write-back and cache linefill disabled.
			 */
			pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);
			pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);
		} else
#endif
			pl310_write4(pl310_softc, PL310_CLEAN_INV_LINE_PA,
			    start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}
#ifdef PL310_ERRATA_727915
	platform_pl310_write_debug(pl310_softc, 0);
#endif

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

static void
pl310_wb_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}

	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}

	while (size > 0) {
		pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

static void
pl310_inv_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}
	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}
	while (size > 0) {
		pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

static void
pl310_drain_writebuf(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

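/*
 * Decode the cache geometry from the auxiliary control register: a way-size
 * field value of n means 1 << (n + 13) bytes per way (e.g. n = 3 is 64KB),
 * and the associativity bit selects 16 ways instead of 8.  The total cache
 * size is the way size times the number of ways.
 */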
static void
pl310_set_way_sizes(struct pl310_softc *sc)
{
	uint32_t aux_value;

	aux_value = pl310_read4(sc, PL310_AUX_CTRL);
	g_way_size = (aux_value & AUX_CTRL_WAY_SIZE_MASK) >>
	    AUX_CTRL_WAY_SIZE_SHIFT;
	g_way_size = 1 << (g_way_size + 13);
	if (aux_value & (1 << AUX_CTRL_ASSOCIATIVITY_SHIFT))
		g_ways_assoc = 16;
	else
		g_ways_assoc = 8;
	g_l2cache_way_mask = (1 << g_ways_assoc) - 1;
	g_l2cache_size = g_way_size * g_ways_assoc;
}

/*
 * Setup interrupt handling.  This is done only if the cache controller is
 * disabled, for debugging.  We set counters so when a cache event happens we'll
 * get interrupted and be warned that something is wrong, because no cache
 * events should happen if we're disabled.
 */
static void
pl310_config_intr(void *arg)
{
	struct pl310_softc *sc;

	sc = arg;

	/* activate the interrupt */
	bus_setup_intr(sc->sc_dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    pl310_filter, NULL, sc, &sc->sc_irq_h);

	/* Cache Line Eviction for Counter 0 */
	pl310_write4(sc, PL310_EVENT_COUNTER0_CONF,
	    EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_CO);
	/* Data Read Request for Counter 1 */
	pl310_write4(sc, PL310_EVENT_COUNTER1_CONF,
	    EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_DRREQ);

	/* Enable and clear pending interrupts */
	pl310_write4(sc, PL310_INTR_CLEAR, INTR_MASK_ECNTR);
	pl310_write4(sc, PL310_INTR_MASK, INTR_MASK_ALL);

	/* Enable counters and reset C0 and C1 */
	pl310_write4(sc, PL310_EVENT_COUNTER_CTRL,
	    EVENT_COUNTER_CTRL_ENABLED |
	    EVENT_COUNTER_CTRL_C0_RESET |
	    EVENT_COUNTER_CTRL_C1_RESET);

	config_intrhook_disestablish(sc->sc_ich);
	free(sc->sc_ich, M_DEVBUF);
	sc->sc_ich = NULL;
}

static int
pl310_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);
	device_set_desc(dev, "PL310 L2 cache controller");
	return (0);
}

static int
pl310_attach(device_t dev)
{
	struct pl310_softc *sc = device_get_softc(dev);
	int rid;
	uint32_t cache_id, debug_ctrl;

	sc->sc_dev = dev;
	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem_res == NULL)
		panic("%s: Cannot map registers", device_get_name(dev));

	/* Allocate an IRQ resource */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	                                        RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "cannot allocate IRQ, not using interrupt\n");
	}

	pl310_softc = sc;
	mtx_init(&sc->sc_mtx, "pl310lock", NULL, MTX_SPIN);

	cache_id = pl310_read4(sc, PL310_CACHE_ID);
	sc->sc_rtl_revision = (cache_id >> CACHE_ID_RELEASE_SHIFT) &
	    CACHE_ID_RELEASE_MASK;
	device_printf(dev, "Part number: 0x%x, release: 0x%x\n",
	    (cache_id >> CACHE_ID_PARTNUM_SHIFT) & CACHE_ID_PARTNUM_MASK,
	    (cache_id >> CACHE_ID_RELEASE_SHIFT) & CACHE_ID_RELEASE_MASK);

	/*
	 * If L2 cache is already enabled then something has violated the rules,
	 * because caches are supposed to be off at kernel entry.  The cache
	 * must be disabled to write the configuration registers without
	 * triggering an access error (SLVERR), but there's no documented safe
	 * procedure for disabling the L2 cache in the manual.  So we'll try to
	 * invent one:
	 *  - Use the debug register to force write-through mode and prevent
	 *    linefills (allocation of new lines on read); now anything we do
	 *    will not cause new data to come into the L2 cache.
	 *  - Writeback and invalidate the current contents.
	 *  - Disable the controller.
	 *  - Restore the original debug settings.
	 */
	if (pl310_read4(sc, PL310_CTRL) & CTRL_ENABLED) {
		device_printf(dev, "Warning: L2 Cache should not already be "
		    "active; trying to de-activate and re-initialize...\n");
		sc->sc_enabled = 1;
		debug_ctrl = pl310_read4(sc, PL310_DEBUG_CTRL);
		platform_pl310_write_debug(sc, debug_ctrl |
		    DEBUG_CTRL_DISABLE_WRITEBACK | DEBUG_CTRL_DISABLE_LINEFILL);
		pl310_set_way_sizes(sc);
		pl310_wbinv_all();
		platform_pl310_write_ctrl(sc, CTRL_DISABLED);
		platform_pl310_write_debug(sc, debug_ctrl);
	}
	sc->sc_enabled = pl310_enabled;

	if (sc->sc_enabled) {
		platform_pl310_init(sc);
		pl310_set_way_sizes(sc); /* platform init might change these */
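		/*
		 * Invalidate all ways before enabling the controller so that
		 * it starts out with no stale contents.
		 */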
		pl310_write4(pl310_softc, PL310_INV_WAY, 0xffff);
		pl310_wait_background_op(PL310_INV_WAY, 0xffff);
		platform_pl310_write_ctrl(sc, CTRL_ENABLED);
		device_printf(dev, "L2 Cache enabled: %uKB/%dB %d ways\n",
		    (g_l2cache_size / 1024), g_l2cache_line_size, g_ways_assoc);
		if (bootverbose)
			pl310_print_config(sc);
	} else {
		if (sc->sc_irq_res != NULL) {
			sc->sc_ich = malloc(sizeof(*sc->sc_ich), M_DEVBUF, M_WAITOK);
			sc->sc_ich->ich_func = pl310_config_intr;
			sc->sc_ich->ich_arg = sc;
			if (config_intrhook_establish(sc->sc_ich) != 0) {
				device_printf(dev,
				    "config_intrhook_establish failed\n");
				free(sc->sc_ich, M_DEVBUF);
				return (ENXIO);
			}
		}

		device_printf(dev, "L2 Cache disabled\n");
	}

	/* Set the l2 functions in the set of cpufuncs */
	cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
	cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
	cpufuncs.cf_l2cache_inv_range = pl310_inv_range;
	cpufuncs.cf_l2cache_wb_range = pl310_wb_range;
	cpufuncs.cf_l2cache_drain_writebuf = pl310_drain_writebuf;

	return (0);
}

static device_method_t pl310_methods[] = {
	DEVMETHOD(device_probe, pl310_probe),
	DEVMETHOD(device_attach, pl310_attach),
	DEVMETHOD_END
};

static driver_t pl310_driver = {
	"l2cache",
	pl310_methods,
	sizeof(struct pl310_softc),
};
static devclass_t pl310_devclass;

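/*
 * Attach during the CPU bus pass, ahead of the default pass, so the
 * cpufuncs L2 maintenance hooks installed above are in place before the
 * rest of the drivers attach.
 */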
EARLY_DRIVER_MODULE(pl310, simplebus, pl310_driver, pl310_devclass, 0, 0,
    BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);