/*
 * File:         arch/blackfin/mach-common/cplbmgr.S
 * Based on:
 * Author:       LG Soft India
 *
 * Created:      ?
 * Description:  CPLB replacement routine for CPLB mismatch
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

/* Usage: int _cplb_mgr(int is_data_miss, int enable_cache)
 * is_data_miss==2 => Mark as Dirty, write to the clean data page
 * is_data_miss==1 => Replace a data CPLB.
 * is_data_miss==0 => Replace an instruction CPLB.
 *
 * Returns:
 * CPLB_RELOADED	=> Successfully updated CPLB table.
 * CPLB_NO_UNLOCKED	=> All CPLBs are locked, so cannot be evicted.
 *			   This indicates that the CPLBs in the configuration
 *			   table are badly configured, as this should never
 *			   occur.
 * CPLB_NO_ADDR_MATCH	=> The address being accessed, which triggered the
 *			   exception, is not covered by any of the CPLBs in
 *			   the configuration table. The application is
 *			   presumably misbehaving.
 * CPLB_PROT_VIOL	=> The address being accessed, which triggered the
 *			   exception, was not a first-write to a clean Write
 *			   Back Data page, and so presumably is a genuine
 *			   violation of the page's protection attributes.
 *			   The application is misbehaving.
 */
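
/* Illustrative caller-side handling (a sketch only, not the kernel's actual
 * exception-handler code; the return codes come from asm/cplb.h):
 *
 *	int ret = _cplb_mgr(is_data_miss, enable_cache);
 *	if (ret == CPLB_RELOADED)
 *		return;			// retry the faulting access
 *	// CPLB_NO_UNLOCKED, CPLB_NO_ADDR_MATCH and CPLB_PROT_VIOL are
 *	// reported to the fault handler / offending process instead.
 */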

#include <linux/linkage.h>
#include <asm/blackfin.h>
#include <asm/cplb.h>

#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
.section .l1.text
#else
.text
#endif

.align 2;
ENTRY(_cplb_mgr)

	[--SP]=( R7:4,P5:3 );

	CC = R0 == 2;
	IF CC JUMP .Ldcplb_write;

	CC = R0 == 0;
	IF !CC JUMP .Ldcplb_miss_compare;

	/* ICPLB Miss Exception. We need to choose one of the
	 * currently-installed CPLBs, and replace it with one
	 * from the configuration table.
	 */

	P4.L = (ICPLB_FAULT_ADDR & 0xFFFF);
	P4.H = (ICPLB_FAULT_ADDR >> 16);

	P1 = 16;
	P5.L = _page_size_table;
	P5.H = _page_size_table;

	P0.L = (ICPLB_DATA0 & 0xFFFF);
	P0.H = (ICPLB_DATA0 >> 16);
	R4 = [P4];		/* Get faulting address*/
	R6 = 64;		/* Advance past the fault address, which*/
	R6 = R6 + R4;		/* we'll use if we find a match*/
	R3 = ((16 << 8) | 2);	/* Extract mask, bits 16 and 17.*/

	R5 = 0;
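
	/* Scan all 16 installed ICPLBs. For each valid entry, the page-size
	 * field (bits 17:16 of ICPLB_DATAx) indexes _page_size_table to get
	 * the page length, from which the page end is computed and compared
	 * against the fault address. R5 counts iterations; once bit 4 of R5
	 * is set (R5 == 16), every entry has been examined.
	 */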
.Lisearch:

	R1 = [P0-0x100];	/* Address for this CPLB */

	R0 = [P0++];		/* Info for this CPLB*/
	CC = BITTST(R0,0);	/* Is the CPLB valid?*/
	IF !CC JUMP .Lnomatch;	/* Skip it, if not.*/
	CC = R4 < R1(IU);	/* If fault address less than page start*/
	IF CC JUMP .Lnomatch;	/* then skip this one.*/
	R2 = EXTRACT(R0,R3.L) (Z);	/* Get page size*/
	P1 = R2;
	P1 = P5 + (P1<<2);	/* index into page-size table*/
	R2 = [P1];		/* Get the page size*/
	R1 = R1 + R2;		/* and add to page start, to get page end*/
	CC = R4 < R1(IU);	/* and see whether fault addr is in page.*/
	IF !CC R4 = R6;		/* If so, advance the address and finish loop.*/
	IF !CC JUMP .Lisearch_done;
.Lnomatch:
	/* Go around again*/
	R5 += 1;
	CC = BITTST(R5, 4);	/* i.e. CC = R5 >= 16*/
	IF !CC JUMP .Lisearch;

.Lisearch_done:
	I0 = R4;		/* Fault address we'll search for*/

	/* set up pointers */
	P0.L = (ICPLB_DATA0 & 0xFFFF);
	P0.H = (ICPLB_DATA0 >> 16);

	/* The replacement procedure for ICPLBs */

	P4.L = (IMEM_CONTROL & 0xFFFF);
	P4.H = (IMEM_CONTROL >> 16);

	/* disable cplbs */
	R5 = [P4];		/* Control Register*/
	BITCLR(R5,ENICPLB_P);
	CLI R1;
	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
	.align 8;
	[P4] = R5;
	SSYNC;
	STI R1;

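	/* ICPLBs are now disabled; they stay off while the table is
	 * rearranged below, and are re-enabled once the new entry has
	 * been written into ICPLB15.
	 */
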
	R1 = -1;		/* end point comparison */
	R3 = 16;		/* counter */

	/* Search through CPLBs for first non-locked entry */
	/* Overwrite it by moving everyone else up by 1 */
.Licheck_lock:
	R0 = [P0++];
	R3 = R3 + R1;
	CC = R3 == R1;
	IF CC JUMP .Lall_locked;
	CC = BITTST(R0, 0);		/* an invalid entry is good */
	IF !CC JUMP .Lifound_victim;
	CC = BITTST(R0,1);		/* but a locked entry isn't */
	IF CC JUMP .Licheck_lock;

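	/* We arrive here with P0 pointing one slot past the victim's
	 * ICPLB_DATAx register and R3 holding the number of entries between
	 * the victim and ICPLB15; that count drives the move loop below,
	 * which shifts those entries down one slot so that ICPLB15 becomes
	 * free.
	 */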
.Lifound_victim:
#ifdef CONFIG_CPLB_INFO
	R7 = [P0 - 0x104];
	P2.L = _ipdt_table;
	P2.H = _ipdt_table;
	P3.L = _ipdt_swapcount_table;
	P3.H = _ipdt_swapcount_table;
	P3 += -4;
.Licount:
	R2 = [P2];	/* address from config table */
	P2 += 8;
	P3 += 8;
	CC = R2==-1;
	IF CC JUMP .Licount_done;
	CC = R7==R2;
	IF !CC JUMP .Licount;
	R7 = [P3];
	R7 += 1;
	[P3] = R7;
	CSYNC;
.Licount_done:
#endif
	LC0=R3;
	LSETUP(.Lis_move,.Lie_move) LC0;
.Lis_move:
	R0 = [P0];
	[P0 - 4] = R0;
	R0 = [P0 - 0x100];
	[P0-0x104] = R0;
.Lie_move: P0 += 4;

	/* We've made space in the ICPLB table, so that ICPLB15
	 * is now free to be overwritten. Next, we have to determine
	 * which CPLB we need to install, from the configuration
	 * table. This is a matter of getting the start-of-page
	 * addresses and page-lengths from the config table, and
	 * determining whether the fault address falls within that
	 * range.
	 */
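
	/* In C-like terms (illustrative only; the tables are generated
	 * elsewhere as (address, data) pairs terminated by an address of -1):
	 *
	 *	for (p = ipdt_table; p[0] != -1; p += 2) {
	 *		unsigned long addr = p[0], data = p[1];
	 *		unsigned long size = page_size_table[(data >> 16) & 3];
	 *		if (addr <= fault && fault < addr + size) {
	 *			ICPLB_DATA15 = data;
	 *			ICPLB_ADDR15 = addr;
	 *			break;
	 *		}
	 *	}
	 */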

	P2.L = _ipdt_table;
	P2.H = _ipdt_table;
#ifdef	CONFIG_CPLB_INFO
	P3.L = _ipdt_swapcount_table;
	P3.H = _ipdt_swapcount_table;
	P3 += -8;
#endif
	P0.L = _page_size_table;
	P0.H = _page_size_table;

	/* Retrieve our fault address (which may have been advanced
	 * because the faulting instruction crossed a page boundary).
	 */

	R0 = I0;

	/* An extraction pattern, to get the page-size bits from
	 * the CPLB data entry. Bits 16-17, so two bits at posn 16.
	 */

	R1 = ((16<<8)|2);
.Linext:	R4 = [P2++];	/* address from config table */
	R2 = [P2++];	/* data from config table */
#ifdef	CONFIG_CPLB_INFO
	P3 += 8;
#endif

	CC = R4 == -1;	/* End of config table*/
	IF CC JUMP .Lno_page_in_table;

	/* See if the faulting address >= start address */
	CC = R4 <= R0(IU);
	IF !CC JUMP .Linext;

	/* extract page size (17:16)*/
	R3 = EXTRACT(R2, R1.L) (Z);

	/* add page size to addr to get range */

	P5 = R3;
	P5 = P0 + (P5 << 2);	/* scaled, for int access*/
	R3 = [P5];
	R3 = R3 + R4;

	/* See if the faulting address < (start address + page size) */
	CC = R0 < R3(IU);
	IF !CC JUMP .Linext;

	/* We've found a CPLB in the config table that covers
	 * the faulting address, so install this CPLB into the
	 * last entry of the table.
	 */

	P1.L = (ICPLB_DATA15 & 0xFFFF);		/* ICPLB_DATA15 */
	P1.H = (ICPLB_DATA15 >> 16);
	[P1] = R2;
	[P1-0x100] = R4;
#ifdef	CONFIG_CPLB_INFO
	R3 = [P3];
	R3 += 1;
	[P3] = R3;
#endif

	/* P4 points to IMEM_CONTROL, and R5 contains its old
	 * value, after we disabled ICPLBs. Re-enable them.
	 */

	BITSET(R5,ENICPLB_P);
	CLI R2;
	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
	.align 8;
	[P4] = R5;
	SSYNC;
	STI R2;

	( R7:4,P5:3 ) = [SP++];
	R0 = CPLB_RELOADED;
	RTS;

/* FAILED CASES*/
.Lno_page_in_table:
	( R7:4,P5:3 ) = [SP++];
	R0 = CPLB_NO_ADDR_MATCH;
	RTS;
.Lall_locked:
	( R7:4,P5:3 ) = [SP++];
	R0 = CPLB_NO_UNLOCKED;
	RTS;
.Lprot_violation:
	( R7:4,P5:3 ) = [SP++];
	R0 = CPLB_PROT_VIOL;
	RTS;

.Ldcplb_write:

	/* If a DCPLB is marked as write-back (CPLB_WT==0), and
	 * it is clean (CPLB_DIRTY==0), then a write to the
	 * CPLB's page triggers a protection violation. We have to
	 * mark the CPLB as dirty, to indicate that there are
	 * pending writes associated with the CPLB.
	 */

	P4.L = (DCPLB_STATUS & 0xFFFF);
	P4.H = (DCPLB_STATUS >> 16);
	P3.L = (DCPLB_DATA0 & 0xFFFF);
	P3.H = (DCPLB_DATA0 >> 16);
	R5 = [P4];

	/* A protection violation can be caused by more than just writes
	 * to a clean WB page, so we have to ensure that:
	 * - It's a write
	 * - to a clean WB page
	 * - and is allowed in the mode in which the access occurred.
	 */
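
	/* These three conditions are checked in that order below: bit 16 of
	 * DCPLB_STATUS says whether the access was a write, bits 14 and 7 of
	 * the faulting DCPLB's data word give its WT and DIRTY state, and
	 * bits 3/4 of the data word are the user/supervisor write-enable
	 * bits, tested against the mode recorded in DCPLB_STATUS bit 17.
	 */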

	CC = BITTST(R5, 16);	/* ensure it was a write*/
	IF !CC JUMP .Lprot_violation;

	/* to check the rest, we have to retrieve the DCPLB.*/

	/* The low half of DCPLB_STATUS is a bit mask*/

	R2 = R5.L (Z);	/* indicating which CPLB triggered the event.*/
	R3 = 30;	/* so we can use this to determine the offset*/
	R2.L = SIGNBITS R2;
	R2 = R2.L (Z);	/* into the DCPLB table.*/
	R3 = R3 - R2;
	P4 = R3;
	P3 = P3 + (P4<<2);
	R3 = [P3];	/* Retrieve the CPLB*/
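
	/* SIGNBITS returns the number of redundant sign bits, which for a
	 * positive value whose highest set bit is n is 30 - n; so R3 - R2
	 * recovers n, the index of the faulting DCPLB. For example, a status
	 * mask of 0x0020 (bit 5) yields index 5, and P3 then points at
	 * DCPLB_DATA5.
	 */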

	/* Now we can check whether it's a clean WB page*/

	CC = BITTST(R3, 14);	/* 0==WB, 1==WT*/
	IF CC JUMP .Lprot_violation;
	CC = BITTST(R3, 7);	/* 0 == clean, 1 == dirty*/
	IF CC JUMP .Lprot_violation;

	/* Check whether the write is allowed in the mode that was active.*/

	R2 = 1<<3;		/* checking write in user mode*/
	CC = BITTST(R5, 17);	/* 0==was user, 1==was super*/
	R5 = CC;
	R2 <<= R5;		/* if was super, check write in super mode*/
	R2 = R3 & R2;
	CC = R2 == 0;
	IF CC JUMP .Lprot_violation;

	/* It's a genuine write-to-clean-page.*/

	BITSET(R3, 7);		/* mark as dirty*/
	[P3] = R3;		/* and write back.*/
	NOP;
	CSYNC;
	( R7:4,P5:3 ) = [SP++];
	R0 = CPLB_RELOADED;
	RTS;

.Ldcplb_miss_compare:

	/* Data CPLB Miss event. We need to choose a CPLB to
	 * evict, and then locate a new CPLB to install from the
	 * config table that covers the faulting address.
	 */

	P1.L = (DCPLB_DATA15 & 0xFFFF);
	P1.H = (DCPLB_DATA15 >> 16);

	P4.L = (DCPLB_FAULT_ADDR & 0xFFFF);
	P4.H = (DCPLB_FAULT_ADDR >> 16);
	R4 = [P4];
	I0 = R4;

	/* The replacement procedure for DCPLBs*/

	R6 = R1;	/* Save for later*/

	/* Turn off CPLBs while we work.*/
	P4.L = (DMEM_CONTROL & 0xFFFF);
	P4.H = (DMEM_CONTROL >> 16);
	R5 = [P4];
	BITCLR(R5,ENDCPLB_P);
	CLI R0;
	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
	.align 8;
	[P4] = R5;
	SSYNC;
	STI R0;

	/* Start looking for a CPLB to evict. Our order of preference
	 * is: invalid CPLBs, clean CPLBs, dirty CPLBs. Locked CPLBs
	 * are no good.
	 */
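
	/* Each pass of the outer loop below applies one mask from
	 * _dcplb_preference: the first pass accepts only entries whose valid
	 * bit is clear, the second pass any entry whose lock bit is clear.
	 * An otherwise-acceptable entry is still skipped if its page may
	 * cover the current kernel stack (the SP range check below), so that
	 * we do not evict the mapping our own stack accesses depend on.
	 */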

	I1.L = (DCPLB_DATA0 & 0xFFFF);
	I1.H = (DCPLB_DATA0 >> 16);
	P1 = 2;
	P2 = 16;
	I2.L = _dcplb_preference;
	I2.H = _dcplb_preference;
	LSETUP(.Lsdsearch1, .Ledsearch1) LC0 = P1;
.Lsdsearch1:
	R0 = [I2++];		/* Get the bits we're interested in*/
	P0 = I1;		/* Go back to start of table*/
	LSETUP (.Lsdsearch2, .Ledsearch2) LC1 = P2;
.Lsdsearch2:
	R1 = [P0++];		/* Fetch each installed CPLB in turn*/
	R2 = R1 & R0;		/* and test for interesting bits.*/
	CC = R2 == 0;		/* If none are set, it'll do.*/
	IF !CC JUMP .Lskip_stack_check;

	R2 = [P0 - 0x104];	/* R2 - PageStart */
	P3.L = _page_size_table; /* retrieve end address */
	P3.H = _page_size_table; /* retrieve end address */
	R3 = 0x1002;		/* position 16, length 2 bits */
#ifdef ANOMALY_05000209
	nop;			/* Anomaly 05000209 */
#endif
	R7 = EXTRACT(R1,R3.l);
	R7 = R7 << 2;		/* Page size index offset */
	P5 = R7;
	P3 = P3 + P5;
	R7 = [P3];		/* page size in bytes */

	R7 = R2 + R7;		/* R7 - PageEnd */
	R4 = SP;		/* Test SP is in range */

	CC = R7 < R4;		/* if PageEnd < SP */
	IF CC JUMP .Ldfound_victim;
	R3 = 0x284;		/* stack depth used since the start of the
				 * trap handler up to this point, plus 20
				 * stack slots of headroom for future
				 * modifications.
				 */
	R4 = R4 + R3;
	CC = R4 < R2;		/* if SP + stacklen < PageStart */
	IF CC JUMP .Ldfound_victim;
.Lskip_stack_check:

.Ledsearch2: NOP;
.Ledsearch1: NOP;

	/* If we got here, we didn't find a DCPLB we considered
	 * replaceable, which means all of them were locked.
	 */

	JUMP .Lall_locked;
.Ldfound_victim:

#ifdef CONFIG_CPLB_INFO
	R7 = [P0 - 0x104];
	P2.L = _dpdt_table;
	P2.H = _dpdt_table;
	P3.L = _dpdt_swapcount_table;
	P3.H = _dpdt_swapcount_table;
	P3 += -4;
.Ldicount:
	R2 = [P2];
	P2 += 8;
	P3 += 8;
	CC = R2==-1;
	IF CC JUMP .Ldicount_done;
	CC = R7==R2;
	IF !CC JUMP .Ldicount;
	R7 = [P3];
	R7 += 1;
	[P3] = R7;
.Ldicount_done:
#endif

	/* Clean down the hardware loops*/
	R2 = 0;
	LC1 = R2;
	LC0 = R2;

	/* There's a suitable victim in [P0-4] (because we've
	 * advanced already).
	 */

.LDdoverwrite:

	/* [P0-4] is a suitable victim CPLB, so we want to
	 * overwrite it by moving all the following CPLBs
	 * one space closer to the start.
	 */

	R1.L = (DCPLB_DATA16 & 0xFFFF);		/* DCPLB_DATA15 + 4 */
	R1.H = (DCPLB_DATA16 >> 16);
	R0 = P0;

	/* If the victim happens to be in DCPLB15,
	 * we don't need to move anything.
	 */

	CC = R1 == R0;
	IF CC JUMP .Lde_moved;
	R1 = R1 - R0;
	R1 >>= 2;
	P1 = R1;
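
	/* P1 now holds (DCPLB_DATA16 - P0) / 4, i.e. the number of
	 * (data, address) pairs between the victim and DCPLB15 that the
	 * loop below shifts down by one slot.
	 */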
	LSETUP(.Lds_move, .Lde_move) LC0=P1;
.Lds_move:
	R0 = [P0++];	/* move data */
	[P0 - 8] = R0;
	R0 = [P0-0x104];	/* move address */
.Lde_move: [P0-0x108] = R0;

	/* We've now made space in DCPLB15 for the new CPLB to be
	 * installed. The next stage is to locate a CPLB in the
	 * config table that covers the faulting address.
	 */

.Lde_moved: NOP;
	R0 = I0;		/* Our faulting address */

	P2.L = _dpdt_table;
	P2.H = _dpdt_table;
#ifdef	CONFIG_CPLB_INFO
	P3.L = _dpdt_swapcount_table;
	P3.H = _dpdt_swapcount_table;
	P3 += -8;
#endif

	P1.L = _page_size_table;
	P1.H = _page_size_table;

	/* An extraction pattern, to retrieve bits 17:16.*/

	R1 = (16<<8)|2;
.Ldnext:	R4 = [P2++];	/* address */
	R2 = [P2++];	/* data */
#ifdef	CONFIG_CPLB_INFO
	P3 += 8;
#endif

	CC = R4 == -1;
	IF CC JUMP .Lno_page_in_table;

	/* See if the faulting address >= start address */
	CC = R4 <= R0(IU);
	IF !CC JUMP .Ldnext;

	/* extract page size (17:16)*/
	R3 = EXTRACT(R2, R1.L) (Z);

	/* add page size to addr to get range */

	P5 = R3;
	P5 = P1 + (P5 << 2);
	R3 = [P5];
	R3 = R3 + R4;

	/* See if the faulting address < (start address + page size) */
	CC = R0 < R3(IU);
	IF !CC JUMP .Ldnext;

	/* We've found the CPLB that should be installed, so
	 * write it into CPLB15, masking off any caching bits
	 * if necessary.
	 */

	P1.L = (DCPLB_DATA15 & 0xFFFF);
	P1.H = (DCPLB_DATA15 >> 16);

	/* If the DCPLB has cache bits set, but caching hasn't
	 * been enabled, then we want to mask off the cache-in-L1
	 * bit before installing. Moreover, if caching is off, we
	 * also want to ensure that the DCPLB has WT mode set, rather
	 * than WB, since WB pages still trigger first-write exceptions
	 * even when caching is off and the page isn't marked as
	 * cacheable. Finally, we could mark the page as clean, not dirty,
	 * but we choose to leave that decision to the user; if the user
	 * chooses to have a CPLB pre-defined as dirty, then they always
	 * pay the cost of flushing during eviction, but don't pay the
	 * cost of first-write exceptions to mark the page as dirty.
	 */

#ifdef CONFIG_BLKFIN_WT
	BITSET(R6, 14);		/* Set WT*/
#endif

	[P1] = R2;
	[P1-0x100] = R4;
#ifdef	CONFIG_CPLB_INFO
	R3 = [P3];
	R3 += 1;
	[P3] = R3;
#endif

	/* We've installed the CPLB, so re-enable CPLBs. P4
	 * points to DMEM_CONTROL, and R5 is the value we
	 * last wrote to it, when we were disabling CPLBs.
	 */

	BITSET(R5,ENDCPLB_P);
	CLI R2;
	.align 8;
	[P4] = R5;
	SSYNC;
	STI R2;

	( R7:4,P5:3 ) = [SP++];
	R0 = CPLB_RELOADED;
	RTS;
ENDPROC(_cplb_mgr)

.data
.align 4;
_page_size_table:
.byte4	0x00000400;	/* 1K */
.byte4	0x00001000;	/* 4K */
.byte4	0x00100000;	/* 1M */
.byte4	0x00400000;	/* 4M */
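
/* The table above is indexed by the two-bit PAGE_SIZE field (bits 17:16)
 * of a CPLB data word: 0 = 1KB, 1 = 4KB, 2 = 1MB, 3 = 4MB.
 */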

.align 4;
_dcplb_preference:
.byte4	0x00000001;	/* valid bit */
.byte4	0x00000002;	/* lock bit */
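
/* The masks above are applied in order by the DCPLB victim search: an
 * installed entry is an acceptable victim on a given pass only if none of
 * that pass's bits are set in its DCPLB_DATAx word.
 */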