/* cvmx-tlb.c revision 215976 */
1/***********************license start***************
2 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Networks nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41
42
43/**
44 * @file
45 *
46 * cvmx-tlb supplies per core TLB access functions for simple executive
47 * applications.
48 *
49 * <hr>$Revision: 41586 $<hr>
50 */
51#include "cvmx.h"
52#include "cvmx-tlb.h"
53
54//#define DEBUG
55
56/**
57 * @INTERNAL
58 * Convert page mask to string
59 */
60static inline const char* __mask_to_str(uint64_t mask)
61{
62    /* Most OCTEON processor does not support 1K page sizes */
63    uint64_t non_1k_mask = mask + CVMX_TLB_PAGEMASK_4K;
64
65    switch (non_1k_mask) {
66    case CVMX_TLB_PAGEMASK_4K:     return "4kb";
67    case CVMX_TLB_PAGEMASK_16K:    return "16kb";
68    case CVMX_TLB_PAGEMASK_64K:    return "64kb";
69    case CVMX_TLB_PAGEMASK_256K:   return "256kb";
70    case CVMX_TLB_PAGEMASK_1M:     return "1Mb";
71    case CVMX_TLB_PAGEMASK_4M:     return "4Mb";
72    case CVMX_TLB_PAGEMASK_16M:    return "16Mb";
73    case CVMX_TLB_PAGEMASK_64M:    return "64Mb";
74    case CVMX_TLB_PAGEMASK_256M:   return "256Mb";
75    }
76
77    return "";
78}
79
/**
 * @INTERNAL
 * Issue the TLB read (TLBR) instruction.
 *
 * Loads the TLB entry selected by the C0 Index register into the
 * EntryHi/EntryLo0/EntryLo1/PageMask registers.  The surrounding EHB
 * (execution hazard barrier) instructions clear CP0 hazards before
 * and after the read.
 */
static inline void __tlb_read(void){
    CVMX_EHB;   /* settle any pending CP0 writes before reading */
    CVMX_TLBR;
    CVMX_EHB;   /* make the read results visible to following code */
}
89
/**
 * @INTERNAL
 * Issue the TLB write-indexed (TLBWI) instruction.
 *
 * Writes the EntryHi/EntryLo0/EntryLo1/PageMask registers into the
 * TLB entry selected by the C0 Index register.  EHB barriers clear
 * CP0 hazards around the write.
 */
static inline void __tlb_write(void){

    CVMX_EHB;   /* ensure the staged CP0 register values are committed */
    CVMX_TLBWI;
    CVMX_EHB;   /* ensure the new mapping is in effect before continuing */
}
100
101/**
102 * @INTERNAL
103 * issue the tlb read instruction
104 */
105static inline int __tlb_probe(uint64_t hi){
106    int index;
107    CVMX_EHB;
108    CVMX_MT_ENTRY_HIGH(hi);
109    CVMX_TLBP;
110    CVMX_EHB;
111
112    CVMX_MF_TLB_INDEX(index);
113
114    if (index < 0) index = -1;
115
116    return index;
117}
118
119/**
120 * @INTERNAL
121 * read a single tlb entry
122 *
123 * return 0: tlb entry is read
124 *    -1: index is invalid
125 */
126static inline int __tlb_read_index(uint32_t tlbi){
127
128    if (tlbi >= cvmx_tlb_size_limit()) {
129        return -1;
130    }
131
132    CVMX_MT_TLB_INDEX(tlbi);
133    __tlb_read();
134
135    return 0;
136}
137
138/**
139 * @INTERNAL
140 * write a single tlb entry
141 *
142 * return 0: tlb entry is read
143 *    -1: index is invalid
144 */
145static inline int __tlb_write_index(uint32_t tlbi,
146        			    uint64_t hi, uint64_t lo0,
147				    uint64_t lo1, uint64_t pagemask)
148{
149
150    if (tlbi >= cvmx_tlb_size_limit()) {
151        return -1;
152    }
153
154#ifdef DEBUG
155    cvmx_dprintf("cvmx-tlb-dbg: "
156	    "write TLB %d: hi %lx, lo0 %lx, lo1 %lx, pagemask %lx \n",
157		tlbi, hi, lo0, lo1, pagemask);
158#endif
159
160    CVMX_MT_TLB_INDEX(tlbi);
161    CVMX_MT_ENTRY_HIGH(hi);
162    CVMX_MT_ENTRY_LO_0(lo0);
163    CVMX_MT_ENTRY_LO_1(lo1);
164    CVMX_MT_PAGEMASK(pagemask);
165    __tlb_write();
166
167    return 0;
168}
169
170/**
171 * @INTERNAL
172 * Determine if a TLB entry is free to use
173 */
174static inline int __tlb_entry_is_free(uint32_t tlbi) {
175    int ret = 0;
176    uint64_t lo0 = 0, lo1 = 0;
177
178    if (tlbi < cvmx_tlb_size_limit()) {
179
180        __tlb_read_index(tlbi);
181
182        /* Unused entries have neither even nor odd page mapped */
183    	CVMX_MF_ENTRY_LO_0(lo0);
184    	CVMX_MF_ENTRY_LO_1(lo1);
185
186        if ( !(lo0 & TLB_VALID) && !(lo1 & TLB_VALID)) {
187            ret = 1;
188        }
189    }
190
191    return ret;
192}
193
194
/**
 * @INTERNAL
 * Dump a single TLB entry to the debug console.
 *
 * Output is produced only when DEBUG is defined; without it the entry
 * is still read but nothing is printed.
 *
 * @param tlbi  index of the TLB entry to dump (ignored if out of range)
 */
static inline void __tlb_dump_index(uint32_t tlbi)
{
    if (tlbi < cvmx_tlb_size_limit()) {

        if (__tlb_entry_is_free(tlbi)) {
#ifdef DEBUG
            cvmx_dprintf("Index: %3d Free \n", tlbi);
#endif
        } else {
            uint64_t lo0, lo1, pgmask;
            /* NOTE(review): 'hi' is uint32_t while EntryHi is written as
               64-bit elsewhere in this file; high VA bits would be
               truncated in this debug dump — confirm intent */
            uint32_t hi, c0, c1;
#ifdef DEBUG
            int width = 13;
#endif

            __tlb_read_index(tlbi);

            CVMX_MF_ENTRY_HIGH(hi);
            CVMX_MF_ENTRY_LO_0(lo0);
            CVMX_MF_ENTRY_LO_1(lo1);
            CVMX_MF_PAGEMASK(pgmask);


#ifdef DEBUG
            cvmx_dprintf("Index: %3d pgmask=%s ", tlbi, __mask_to_str(pgmask));
#endif

            /* extract bits [5:3] of each EntryLo (printed as "c=" below) */
            c0 = ( lo0 >> 3 ) & 7;
            c1 = ( lo1 >> 3 ) & 7;

#ifdef DEBUG
            cvmx_dprintf("va=%0*lx asid=%02x\n",
                               width, (hi & ~0x1fffUL), hi & 0xff);

            cvmx_dprintf("\t[pa=%0*lx c=%d d=%d v=%d g=%d] ",
                               width,
                               (lo0 << 6) & PAGE_MASK, c0,
                               (lo0 & 4) ? 1 : 0,
                               (lo0 & 2) ? 1 : 0,
                               (lo0 & 1) ? 1 : 0);
            cvmx_dprintf("[pa=%0*lx c=%d d=%d v=%d g=%d]\n",
                               width,
                               (lo1 << 6) & PAGE_MASK, c1,
                               (lo1 & 4) ? 1 : 0,
                               (lo1 & 2) ? 1 : 0,
                               (lo1 & 1) ? 1 : 0);

#endif
        }
    }
}
250
251/**
252 * @INTERNAL
253 * dump a single tlb entry
254 */
255static inline uint32_t __tlb_wired_index() {
256    uint32_t  tlbi;
257
258    CVMX_MF_TLB_WIRED(tlbi);
259    return tlbi;
260}
261
262/**
263 *  Set up a wired entry. This function is designed to be used by Simple
264 *  Executive to set up its virtual to physical address mapping at start up
265 *  time. After the mapping is set up, the remaining unused TLB entries can
266 *  be use for run time shared memory mapping.
267 *
268 *  Calling this function causes the C0 wired index register to increase.
269 *  Wired index register points to the separation between fixed TLB mapping
270 *  and run time shared memory mapping.
271 *
272 *  @param  hi      Entry Hi
273 *  @param  lo0     Entry Low0
274 *  @param  lo1     Entry Low1
275 *  @param  pagemask Pagemask
276 *
277 *  @return 0: the entry is added
278 *  @return -1: out of TLB entry
279 */
280int cvmx_tlb_add_wired_entry( uint64_t hi, uint64_t lo0,
281                              uint64_t lo1, uint64_t pagemask)
282{
283    uint64_t index;
284    int ret = -1;
285
286    index = __tlb_wired_index();
287
288    /* Check to make sure if the index is free to use */
289    if (index < cvmx_tlb_size_limit() && __tlb_entry_is_free(index) ) {
290        /* increase the wired index by 1*/
291        __tlb_write_index(index, hi, lo0, lo1, pagemask);
292        CVMX_MT_TLB_WIRED(index + 1);
293        ret = 0;
294    }
295
296    return ret;
297}
298
299/**
300 *  Find a free entry that can be used for share memory mapping.
301 *
302 *  @return -1: no free entry found
303 *  @return :  a free entry
304 */
305int cvmx_tlb_allocate_runtime_entry(void)
306{
307    uint32_t i, ret = -1;
308
309    for (i = __tlb_wired_index(); i< cvmx_tlb_size_limit(); i++) {
310
311    	/* Check to make sure the index is free to use */
312        if (__tlb_entry_is_free(i)) {
313		/* Found and return */
314        	ret = i;
315        	break;
316	}
317    }
318
319    return ret;
320}
321
322/**
323 *  Invalidate the TLB entry. Remove previous mapping if one was set up
324 */
325void cvmx_tlb_free_runtime_entry(uint32_t tlbi)
326{
327    /* Invalidate an unwired TLB entry */
328    if ((tlbi < cvmx_tlb_size_limit()) && (tlbi >= __tlb_wired_index())) {
329        __tlb_write_index(tlbi, 0xffffffff80000000ULL, 0, 0, 0);
330    }
331}
332
333
334/**
335 *  Program a single TLB entry to enable the provided vaddr to paddr mapping.
336 *
337 *  @param index  Index of the TLB entry
338 *  @param vaddr  The virtual address for this mapping
339 *  @param paddr  The physical address for this mapping
340 *  @param size   Size of the mapping
341 *  @param tlb_flags  Entry mapping flags
342 */
343
344void cvmx_tlb_write_entry(int index, uint64_t vaddr, uint64_t paddr,
345			uint64_t size, uint64_t tlb_flags) {
346	uint64_t lo0, lo1, hi, pagemask;
347
348	if ( __is_power_of_two(size) ) {
349		if ( (__log2(size) & 1 ) == 0) {
350			/* size is not power of 4,  we only need to map
351  			   one page, figure out even or odd page to map */
352			if ((vaddr >> __log2(size) & 1))  {
353				lo0 =  0;
354				lo1 =  ((paddr >> 12) << 6) | tlb_flags;
355				hi =   ((vaddr - size) >> 12) << 12;
356			}else {
357				lo0 =  ((paddr >> 12) << 6) | tlb_flags;
358				lo1 =  0;
359				hi =   ((vaddr) >> 12) << 12;
360			}
361			pagemask = (size - 1) & (~1<<11);
362		}else {
363			lo0 =  ((paddr >> 12)<< 6) | tlb_flags;
364			lo1 =  (((paddr + size /2) >> 12) << 6) | tlb_flags;
365			hi =   ((vaddr) >> 12) << 12;
366			pagemask = ((size/2) -1) & (~1<<11);
367		}
368
369
370        	__tlb_write_index(index, hi, lo0, lo1, pagemask);
371
372	}
373}
374
375
/**
 *  Program a single TLB entry to enable the provided vaddr to paddr mapping.
 *  This version adds a wired entry that should not be changed at run time
 *
 *  @param vaddr  The virtual address for this mapping
 *  @param paddr  The physical address for this mapping
 *  @param size   Size of the mapping
 *  @param tlb_flags  Entry mapping flags
 *  @return 1: fixed entry added (wired index advanced)
 *  @return 0: no entry added (index out of range, entry already in use,
 *             or the write did not take effect)
 *
 *  NOTE(review): earlier documentation claimed -1 / 0 return values, but
 *  the code returns 1 on success and 0 on failure.  Documented here as
 *  the code behaves; confirm against call sites before changing either.
 */
int cvmx_tlb_add_fixed_entry( uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t tlb_flags) {

    uint64_t index;
    int ret = 0;

    CVMX_MF_TLB_WIRED(index);

    /* Check to make sure if the index is free to use */
    if (index < cvmx_tlb_size_limit() && __tlb_entry_is_free(index) ) {
	cvmx_tlb_write_entry(index, vaddr, paddr, size, tlb_flags);

	/* Re-read the entry to verify the write actually mapped a page */
	if (!__tlb_entry_is_free(index)) {
        	/* Bump up the wired register*/
        	CVMX_MT_TLB_WIRED(index + 1);
		ret  = 1;
	}
    }
    return ret;
}
406
407
408/**
409 *  Program a single TLB entry to enable the provided vaddr to paddr mapping.
410 *  This version writes a runtime entry. It will check the index to make sure
411 *  not to overwrite any fixed entries.
412 *
413 *  @param index  Index of the TLB entry
414 *  @param vaddr  The virtual address for this mapping
415 *  @param paddr  The physical address for this mapping
416 *  @param size   Size of the mapping
417 *  @param tlb_flags  Entry mapping flags
418 */
419void cvmx_tlb_write_runtime_entry(int index, uint64_t vaddr, uint64_t paddr,
420                          uint64_t size, uint64_t tlb_flags)
421{
422
423    int wired_index;
424    CVMX_MF_TLB_WIRED(wired_index);
425
426    if (index >= wired_index) {
427	cvmx_tlb_write_entry(index, vaddr, paddr, size, tlb_flags);
428    }
429
430}
431
432
433
434/**
435 * Find the TLB index of a given virtual address
436 *
437 *  @param vaddr  The virtual address to look up
438 *  @return  -1  not TLB mapped
439 *           >=0 TLB TLB index
440 */
441int cvmx_tlb_lookup(uint64_t vaddr) {
442	uint64_t hi= (vaddr >> 12 ) << 12; /* We always use ASID 0 */
443
444	return  __tlb_probe(hi);
445}
446
447/**
448 *  Debug routine to show all shared memory mapping
449 */
450void cvmx_tlb_dump_shared_mapping(void) {
451    uint32_t tlbi;
452
453    for ( tlbi = __tlb_wired_index(); tlbi<cvmx_tlb_size_limit(); tlbi++ ) {
454        __tlb_dump_index(tlbi);
455    }
456}
457
458/**
459 *  Debug routine to show all TLB entries of this core
460 *
461 */
462void cvmx_tlb_dump_all(void) {
463
464    uint32_t tlbi;
465
466    for (tlbi = 0; tlbi<= cvmx_tlb_size_limit(); tlbi++ ) {
467        __tlb_dump_index(tlbi);
468    }
469}
470
471