/* $Id: cache.h,v 1.1.1.1 2007/08/03 18:53:34 Exp $
 *
 * include/asm-sh/cache.h
 *
 * Copyright 1999 (C) Niibe Yutaka
 * Copyright 2002, 2003 (C) Paul Mundt
 */
#ifndef __ASM_SH_CACHE_H
#define __ASM_SH_CACHE_H
#ifdef __KERNEL__

#include <asm/cpu/cache.h>

#define SH_CACHE_VALID		1
#define SH_CACHE_UPDATED	2
#define SH_CACHE_COMBINED	4
#define SH_CACHE_ASSOC		8
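
/*
 * Example of testing the SH_CACHE_* description bits.  This is an
 * illustrative sketch, not part of this header: it assumes the usual
 * SH layout in which current_cpu_data (<asm/processor.h>) carries
 * icache/dcache members of struct cache_info (defined below), whose
 * flags field holds these bits:
 *
 *	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
 *		return;		(unified I/D cache: one flush covers both)
 */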

#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES		L1_CACHE_BYTES

#define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
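
/*
 * Worked example (illustrative, assuming L1_CACHE_SHIFT == 5, i.e.
 * 32-byte lines, as <asm/cpu/cache.h> provides on SH-4 parts):
 *
 *	L1_CACHE_ALIGN(100) == (100 + 31) & ~31 == 128
 *	L1_CACHE_ALIGN(128) == (128 + 31) & ~31 == 128
 *
 * i.e. sizes are rounded up to the next L1 line boundary, and values
 * already on a boundary are left unchanged.
 */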

#ifndef __ASSEMBLY__
struct cache_info {
	unsigned int ways;		/* Number of cache ways */
	unsigned int sets;		/* Number of cache sets */
	unsigned int linesz;		/* Cache line size (bytes) */

	unsigned int way_size;		/* sets * line size */

	/*
	 * way_incr is the address offset for accessing the next way
	 * in memory-mapped cache array ops.
	 */
	unsigned int way_incr;
	unsigned int entry_shift;
	unsigned int entry_mask;

	/*
	 * alias_mask selects the address bits which overlap between
	 * 1. those used to select the cache set during indexing, and
	 * 2. those in the physical page number.
	 */
	unsigned int alias_mask;

	unsigned int n_aliases;		/* Number of aliases */

	unsigned long flags;
};
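
/*
 * Illustrative sketch of how the fields above relate, for a
 * hypothetical 2-way cache with 256 sets of 32-byte lines and 4 KiB
 * pages (real values are filled in by the CPU probe code):
 *
 *	linesz      = 32;
 *	sets        = 256;
 *	ways        = 2;
 *	way_size    = sets * linesz;			   8192 (8 KiB/way)
 *	entry_shift = 5;				   log2(linesz)
 *	entry_mask  = (sets - 1) << entry_shift;	   0x1fe0
 *	alias_mask  = (way_size - 1) & ~(PAGE_SIZE - 1);   0x1000
 *	n_aliases   = (alias_mask >> PAGE_SHIFT) + 1;	   2 colours
 *
 * way_incr is CPU-specific: it is the stride between consecutive ways
 * in the memory-mapped cache address arrays, which need not equal
 * way_size.
 */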
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */