1/*
2 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
3 * Adapted for the alpha wildfire architecture Jan 2001.
4 */
5#ifndef _ASM_MMZONE_H_
6#define _ASM_MMZONE_H_
7
8#include <asm/smp.h>
9
struct bootmem_data_t; /* forward decl; NOTE(review): bootmem_data_t is presumably a typedef elsewhere, so this declares a distinct struct tag -- confirm intentional */
11
12/*
13 * Following are macros that are specific to this numa platform.
14 */
15
/* One pg_data_t per NUMA node, indexed by node id (see NODE_DATA below). */
extern pg_data_t node_data[];
17
/*
 * Home node of physical address 'pa'.  Machine vectors without a
 * pa_to_nid hook are non-NUMA, so everything belongs to node 0.
 */
#define alpha_pa_to_nid(pa)		\
        (alpha_mv.pa_to_nid 		\
	 ? alpha_mv.pa_to_nid(pa)	\
	 : (0))
/*
 * First physical address owned by node 'nid'; 0 when the machine
 * vector provides no hook (non-NUMA case).
 */
#define node_mem_start(nid)		\
        (alpha_mv.node_mem_start 	\
	 ? alpha_mv.node_mem_start(nid) \
	 : (0UL))
/*
 * Byte size of node 'nid's memory.  Without a machine-vector hook,
 * node 0 is assumed to own all memory (~0UL) and every other node
 * owns none.
 */
#define node_mem_size(nid)		\
        (alpha_mv.node_mem_size 	\
	 ? alpha_mv.node_mem_size(nid) 	\
	 : ((nid) ? (0UL) : (~0UL)))
30
/* Generic names the rest of the mm code expects. */
#define pa_to_nid(pa)		alpha_pa_to_nid(pa)
#define NODE_DATA(nid)		(&node_data[(nid)])
33
/* Page-frame number 'pfn' expressed relative to the start of node 'nid'. */
#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
35
/*
 * Node-local page index of physical address 'p' on node 'n'.
 * NOTE(review): relies on PLAT_NODE_DATA(), which is not defined in
 * this header -- confirm it exists for every config using this macro.
 */
#define PLAT_NODE_DATA_LOCALNR(p, n)	\
	(((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
38
39#ifdef CONFIG_DISCONTIGMEM
40
41/*
42 * Following are macros that each numa implementation must define.
43 */
44
45/*
46 * Given a kernel address, find the home node of the underlying memory.
47 */
#define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
/* First page-frame number belonging to node 'nid'. */
#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
50
51/*
52 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
53 * and returns the kaddr corresponding to first physical page in the
54 * node's mem_map.
55 */
/* node_start_pfn << PAGE_SHIFT is the node's base physical address,
 * which __va() maps back into the kernel direct-mapped region. */
#define LOCAL_BASE_ADDR(kaddr)						  \
    ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn  \
			 << PAGE_SHIFT))
59
/* Unconditionally "invalid" here; NOTE(review): presumably callers fall
 * back on other checks under DISCONTIGMEM -- confirm they tolerate 0. */
#define kern_addr_valid(kaddr)	(0)
61
/* struct page of a kernel direct-mapped virtual address, via its PFN. */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
63
/* NOTE(review): uses the global mem_map/max_mapnr pair even though each
 * DISCONTIGMEM node has its own mem_map -- confirm intended usage. */
#define VALID_PAGE(page)	(((page) - mem_map) < max_mapnr)
65
/* On this port the PFN is stored in the upper 32 bits of each table
 * entry, hence the >> 32 extraction below. */
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> 32))
#define pgd_page(pgd)		(pfn_to_page(pgd_val(pgd) >> 32))
#define pte_pfn(pte)		(pte_val(pte) >> 32)
69
/*
 * Build a PTE for 'page' with protection bits 'pgprot'.  The PFN goes
 * into the upper 32 bits of the entry (matching pte_pfn above); the
 * low bits carry the protection/flag bits.
 *
 * Uses a GNU statement expression; 'pgprot' is evaluated exactly once.
 */
#define mk_pte(page, pgprot)						     \
({								 	     \
	pte_t pte;                                                           \
	unsigned long pfn_bits;                                              \
									     \
	/* not a bare PFN: already shifted into PTE position */		     \
	pfn_bits = page_to_pfn(page) << 32;				     \
	pte_val(pte) = pfn_bits | pgprot_val(pgprot);			     \
									     \
	pte;								     \
})
80
/*
 * struct page referenced by PTE 'x'.
 * pte_val(x) >> (32 - PAGE_SHIFT) converts the PFN held in the upper
 * 32 bits straight into a physical byte address (pfn << PAGE_SHIFT),
 * which __va() turns into a kernel virtual address; virt_to_page()
 * then yields the owning struct page.
 */
#define pte_page(x)							\
({									\
       	unsigned long kvirt;						\
	struct page * __xx;						\
									\
	kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT));	\
	__xx = virt_to_page(kvirt);					\
									\
	__xx;                                                           \
})
91
/* Physical address of the first byte of 'page'. */
#define page_to_pa(page)						\
	(page_to_pfn(page) << PAGE_SHIFT)
94
/* Home node of page-frame number 'pfn' (widened before shifting). */
#define pfn_to_nid(pfn)		pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
/*
 * A PFN is valid iff it falls within the span of its computed home
 * node.  The unsigned subtraction also rejects PFNs below the node's
 * start (they wrap to a huge value).  Beware: 'pfn' is evaluated
 * several times.
 *
 * The stray trailing '\' after the closing paren has been removed;
 * it silently spliced the following source line into this macro.
 */
#define pfn_valid(pfn)							\
	(((pfn) - node_start_pfn(pfn_to_nid(pfn))) <			\
	 node_spanned_pages(pfn_to_nid(pfn)))
99
/* Valid iff the kernel virtual address maps to a valid PFN. */
#define virt_addr_valid(kaddr)	pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
101
102#endif /* CONFIG_DISCONTIGMEM */
103
104#endif /* _ASM_MMZONE_H_ */
105