#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}

void __kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
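
/*
 * Illustrative sketch only (not part of this file): the typical pattern a
 * caller of the atomic kmap API follows.  The page pointer, the KM_USER0
 * slot and the memset() call are assumptions made purely for the example.
 *
 *	void *vaddr = __kmap_atomic(page, KM_USER0);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	__kunmap_atomic(vaddr, KM_USER0);
 *
 * Nothing between the map and the unmap may sleep or fault.
 */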

/*
 * We need one map array per CPU, and each array has to be cache aligned.
 */
struct kmap_map {
	struct page *page;
	void        *vaddr;
};

struct {
	struct kmap_map map[KM_TYPE_NR];
} ____cacheline_aligned_in_smp kmap_atomic_maps[NR_CPUS];

/*
 * Return the virtual address at which @page is currently mapped via an
 * atomic kmap on this CPU, or NULL if it is not mapped.
 */
void *kmap_atomic_page_address(struct page *page)
{
	int cpu = smp_processor_id();
	int i;

	for (i = 0; i < KM_TYPE_NR; i++)
		if (kmap_atomic_maps[cpu].map[i].page == page)
			return kmap_atomic_maps[cpu].map[i].vaddr;

	return NULL;
}

void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte - idx)))
		BUG();
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
	local_flush_tlb_one(vaddr);

	kmap_atomic_maps[smp_processor_id()].map[type].page = page;
	kmap_atomic_maps[smp_processor_id()].map[type].vaddr = (void *)vaddr;

	return (void *)vaddr;
}

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < FIXADDR_START) {
		pagefault_enable();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
		BUG();

	/*
	 * Protect against multiple unmaps of the same slot:
	 * we can't cache-flush a page that is no longer mapped.
	 */
	if (kmap_atomic_maps[smp_processor_id()].map[type].vaddr) {
		kmap_atomic_maps[smp_processor_id()].map[type].page = NULL;
		kmap_atomic_maps[smp_processor_id()].map[type].vaddr = NULL;

		flush_data_cache_page(vaddr);
	}

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, kmap_prot));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
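
/*
 * Illustrative sketch only (not part of this file): mapping a raw physical
 * frame that has no struct page behind it, for instance a firmware-owned
 * frame whose pfn the caller already knows.  The pfn value and the KM_PTE0
 * slot below are assumptions made purely for the example.
 *
 *	u32 *p = kmap_atomic_pfn(pfn, KM_PTE0);
 *	u32 word = p[0];
 *	__kunmap_atomic(p, KM_PTE0);
 */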

/*
 * Same as kmap_atomic_pfn(), but establish the mapping with the given
 * protection bits by temporarily overriding kmap_prot.
 */
void *kmap_atomic_pfn_prot(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	pgprot_t old_kmap_prot = kmap_prot;
	void *vaddr;

	kmap_prot = prot;
	vaddr = kmap_atomic_pfn(pfn, type);
	kmap_prot = old_kmap_prot;

	return vaddr;
}

/*
 * Translate an atomic kmap virtual address back to its struct page.
 */
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kmap_atomic_to_page);