/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * This code maintains the "home" for each page in the system.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include "migrate.h"


#if CHIP_HAS_COHERENT_LOCAL_CACHE()

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home.  There's no point in using it if we
 * don't have coherent local caching, though.
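 * It is enabled by passing "noallocl2" on the kernel boot command line.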
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);

#else

#define noallocl2 0

#endif

/* Provide no-op versions of these routines to keep flush_remote() cleaner. */
#define mark_caches_evicted_start() 0
#define mark_caches_evicted_finish(mask, timestamp) do {} while (0)


/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length)
		cpumask_or(&mask, &mask, tlb_cpumask);

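	/*
	 * Each HV_Remote_ASID identifies a tile by its (x, y) grid
	 * coordinates; convert those to a linear cpu number.
	 */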
	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}

/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return-value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Centralizes the mark_caches_evicted() handling.
 *  - Canonicalizes zero lengths to NULL cpumasks.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush could race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	int timestamp = 0;  /* happy compiler */
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	if (cache_control & HV_FLUSH_EVICT_L2)
		timestamp = mark_caches_evicted_start();
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (cache_control & HV_FLUSH_EVICT_L2)
		mark_caches_evicted_finish(cache_cpumask, timestamp);
	if (rc == 0)
		return;
	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf,
	       asids, asidcount, rc);
	panic("Unsafe to continue.");
}

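/* Evict the entire contents of the L2 caches of the given cpus. */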
void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}

/* Return a mask of the cpus whose caches currently own these pages. */
static void homecache_mask(struct page *page, int pages,
			   struct cpumask *home_mask)
{
	int i;
	cpumask_clear(home_mask);
	for (i = 0; i < pages; ++i) {
		int home = page_home(&page[i]);
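		/* Incoherent or immutable pages may be cached anywhere. */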
		if (home == PAGE_HOME_IMMUTABLE ||
		    home == PAGE_HOME_INCOHERENT) {
			cpumask_copy(home_mask, cpu_possible_mask);
			return;
		}
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH) {
			cpumask_or(home_mask, home_mask, &hash_for_home_map);
			continue;
		}
#endif
		if (home == PAGE_HOME_UNCACHED)
			continue;
		BUG_ON(home < 0 || home >= NR_CPUS);
		cpumask_set_cpu(home, home_mask);
	}
}

/*
 * Return the passed length, or HV_FLUSH_EVICT_L2 if the length is
 * large enough that we believe we should just evict the whole L2 cache.
 */
static unsigned long cache_flush_length(unsigned long length)
{
	return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
}

/* On the simulator, confirm lines have been evicted everywhere. */
static void validate_lines_evicted(unsigned long pfn, size_t length)
{
	sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED,
		    (HV_PhysAddr)pfn << PAGE_SHIFT, length);
}

/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{
	int pages = 1 << order;
	int length = cache_flush_length(pages * PAGE_SIZE);
	unsigned long pfn = page_to_pfn(page);
	struct cpumask home_mask;

	homecache_mask(page, pages, &home_mask);
	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
	validate_lines_evicted(pfn, pages * PAGE_SIZE);
}


/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
#if CHIP_HAS_CBOX_HOME_MAP()
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
#endif
	}
	panic("Bad PTE %#llx\n", pte.val);
}

/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for non-linear file mapping "PTEs" and pass them through. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages get NC mappings.  If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced-caching
	 * setting colliding with ptrace making the page writable
	 * underneath us.  In this case, just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else
#if CHIP_HAS_CBOX_HOME_MAP()
		if (hash_default)
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		else
#endif
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
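		/* Mark immutable pages NC so any tile can cache them locally. */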
		pte = hv_pte_set_nc(pte);
		break;

#if CHIP_HAS_CBOX_HOME_MAP()
	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;
#endif

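	/* Otherwise, home the page on a single specific cpu's L3 cache. */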
	default:
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no L3" to "uncached". */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}
#endif

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);

/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache of a
 * kernel page once, and require a full-chip cache/TLB flush, so
 * they're not suitable for anything but infrequent use.
 */

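/* The default home assumed for pages that have not been explicitly re-homed. */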
#if CHIP_HAS_CBOX_HOME_MAP()
static inline int initial_page_home(void) { return PAGE_HOME_HASH; }
#else
static inline int initial_page_home(void) { return 0; }
#endif

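/* Report the home of a page (highmem pages are assumed to keep the default). */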
int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return initial_page_home();
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_pte(NULL, kva));
	}
}

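/*
 * Re-home a range of lowmem kernel pages: flush the old cached copies
 * and TLB entries chip-wide, then rewrite the kernel PTEs with the
 * new home.  The pages must not be mapped or shared (see the BUG_ONs).
 */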
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		*ptep = pte_set_home(pteval, home);
	}
}

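/* Allocate lowmem pages with the given home cache. */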
struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);

struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}

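/*
 * Free pages allocated with homecache_alloc_pages(), resetting them
 * to the default home first.
 */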
void homecache_free_pages(unsigned long addr, unsigned int order)
{
	struct page *page;

	if (addr == 0)
		return;

	VM_BUG_ON(!virt_addr_valid((void *)addr));
	page = virt_to_page((void *)addr);
	if (put_page_testzero(page)) {
		int pages = (1 << order);
		homecache_change_page_home(page, order, initial_page_home());
		while (pages--)
			__free_page(page++);
	}
}