/*
 *  linux/include/asm-arm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>

void __check_kvm_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 */
#define ASID_BITS		8
#define ASID_MASK		((~0) << ASID_BITS)
#define ASID_FIRST_VERSION	(1 << ASID_BITS)

extern unsigned int cpu_last_asid;

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

static inline void check_context(struct mm_struct *mm)
{
	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
		__new_context(mm);

	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);
}

#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)

#else

static inline void check_context(struct mm_struct *mm)
{
	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);
}

#define init_new_context(tsk,mm)	0

#endif

#define destroy_context(mm)		do { } while(0)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
		check_context(next);
		cpu_switch_mm(next->pgd, next);
		if (cache_is_vivt())
			cpu_clear(cpu, prev->cpu_vm_mask);
	}
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

#endif