Searched refs:context (Results 251 - 275 of 2091) sorted by path


/linux-master/arch/s390/kvm/
kvm-s390.c 849 kvm->mm->context.allow_gmap_hpage_1m = 1;
928 else if (kvm->mm->context.allow_gmap_hpage_1m)
2325 if (!bufsize || !kvm->mm->context.uses_cmm) {
2363 * set and the mm->context.uses_cmm flag is set.
2413 if (!kvm->mm->context.uses_cmm) {
2415 kvm->mm->context.uses_cmm = 1;
3702 * section, e.g. in irq context, we have a deadlock.
4529 (vcpu->kvm->mm->context.uses_cmm))
priv.c 1247 * Since we need to take a write lock to write to the context
1252 if (vcpu->kvm->mm->context.uses_cmm == 0) {
1254 vcpu->kvm->mm->context.uses_cmm = 1;
1260 * while the context use_cmma flag is per process.
1261 * It's possible that the context flag is enabled and the
pv.c 209 * On success, kvm->mm->context.protected_count will be decremented atomically
238 atomic_dec(&kvm->mm->context.protected_count);
394 atomic_dec(&kvm->mm->context.protected_count);
433 if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
480 atomic_dec(&kvm->mm->context.protected_count);
591 atomic_inc(&kvm->mm->context.protected_count);
596 atomic_dec(&kvm->mm->context.protected_count);
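The pv.c hits above all touch one counter, kvm->mm->context.protected_count, which appears to track how many users of the mm's protected (secure-guest) state remain: setup paths atomic_inc(), teardown paths atomic_dec(), and atomic_inc_not_zero() takes a reference only while the count is still non-zero. A minimal user-space model of that inc-not-zero guard, using C11 atomics in place of the kernel's atomic_t (the helper names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint protected_count;

/* Take a reference only if one is already held, i.e. only while the
 * protected state is still alive.  Models atomic_inc_not_zero(). */
static bool protected_get_if_active(void)
{
        unsigned int old = atomic_load(&protected_count);

        while (old != 0) {
                /* On failure the CAS reloads 'old' and we retry. */
                if (atomic_compare_exchange_weak(&protected_count, &old,
                                                 old + 1))
                        return true;
        }
        return false;   /* count already hit zero: do not revive it */
}

static void protected_put(void)
{
        atomic_fetch_sub(&protected_count, 1);
}

This is why the release paths in the hits can use a plain atomic_dec(): any path that still needs the protected state must first win the inc-not-zero race.
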
/linux-master/arch/s390/mm/
gmap.c 117 spin_lock(&mm->context.lock);
118 list_add_rcu(&gmap->list, &mm->context.gmap_list);
119 if (list_is_singular(&mm->context.gmap_list))
123 WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
124 spin_unlock(&mm->context.lock);
267 spin_lock(&gmap->mm->context.lock);
269 if (list_empty(&gmap->mm->context.gmap_list))
271 else if (list_is_singular(&gmap->mm->context.gmap_list))
272 gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
276 WRITE_ONCE(gmap->mm->context
[all...]
init.c 158 cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
pgalloc.c 67 S390_lowcore.user_asce.val = mm->context.asce;
76 unsigned long asce_limit = mm->context.asce_limit;
104 VM_BUG_ON(asce_limit != mm->context.asce_limit);
110 mm->context.asce_limit = _REGION1_SIZE;
111 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
119 mm->context.asce_limit = TASK_SIZE_MAX;
120 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
pgtable.c 54 asce = READ_ONCE(mm->context.gmap_asce);
58 asce = asce ? : mm->context.asce;
74 asce = READ_ONCE(mm->context.gmap_asce);
78 asce = asce ? : mm->context.asce;
96 atomic_inc(&mm->context.flush_count);
102 atomic_dec(&mm->context.flush_count);
115 atomic_inc(&mm->context.flush_count);
116 if (cpumask_equal(&mm->context.cpu_attach_mask,
119 mm->context.flush_mm = 1;
122 atomic_dec(&mm->context
[all...]
/linux-master/arch/sh/include/asm/
elf.h 176 #define VDSO_BASE ((unsigned long)current->mm->context.vdso)
mmu_context.h 20 * The MMU "context" consists of two things:
40 #define cpu_context(cpu, mm) ((mm)->context.id[cpu])
53 * Get MMU context if needed.
59 /* Check if we have old version of context. */
64 /* It's old, we need to get new context with new version. */
84 * Initialize the context related info for a new mm_struct
101 * the context for the new mm so we see the new mappings.
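The mmu_context.h comments above outline SH's scheme: each mm keeps a per-CPU context id whose low bits are the hardware ASID and whose high bits are a generation ("version"). A context stays valid as long as its version matches the CPU's current one; when the ASIDs of a generation run out, all TLBs are flushed and a new generation begins. A small runnable model of that check-and-renew step (mask widths and names are illustrative, not SH's actual values):

#include <stdint.h>

#define ASID_BITS    8
#define ASID_MASK    ((1u << ASID_BITS) - 1)  /* low bits: hardware ASID */
#define VERSION_MASK (~ASID_MASK)             /* high bits: generation */

/* Most recently handed-out id; starts in generation 1, ASID 0. */
static uint32_t asid_cache = ASID_MASK + 1;

static void flush_all_tlbs(void) { /* stand-in for the real flush */ }

/* Return a context id valid for the current generation, renewing the
 * given one only when its version bits no longer match asid_cache. */
static uint32_t get_mmu_context(uint32_t ctx)
{
        /* Same generation: the old ASID is still good, do nothing. */
        if (((ctx ^ asid_cache) & VERSION_MASK) == 0)
                return ctx;

        /* It's old: hand out the next ASID of the current generation. */
        if ((++asid_cache & ASID_MASK) == 0) {
                /* ASIDs exhausted: flush all TLBs; the carry into the
                 * version bits has already started a new generation. */
                flush_all_tlbs();
                if (asid_cache == 0)          /* full 32-bit wraparound */
                        asid_cache = ASID_MASK + 1;
        }
        return asid_cache;
}

The version check is what makes a context switch cheap in the common case: nothing is flushed until an entire generation of ASIDs has been consumed.
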
/linux-master/arch/sh/kernel/
dumpstack.c 92 struct thread_info *context; local
95 context = (struct thread_info *)
105 context, &graph);
signal_32.c 291 } else if (likely(current->mm->context.vdso)) {
361 } else if (likely(current->mm->context.vdso)) {
/linux-master/arch/sh/kernel/vsyscall/
vsyscall.c 80 current->mm->context.vdso = (void *)addr;
89 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
/linux-master/arch/sparc/include/asm/
elf_64.h 221 (unsigned long)current->mm->context.vdso); \
mman.h 20 * automatically at the next context switch
36 if (!current->mm->context.adi) {
39 current->mm->context.adi = true;
mmu_context_64.h 41 &mm->context.tsb_block[MM_TSB_BASE],
43 (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
44 &mm->context.tsb_block[MM_TSB_HUGE] :
49 , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
64 /* Set MMU context in the actual hardware. */
74 : "r" (CTX_HWBITS((__mm)->context)), \
79 /* Switch the current MM context. */
89 spin_lock_irqsave(&mm->context.lock, flags);
90 ctx_valid = CTX_VALID(mm->context);
101 * perform the secondary context loa
[all...]
pgtsrmmu.h 114 void srmmu_set_context(int context);
tlb_64.h 19 #define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
/linux-master/arch/sparc/kernel/
adi_64.c 135 if (mm->context.tag_store) {
136 tag_desc = mm->context.tag_store;
137 spin_lock_irqsave(&mm->context.tag_lock, flags);
144 spin_unlock_irqrestore(&mm->context.tag_lock, flags);
174 spin_lock_irqsave(&mm->context.tag_lock, flags);
175 if (mm->context.tag_store) {
176 tag_desc = mm->context.tag_store;
205 mm->context.tag_store = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
206 if (mm->context.tag_store == NULL) {
210 tag_desc = mm->context
[all...]
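The adi_64.c hits show lazy allocation of the per-mm ADI tag store under mm->context.tag_lock: the pointer is tested under the spinlock and, while still unset, allocated with GFP_NOWAIT | __GFP_NOWARN, since a sleeping GFP_KERNEL allocation is not allowed while a spinlock is held. A sketch of that shape (the struct and function here are illustrative, not the kernel's):

#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative container for the two fields named in the hits. */
struct adi_ctx {
        spinlock_t tag_lock;
        void *tag_store;
};

/* Return the tag store, allocating it on first use.  The allocation
 * runs under the spinlock, so it must use GFP_NOWAIT: sleeping in
 * the allocator is not allowed while the lock is held. */
static void *get_tag_store(struct adi_ctx *ctx, size_t size)
{
        unsigned long flags;
        void *store;

        spin_lock_irqsave(&ctx->tag_lock, flags);
        if (!ctx->tag_store)
                ctx->tag_store = kzalloc(size, GFP_NOWAIT | __GFP_NOWARN);
        store = ctx->tag_store;
        spin_unlock_irqrestore(&ctx->tag_lock, flags);

        return store;
}

Doing the check and the assignment under the same lock acquisition is what keeps two racing faults from each installing their own store.
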
asm-offsets.c 53 DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
smp_64.c 795 * Otherwise if we send an xcall from interrupt context it will
1062 * bits of the context register for an address space.
1066 u32 ctx = CTX_HWBITS(mm->context);
1094 u32 ctx = CTX_HWBITS(mm->context);
1113 unsigned long context = CTX_HWBITS(mm->context); local
1118 context, vaddr, 0,
1121 __flush_tlb_page(context, vaddr);
traps_64.c 2658 unsigned long context)
2686 __func__, addr, context);
2657 sun4v_mem_corrupt_detect_precise(struct pt_regs *regs, unsigned long addr, unsigned long context) argument
unaligned_32.c 227 printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n",
228 (current->mm ? current->mm->context :
229 current->active_mm->context));
/linux-master/arch/sparc/mm/
fault_32.c 50 printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
51 (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
141 * context, we must not take the fault..
fault_64.c 51 printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
53 CTX_HWBITS(tsk->mm->context) :
54 CTX_HWBITS(tsk->active_mm->context)));
317 * context, we must not take the fault..
468 mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
471 mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
474 mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
477 mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
478 if (mm->context
[all...]
hugetlbpage.c 348 mm->context.hugetlb_pte_count += nptes;
392 mm->context.hugetlb_pte_count -= nptes;

