Lines Matching refs:vma_lock (mm/hugetlb.c)
Each entry below is the source line number followed by the line that references vma_lock.

264  * hugetlb vma_lock helper routines
269 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
271 down_read(&vma_lock->rw_sema);
282 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
284 up_read(&vma_lock->rw_sema);
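
Lines 264-284 are the read-side lock helpers. A sketch of the full functions follows; the struct definition and the __vma_shareable_lock() guard do not reference the vma_lock identifier and so do not appear in the hits above -- they are filled in from mainline and should be read as context, not matched text.

struct hugetlb_vma_lock {
        struct kref refs;               /* lifetime of this structure */
        struct rw_semaphore rw_sema;    /* the actual lock */
        struct vm_area_struct *vma;     /* back-pointer, NULLed on teardown */
};

void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
        /* Only sharable (VM_MAYSHARE) vmas carry a vma_lock. */
        if (__vma_shareable_lock(vma)) {
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                down_read(&vma_lock->rw_sema);
        }
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
        if (__vma_shareable_lock(vma)) {
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                up_read(&vma_lock->rw_sema);
        }
}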
295 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
297 down_write(&vma_lock->rw_sema);
308 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
310 up_write(&vma_lock->rw_sema);
322 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
324 return down_write_trylock(&vma_lock->rw_sema);
337 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
339 lockdep_assert_held(&vma_lock->rw_sema);
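
Lines 295-310 mirror the pair above with down_write()/up_write() under the same guard. Lines 322-339 add a trylock and a lockdep assertion; a sketch (the fall-through return value when no lock exists follows mainline):

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
        if (__vma_shareable_lock(vma)) {
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                return down_write_trylock(&vma_lock->rw_sema);
        }

        /* No per-vma lock to contend on: report success. */
        return 1;
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
        if (__vma_shareable_lock(vma)) {
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                lockdep_assert_held(&vma_lock->rw_sema);
        }
}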
349 struct hugetlb_vma_lock *vma_lock = container_of(kref,
352 kfree(vma_lock);
355 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
357 struct vm_area_struct *vma = vma_lock->vma;
360 * vma_lock structure may or may not be released as a result of put,
362 * Semaphore synchronizes access to vma_lock->vma field.
364 vma_lock->vma = NULL;
366 up_write(&vma_lock->rw_sema);
367 kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
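
Lines 349-367 are the refcounted teardown. hugetlb_vma_lock_release() is the kref release function; __hugetlb_vma_unlock_write_put() detaches the lock from its vma while the semaphore is still held in write mode, so nobody can observe a half-torn-down pairing. Sketch (the vma->vm_private_data = NULL line does not reference vma_lock and is filled in from mainline):

void hugetlb_vma_lock_release(struct kref *kref)
{
        struct hugetlb_vma_lock *vma_lock = container_of(kref,
                        struct hugetlb_vma_lock, refs);

        kfree(vma_lock);
}

static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
        struct vm_area_struct *vma = vma_lock->vma;

        /*
         * The vma_lock structure may or may not be released as a result
         * of the put, but it will certainly no longer be attached to the
         * vma, so clear both pointers.  The semaphore synchronizes
         * access to vma_lock->vma.
         */
        vma_lock->vma = NULL;
        vma->vm_private_data = NULL;
        up_write(&vma_lock->rw_sema);
        kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}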
373 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
375 __hugetlb_vma_unlock_write_put(vma_lock);
393 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
395 down_write(&vma_lock->rw_sema);
396 __hugetlb_vma_unlock_write_put(vma_lock);
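
Lines 373-396 are the two free paths that end in the put helper: one where the caller already holds the semaphore in write mode, and one (hugetlb_vma_lock_free()) that must first take it so no reader or writer is still inside. Sketch, with guards paraphrased from mainline:

static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
        /* Caller holds the semaphore in write mode (e.g. final unmap). */
        if (__vma_shareable_lock(vma)) {
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                __hugetlb_vma_unlock_write_put(vma_lock);
        }
}

static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
        if (!vma || !__vma_shareable_lock(vma))
                return;

        if (vma->vm_private_data) {
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                /* Wait out any holder before detaching and dropping. */
                down_write(&vma_lock->rw_sema);
                __hugetlb_vma_unlock_write_put(vma_lock);
        }
}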
402 struct hugetlb_vma_lock *vma_lock;
412 vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
413 if (!vma_lock) {
428 kref_init(&vma_lock->refs);
429 init_rwsem(&vma_lock->rw_sema);
430 vma_lock->vma = vma;
431 vma->vm_private_data = vma_lock;
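
Lines 402-431 are allocation and initialization. The guard conditions and the warning between the matched lines are paraphrased from mainline; the failure mode is deliberately soft, since a missing vma_lock only disables pmd sharing and weakens fault-vs-truncation synchronization for this one vma:

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
        struct hugetlb_vma_lock *vma_lock;

        /* Only sharable vmas get a lock; never overwrite an existing one. */
        if (!vma || !(vma->vm_flags & VM_MAYSHARE))
                return;
        if (vma->vm_private_data)
                return;

        vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
        if (!vma_lock) {
                /* Degrade gracefully: no pmd sharing for this vma. */
                pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
                return;
        }

        kref_init(&vma_lock->refs);
        init_rwsem(&vma_lock->rw_sema);
        vma_lock->vma = vma;
        vma->vm_private_data = vma_lock;
}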
1209 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1211 if (vma_lock && vma_lock->vma != vma)
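
Line 1211 is the back-pointer sanity check used when vma private data may have been copied by vm_area_dup(): if vma_lock->vma does not point back at this vma, the pointer belongs to the original vma and must be cleared rather than reused. A sketch of the enclosing logic (mainline does this in hugetlb_dup_vma_private(); the reservation-map handling that sits alongside it is omitted):

        if (vma->vm_flags & VM_MAYSHARE) {
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                /* A copied pointer still names the source vma. */
                if (vma_lock && vma_lock->vma != vma)
                        vma->vm_private_data = NULL;
        } else
                vma->vm_private_data = NULL;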
5192 * vma_lock structure for sharable mappings is vma specific.
5194 * new structure. Before clearing, make sure vma_lock is not
5198 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
5200 if (vma_lock) {
5201 if (vma_lock->vma != vma) {
5205 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
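
Lines 5192-5205 repeat the same test in hugetlb_vm_op_open(): a pointer copied from another vma is cleared and a fresh lock allocated, while a lock that already belongs to this vma indicates a bug and only warns. Sketch, with the non-matching lines in between paraphrased:

        if (vma->vm_flags & VM_MAYSHARE) {
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

                if (vma_lock) {
                        if (vma_lock->vma != vma) {
                                /* Stale copy from vm_area_dup(): replace. */
                                vma->vm_private_data = NULL;
                                hugetlb_vma_lock_alloc(vma);
                        } else
                                pr_warn("HugeTLB: vma_lock already exists in %s.\n",
                                        __func__);
                } else
                        hugetlb_vma_lock_alloc(vma);
        }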
5829 * When the vma_lock is freed, this makes the vma ineligible
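
Line 5829's comment sits in the final-unmap path: freeing the vma_lock before releasing i_mmap_rwsem is what takes the vma out of the pmd-sharing pool. A condensed sketch of that ordering (mainline __unmap_hugepage_range_final() frees only on the ZAP_FLAG_UNMAP branch, elided here):

        hugetlb_vma_lock_write(vma);
        i_mmap_lock_write(vma->vm_file->f_mapping);

        __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);

        /*
         * Free the vma_lock before dropping i_mmap_rwsem: once it is
         * gone, this vma is ineligible for pmd sharing.
         */
        __hugetlb_vma_unlock_write_free(vma);
        i_mmap_unlock_write(vma->vm_file->f_mapping);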
6025 * Drop hugetlb_fault_mutex and vma_lock before
6026 * unmapping. unmapping needs to hold vma_lock
6027 * in write mode. Dropping vma_lock in read mode
6169 * vma_lock and hugetlb_fault_mutex must be dropped before handling
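
The comments at 6025-6027 and 6169 describe the same discipline in the fault paths: before calling into anything that takes the vma_lock in write mode (unmapping) or can sleep indefinitely (userfault handling), the fault path must drop both the vma_lock and hugetlb_fault_mutex, then reacquire them afterwards. The shape of the pattern, following the unmap-before-COW case in mainline hugetlb_wp():

        /*
         * Dropping the vma_lock in read mode is safe here: COW private
         * mappings do not participate in pmd sharing.
         */
        hash = hugetlb_fault_mutex_hash(mapping, idx);
        hugetlb_vma_unlock_read(vma);
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);

        unmap_ref_private(mm, vma, old_page, haddr);

        /* Reacquire both after the unmap operation. */
        mutex_lock(&hugetlb_fault_mutex_table[hash]);
        hugetlb_vma_lock_read(vma);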
7314 * Also, vma_lock (vm_private_data) is required for sharing.
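
Line 7314 states the eligibility rule for huge pmd sharing: besides VM_MAYSHARE and PUD_SIZE-aligned coverage, the vma must actually have a vma_lock hanging off vm_private_data. The sketch below is purely illustrative -- the mainline check is split across several differently named helpers:

static bool hugetlb_pmd_shareable(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        unsigned long base = ALIGN_DOWN(addr, PUD_SIZE);
        unsigned long end = base + PUD_SIZE;

        if (!(vma->vm_flags & VM_MAYSHARE))
                return false;
        if (!vma->vm_private_data)      /* no vma_lock, no sharing */
                return false;
        /* The whole PUD-sized region around addr must lie in the vma. */
        return vma->vm_start <= base && end <= vma->vm_end;
}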