/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PARISC64_HUGETLB_H
#define _ASM_PARISC64_HUGETLB_H

#include <asm/page.h>

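/*
 * Each __HAVE_ARCH_HUGE_* define below tells <asm-generic/hugetlb.h>
 * that parisc supplies its own version of the corresponding helper.
 * The out-of-line functions live in arch/parisc/mm/hugetlbpage.c.
 */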
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte, unsigned long sz);

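/* Tear down a huge mapping and return the PTE that was installed there. */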
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

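/*
 * Nothing is cleared or flushed here; the current PTE is simply handed
 * back to the caller.
 */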
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}

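/* Remove write access from an existing huge mapping. */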
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep);

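/*
 * Update the access/dirty bits of a huge mapping; returns non-zero if
 * the entry was changed.
 */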
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty);

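/* Generic fallbacks for all helpers not overridden above. */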
#include <asm-generic/hugetlb.h>

#endif /* _ASM_PARISC64_HUGETLB_H */