/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#define NO_INTERLEAVE_INDEX (-1UL)	/* use task il_prev for interleaving */

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	nodemask_t nodes;	/* interleave/bind/prefer */
	int home_node;		/* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */

	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}
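
/*
 * Illustrative sketch (editorial, not part of the API surface above): the
 * refcounting helpers are meant to be paired, so every successful
 * mpol_dup()/mpol_get() is matched by an mpol_put(), which frees the object
 * once the count reaches zero.  Variable names below are for illustration
 * only, and __mpol_dup() is assumed to report allocation failure via an
 * ERR_PTR() value:
 *
 *	struct mempolicy *new = mpol_dup(old);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	... use new ...
 *	mpol_put(new);
 */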

/*
 * Tree of shared policies for a shared memory region.
 */
struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};
struct sp_node {
	struct rb_node nd;
	pgoff_t start, end;
	struct mempolicy *policy;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *sp,
			   struct vm_area_struct *vma, struct mempolicy *mpol);
void mpol_free_shared_policy(struct shared_policy *sp);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    pgoff_t idx);
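
/*
 * Editorial note (based on the mpol_needs_cond_ref() comment earlier in this
 * header): a policy found in the shared policy tree has MPOL_F_SHARED set, so
 * a lookup result is expected to be released with mpol_cond_put() once the
 * caller is done with it, for example:
 *
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *	... use pol for the allocation ...
 *	mpol_cond_put(pol);
 */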

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
				   unsigned long addr, pgoff_t *ilx);
struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
				 unsigned long addr, int order, pgoff_t *ilx);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
		     unsigned long addr, gfp_t gfp_flags,
		     struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
				    const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);

int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return (pol->mode == MPOL_PREFERRED_MANY);
}

extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);

#else

struct mempolicy {};

static inline struct mempolicy *get_task_policy(struct task_struct *p)
{
	return NULL;
}

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *pol)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
					   struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *sp)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx)
{
	return NULL;
}

static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
					       unsigned long addr, int order,
					       pgoff_t *ilx)
{
	*ilx = 0;
	return NULL;
}

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				    const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
			    unsigned long addr, gfp_t gfp_flags,
			    struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct folio *folio,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1;	/* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return false;
}

#endif /* CONFIG_NUMA */
#endif