/*
 * kernel/cpuset.c
 *
 * Processor and Memory placement constraints for sets of tasks.
 *
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/mutex.h>

#define CPUSET_SUPER_MAGIC		0x27e0eb

/*
 * Tracks how many cpusets are currently defined in system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	/*
	 * Count is atomic so can incr (fork) or decr (exit) without a lock.
	 */
	atomic_t count;			/* count tasks using this cpuset */

	/*
	 * We link our 'sibling' struct into our parents 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parents children */
	struct list_head children;	/* my children */

	struct cpuset *parent;		/* my parent */
	struct dentry *dentry;		/* cpuset fs entry */

	/*
	 * Copy of global cpuset_mems_generation as of the most
	 * recent time this cpuset changed its mems_allowed.
	 */
	int mems_generation;

	struct fmeter fmeter;		/* memory_pressure filter */
};

/* bits in struct cpuset flags field */
typedef enum {
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEMORY_MIGRATE,
	CS_REMOVED,
	CS_NOTIFY_ON_RELEASE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_removed(const struct cpuset *cs)
{
	return test_bit(CS_REMOVED, &cs->flags);
}

static inline int notify_on_release(const struct cpuset *cs)
{
	return test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

/*
 * Increment this integer every time any cpuset changes its
 * mems_allowed value.  Users of cpusets can track this generation
 * number, and avoid having to lock and reload mems_allowed unless
 * the cpuset they're using changes generation.
 *
 * A single, global generation is needed because attach_task() could
 * reattach a task to a different cpuset, which must not have its
 * generation numbers aliased with those of that tasks previous cpuset.
 *
 * Generations are needed for mems_allowed because one task cannot
 * modify anothers memory placement.  So we must enable every task,
 * on every visit to __alloc_pages(), to efficiently check whether
 * its current->cpuset->mems_allowed has changed, requiring an update
 * of its current->mems_allowed.
 *
 * Since cpuset_mems_generation is guarded by manage_mutex,
 * there is no need to mark it atomic.
 */
static int cpuset_mems_generation;

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
	.cpus_allowed = CPU_MASK_ALL,
	.mems_allowed = NODE_MASK_ALL,
	.count = ATOMIC_INIT(0),
	.sibling = LIST_HEAD_INIT(top_cpuset.sibling),
	.children = LIST_HEAD_INIT(top_cpuset.children),
};

static struct vfsmount *cpuset_mount;
static struct super_block *cpuset_sb;

/*
 * We have two global cpuset mutexes below.  They can nest.
 * It is ok to first take manage_mutex, then nest callback_mutex.  We also
 * require taking task_lock() when dereferencing a tasks cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task
 * holds manage_mutex, then it blocks others wanting that mutex,
 * ensuring that it is the only task able to also acquire callback_mutex
 * and be able to modify cpusets.  It can perform various checks on
 * the cpuset structure first, knowing nothing will change.  It can
 * also allocate memory while just holding manage_mutex.  While it is
 * performing these checks, various callback routines can briefly
 * acquire callback_mutex to query cpusets.  Once it is ready to make
 * the changes, it takes callback_mutex, blocking everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mems_generation may only
 * be accessed in the context of that task, so require no locks.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding manage_mutex or callback_mutex can't rely
 * on the count field not changing.  However, if the count goes to
 * zero, then only attach_task(), which holds both mutexes, can
 * increment it again.  Because a count of zero means that no tasks
 * are currently attached, therefore there is no way a task attached
 * to that cpuset can fork (the other way to increment the count).
 * So code holding manage_mutex or callback_mutex can safely assume that
 * if the count is zero, it will stay zero.  Similarly, if a task
 * holds manage_mutex or callback_mutex on a cpuset with zero count, it
 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
 * both of those mutexes.
 *
 * The cpuset_common_file_write handler for operations that modify
 * the cpuset hierarchy holds manage_mutex across the entire operation,
 * single threading all such cpuset modifications across the system.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
 * (usually) take either mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cpuset_exit(),
 * when a task in a notify_on_release cpuset exits.  Then manage_mutex
 * is taken, and if the cpuset count is zero, a usermode call made
 * to /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * A cpuset can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cpusets is empty.  Since all
 * tasks in the system use _some_ cpuset, and since there is always at
 * least one task in the system (init), therefore, top_cpuset
 * always has either children cpusets and/or using tasks.  So we don't
 * need a special hack to ensure that top_cpuset cannot be deleted.
 *
 * The above "Tale of Two Semaphores" would be complete, but for:
 *
 *	The task_lock() exception
 *
 * The need for this exception arises from the action of attach_task(),
 * which overwrites one tasks cpuset pointer with another.  It does
 * so using both mutexes, however there are several performance
 * critical places that need to reference task->cpuset without the
 * expense of grabbing a system global mutex.  Therefore except as
 * noted below, when dereferencing or, as in attach_task(), modifying
 * a tasks cpuset pointer we use task_lock(), which acts on a spinlock
 * (task->alloc_lock) already in the task_struct routinely used for
 * such matters.
 *
 * P.S.  One more locking exception.
 * RCU is used to guard the
 * update of a tasks cpuset pointer by attach_task() and the
 * access of task->cpuset->mems_generation via that pointer in
 * the routine cpuset_update_task_memory_state().
 */

static DEFINE_MUTEX(manage_mutex);
static DEFINE_MUTEX(callback_mutex);

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 *  cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
 *  -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
 */

static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);

static struct backing_dev_info cpuset_backing_dev_info = {
	.ra_pages = 0,		/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
};

static struct inode *cpuset_new_inode(mode_t mode)
{
	struct inode *inode = new_inode(cpuset_sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
	}
	return inode;
}

static void cpuset_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cpuset */
	if (S_ISDIR(inode->i_mode)) {
		struct cpuset *cs = dentry->d_fsdata;
		BUG_ON(!(is_removed(cs)));
		kfree(cs);
	}
	iput(inode);
}

static struct dentry_operations cpuset_dops = {
	.d_iput = cpuset_diput,
};

static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
{
	struct dentry *d = lookup_one_len(name, parent, strlen(name));
	if (!IS_ERR(d))
		d->d_op = &cpuset_dops;
	return d;
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cpuset_d_remove_dir(struct dentry *dentry)
{
	struct list_head *node;

	spin_lock(&dcache_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
		list_del_init(node);
		if (d->d_inode) {
			d = dget_locked(d);
			spin_unlock(&dcache_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dcache_lock);
		}
		node = dentry->d_subdirs.next;
	}
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dcache_lock);
	remove_dir(dentry);
}

static struct super_operations cpuset_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
};

static int cpuset_fill_super(struct super_block *sb, void *unused_data,
							int unused_silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CPUSET_SUPER_MAGIC;
	sb->s_op = &cpuset_ops;
	cpuset_sb = sb;

	inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
	if (inode) {
		inode->i_op = &simple_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directories start off with i_nlink == 2 (for "."
		 * entry) */
		inc_nlink(inode);
	} else {
		return -ENOMEM;
	}

	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = root;
	return 0;
}

static int cpuset_get_sb(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, cpuset_fill_super, mnt);
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.get_sb = cpuset_get_sb,
	.kill_sb = kill_litter_super,
};

/* struct cftype:
 *
 * The files in the cpuset filesystem mostly have a very simple read/write
 * handling, some common function will take care of it. Nevertheless some cases
 * (read tasks) are special and therefore I define this structure for every
 * kind of file.
 *
 *
 * When reading/writing to a file:
 *	- the cpuset to use in file->f_path.dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */

struct cftype {
	char *name;
	int private;
	int (*open) (struct inode *inode, struct file *file);
	ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
							loff_t *ppos);
	int (*write) (struct file *file, const char __user *buf, size_t nbytes,
							loff_t *ppos);
	int (*release) (struct inode *inode, struct file *file);
};

static inline struct cpuset *__d_cs(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/*
 * Call with manage_mutex held.  Writes path of cpuset into buf.
 * Returns 0 on success, -errno on error.
 */

static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
{
	char *start;

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = cs->dentry->d_name.len;
		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, cs->dentry->d_name.name, len);
		cs = cs->parent;
		if (!cs)
			break;
		if (!cs->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}

/*
 * Notify userspace when a cpuset is released, by running
 * /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cpuset.
 *
 * This races with the possibility that some other task will be
 * attached to this cpuset before it is removed, or that some other
 * user task will 'mkdir' a child cpuset of this cpuset.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cpuset is no longer
 * unused, and this cpuset will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is 0, which means don't
 * wait.  The separate /sbin/cpuset_release_agent task is forked by
 * call_usermodehelper(), then control in this thread returns here,
 * without waiting for the release agent task.  We don't bother to
 * wait because the caller of this routine has no use for the exit
 * status of the /sbin/cpuset_release_agent task, so no sense holding
 * our caller up for that.
 *
 * When we had only one cpuset mutex, we had to call this
 * without holding it, to avoid deadlock when call_usermodehelper()
 * allocated memory.  With two locks, we could now call this while
 * holding manage_mutex, but we still don't, so as to minimize
 * the time manage_mutex is held.
 */

static void cpuset_release_agent(const char *pathbuf)
{
	char *argv[3], *envp[3];
	int i;

	if (!pathbuf)
		return;

	i = 0;
	argv[i++] = "/sbin/cpuset_release_agent";
	argv[i++] = (char *)pathbuf;
	argv[i] = NULL;

	i = 0;
	/* minimal command environment */
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[i] = NULL;

	call_usermodehelper(argv[0], argv, envp, 0);
	kfree(pathbuf);
}

/*
 * Either cs->count of using tasks transitioned to zero, or the
 * cs->children list of child cpusets just became empty.  If this
 * cs is notify_on_release() and now both the user count is zero and
 * the list of children is empty, prepare cpuset path in a kmalloc'd
 * buffer, to be returned via ppathbuf, so that the caller can invoke
 * cpuset_release_agent() with it later on, once manage_mutex is dropped.
 * Call here with manage_mutex held.
 *
 * This check_for_release() routine is responsible for kmalloc'ing
 * pathbuf.  The above cpuset_release_agent() is responsible for
 * kfree'ing pathbuf.  The caller of these routines is responsible
 * for providing a pathbuf pointer, initialized to NULL, then
 * calling check_for_release() with manage_mutex held and the address
 * of the pathbuf pointer, then dropping manage_mutex, then calling
 * cpuset_release_agent() with pathbuf, as set by check_for_release().
 */

static void check_for_release(struct cpuset *cs, char **ppathbuf)
{
	if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
	    list_empty(&cs->children)) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return;
		if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
			kfree(buf);
		else
			*ppathbuf = buf;
	}
}

/*
 * Return in *pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  If we get
 * all the way to the top and still haven't found any online cpus,
 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 * task, return cpu_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_map.
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
{
	while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
		cs = cs->parent;
	if (cs)
		cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
	else
		*pmask = cpu_online_map;
	BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online mems.  If we get
 * all the way to the top and still haven't found any online mems,
 * return node_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_online_map.
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
		cs = cs->parent;
	if (cs)
		nodes_and(*pmask, cs->mems_allowed, node_online_map);
	else
		*pmask = node_online_map;
	BUG_ON(!nodes_intersects(*pmask, node_online_map));
}

/**
 * cpuset_update_task_memory_state - update task memory placement
 *
 * If the current tasks cpusets mems_allowed changed behind our
 * backs, update current->mems_allowed, mems_generation and task NUMA
 * mempolicy to the new value.
 *
 * Task mempolicy is updated by rebinding it relative to the
 * current->cpuset if a task has its memory placement changed.
 * Do not call this routine if in_interrupt().
 *
 * Call without callback_mutex or task_lock() held.  May be
 * called with or without manage_mutex held.  Thanks in part to
 * 'the_top_cpuset_hack', the tasks cpuset pointer will never
 * be NULL.  This routine also might acquire callback_mutex and
 * current->mm->mmap_sem during call.
 *
 * Reading current->cpuset->mems_generation doesn't need task_lock
 * to guard the current->cpuset dereference, because it is guarded
 * from concurrent freeing of current->cpuset by attach_task(),
 * using RCU.
 *
 * The rcu_dereference() is technically probably not needed,
 * as I don't actually mind if I see a new cpuset pointer but
 * an old value of mems_generation.  However this really only
 * matters on alpha systems using cpusets heavily.  If I dropped
 * that rcu_dereference(), it would save them a memory barrier.
 * For all other arch's, rcu_dereference is a no-op anyway, and for
 * alpha systems not using cpusets, another planned optimization,
 * avoiding the rcu critical section for tasks in the root cpuset
 * which is statically allocated, so can't vanish, will make this
 * irrelevant.  Better to use RCU as intended, than to engage in
 * some cute trick to save a memory barrier that is impossible to
 * test, for alpha systems using cpusets heavily, which might not
 * even exist.
 *
 * This routine is needed to update the per-task mems_allowed data,
 * within the tasks context, when it is trying to allocate memory
 * (in various mm/mempolicy.c routines) and notices that some other
 * task has been modifying its cpuset.
 */

void cpuset_update_task_memory_state(void)
{
	int my_cpusets_mem_gen;
	struct task_struct *tsk = current;
	struct cpuset *cs;

	if (tsk->cpuset == &top_cpuset) {
		/* Don't need rcu for top_cpuset.  It's never freed.
		 */
		my_cpusets_mem_gen = top_cpuset.mems_generation;
	} else {
		rcu_read_lock();
		cs = rcu_dereference(tsk->cpuset);
		my_cpusets_mem_gen = cs->mems_generation;
		rcu_read_unlock();
	}

	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
		mutex_lock(&callback_mutex);
		task_lock(tsk);
		cs = tsk->cpuset;	/* Maybe changed when task not locked */
		guarantee_online_mems(cs, &tsk->mems_allowed);
		tsk->cpuset_mems_generation = cs->mems_generation;
		if (is_spread_page(cs))
			tsk->flags |= PF_SPREAD_PAGE;
		else
			tsk->flags &= ~PF_SPREAD_PAGE;
		if (is_spread_slab(cs))
			tsk->flags |= PF_SPREAD_SLAB;
		else
			tsk->flags &= ~PF_SPREAD_SLAB;
		task_unlock(tsk);
		mutex_unlock(&callback_mutex);
		mpol_rebind_task(tsk, &tsk->mems_allowed);
	}
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding manage_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * manage_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cpuset *c, *par;

	/* Each of our child cpusets must be a subset of us */
	list_for_each_entry(c, &cur->children, sibling) {
		if (!is_cpuset_subset(c, trial))
			return -EBUSY;
	}

	/* Remaining checks don't apply to root cpuset */
	if (cur == &top_cpuset)
		return 0;

	par = cur->parent;

	/* We must be a subset of our parent cpuset */
	if (!is_cpuset_subset(trial, par))
		return -EACCES;

	/* If either I or some sibling (!= me) is exclusive, we can't overlap */
	list_for_each_entry(c, &par->children, sibling) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
			return -EINVAL;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			return -EINVAL;
	}

	return 0;
}

/*
 * For a given cpuset cur, partition the system as follows
 *  a. All cpus in the parent cpuset's cpus_allowed that are not part of any
 *     exclusive child cpusets
 *  b.
 *     All cpus in the current cpuset's cpus_allowed that are not part of any
 *     exclusive child cpusets
 * Build these two partitions by calling partition_sched_domains
 *
 * Call with manage_mutex held.  May nest a call to the
 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
 * Must not be called holding callback_mutex, because we must
 * not call lock_cpu_hotplug() while holding callback_mutex.
 */

static void update_cpu_domains(struct cpuset *cur)
{
	struct cpuset *c, *par = cur->parent;
	cpumask_t pspan, cspan;

	if (par == NULL || cpus_empty(cur->cpus_allowed))
		return;

	/*
	 * Get all cpus from parent's cpus_allowed not part of exclusive
	 * children
	 */
	pspan = par->cpus_allowed;
	list_for_each_entry(c, &par->children, sibling) {
		if (is_cpu_exclusive(c))
			cpus_andnot(pspan, pspan, c->cpus_allowed);
	}
	if (!is_cpu_exclusive(cur)) {
		cpus_or(pspan, pspan, cur->cpus_allowed);
		if (cpus_equal(pspan, cur->cpus_allowed))
			return;
		cspan = CPU_MASK_NONE;
	} else {
		if (cpus_empty(pspan))
			return;
		cspan = cur->cpus_allowed;
		/*
		 * Get all cpus from current cpuset's cpus_allowed not part
		 * of exclusive children
		 */
		list_for_each_entry(c, &cur->children, sibling) {
			if (is_cpu_exclusive(c))
				cpus_andnot(cspan, cspan, c->cpus_allowed);
		}
	}

	lock_cpu_hotplug();
	partition_sched_domains(&pspan, &cspan);
	unlock_cpu_hotplug();
}

/*
 * Call with manage_mutex held.  May take callback_mutex during call.
 */

static int update_cpumask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	int retval, cpus_unchanged;

	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	trialcs = *cs;

	/*
	 * We allow a cpuset's cpus_allowed to be empty; if it has attached
	 * tasks, we'll catch it later when we validate the change and return
	 * -ENOSPC.
	 */
	if (!buf[0] || (buf[0] == '\n' && !buf[1])) {
		cpus_clear(trialcs.cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs.cpus_allowed);
		if (retval < 0)
			return retval;
	}
	cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
	/* cpus_allowed cannot be empty for a cpuset with attached tasks. */
	if (atomic_read(&cs->count) && cpus_empty(trialcs.cpus_allowed))
		return -ENOSPC;
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		return retval;
	cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
	mutex_lock(&callback_mutex);
	cs->cpus_allowed = trialcs.cpus_allowed;
	mutex_unlock(&callback_mutex);
	if (is_cpu_exclusive(cs) && !cpus_unchanged)
		update_cpu_domains(cs);
	return 0;
}

/*
 * cpuset_migrate_mm
 *
 *    Migrate memory region from one set of nodes to another.
 *
 *    Temporarily set tasks mems_allowed to target nodes of migration,
 *    so that the migration code can allocate pages on these nodes.
 *
 *    Call holding manage_mutex, so our current->cpuset won't change
 *    during this call, as manage_mutex holds off any attach_task()
 *    calls.  Therefore we don't need to take task_lock around the
 *    call to guarantee_online_mems(), as we know no one is changing
 *    our tasks cpuset.
 *
 *    Hold callback_mutex around the two modifications of our tasks
 *    mems_allowed to synchronize with cpuset_mems_allowed().
 *
 *    While the mm_struct we are migrating is typically from some
 *    other task, the task_struct mems_allowed that we are hacking
 *    is for our current task, which must allocate new pages for that
 *    migrating memory region.
 *
 *    We call cpuset_update_task_memory_state() before hacking
 *    our tasks mems_allowed, so that we are assured of being in
 *    sync with our tasks cpuset, and in particular, callbacks to
 *    cpuset_update_task_memory_state() from nested page allocations
 *    won't see any mismatch of our cpuset and task mems_generation
 *    values, so won't overwrite our hacked tasks mems_allowed
 *    nodemask.
 */

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct task_struct *tsk = current;

	cpuset_update_task_memory_state();

	mutex_lock(&callback_mutex);
	tsk->mems_allowed = *to;
	mutex_unlock(&callback_mutex);

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	mutex_lock(&callback_mutex);
	guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
	mutex_unlock(&callback_mutex);
}

/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpusets mems_allowed and mems_generation, and for each
 * task in the cpuset, rebind any vma mempolicies and if
 * the cpuset is marked 'memory_migrate', migrate the tasks
 * pages to the new memory.
 *
 * Call with manage_mutex held.  May take callback_mutex during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpusets new mems_allowed.
 */

static int update_nodemask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	nodemask_t oldmem;
	struct task_struct *g, *p;
	struct mm_struct **mmarray;
	int i, n, ntasks;
	int migrate;
	int fudge;
	int retval;

	/* top_cpuset.mems_allowed tracks node_online_map; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	trialcs = *cs;

	/*
	 * We allow a cpuset's mems_allowed to be empty; if it has attached
	 * tasks, we'll catch it later when we validate the change and return
	 * -ENOSPC.
	 */
	if (!buf[0] || (buf[0] == '\n' && !buf[1])) {
		nodes_clear(trialcs.mems_allowed);
	} else {
		retval = nodelist_parse(buf, trialcs.mems_allowed);
		if (retval < 0)
			goto done;
	}
	nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
	oldmem = cs->mems_allowed;
	if (nodes_equal(oldmem, trialcs.mems_allowed)) {
		retval = 0;		/* Too easy - nothing to do */
		goto done;
	}
	/* mems_allowed cannot be empty for a cpuset with attached tasks. */
	if (atomic_read(&cs->count) && nodes_empty(trialcs.mems_allowed)) {
		retval = -ENOSPC;
		goto done;
	}
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		goto done;

	mutex_lock(&callback_mutex);
	cs->mems_allowed = trialcs.mems_allowed;
	cs->mems_generation = cpuset_mems_generation++;
	mutex_unlock(&callback_mutex);

	set_cpuset_being_rebound(cs);	/* causes mpol_copy() rebind */

	fudge = 10;				/* spare mmarray[] slots */
	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
	retval = -ENOMEM;

	/*
	 * Allocate mmarray[] to hold mm reference for each task
	 * in cpuset cs.
	 * Can't kmalloc GFP_KERNEL while holding
	 * tasklist_lock.  We could use GFP_ATOMIC, but with a
	 * few more lines of code, we can retry until we get a big
	 * enough mmarray[] w/o using GFP_ATOMIC.
	 */
	while (1) {
		ntasks = atomic_read(&cs->count);	/* guess */
		ntasks += fudge;
		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
		if (!mmarray)
			goto done;
		write_lock_irq(&tasklist_lock);		/* block fork */
		if (atomic_read(&cs->count) <= ntasks)
			break;				/* got enough */
		write_unlock_irq(&tasklist_lock);	/* try again */
		kfree(mmarray);
	}

	n = 0;

	/* Load up mmarray[] with mm reference for each task in cpuset. */
	do_each_thread(g, p) {
		struct mm_struct *mm;

		if (n >= ntasks) {
			printk(KERN_WARNING
				"Cpuset mempolicy rebind incomplete.\n");
			continue;
		}
		if (p->cpuset != cs)
			continue;
		mm = get_task_mm(p);
		if (!mm)
			continue;
		mmarray[n++] = mm;
	} while_each_thread(g, p);
	write_unlock_irq(&tasklist_lock);

	/*
	 * Now that we've dropped the tasklist spinlock, we can
	 * rebind the vma mempolicies of each mm in mmarray[] to their
	 * new cpuset, and release that mm.  The mpol_rebind_mm()
	 * call takes mmap_sem, which we couldn't take while holding
	 * tasklist_lock.  Forks can happen again now - the mpol_copy()
	 * cpuset_being_rebound check will catch such forks, and rebind
	 * their vma mempolicies too.  Because we still hold the global
	 * cpuset manage_mutex, we know that no other rebind effort will
	 * be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	migrate = is_memory_migrate(cs);
	for (i = 0; i < n; i++) {
		struct mm_struct *mm = mmarray[i];

		mpol_rebind_mm(mm, &cs->mems_allowed);
		if (migrate)
			cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed);
		mmput(mm);
	}

	/* We're done rebinding vma's to this cpusets new mems_allowed. */
	kfree(mmarray);
	set_cpuset_being_rebound(NULL);
	retval = 0;
done:
	return retval;
}

/*
 * Call with manage_mutex held.
 */

static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
{
	if (simple_strtoul(buf, NULL, 10) != 0)
		cpuset_memory_pressure_enabled = 1;
	else
		cpuset_memory_pressure_enabled = 0;
	return 0;
}

/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:	the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
 *				CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE,
 *				CS_SPREAD_PAGE, CS_SPREAD_SLAB)
 * cs:	the cpuset to update
 * buf:	the buffer where we read the 0 or 1
 *
 * Call with manage_mutex held.
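 *
 * For example, a write of "1" to a cpuset's cpu_exclusive file reaches
 * this routine from cpuset_common_file_write() below as
 * update_flag(CS_CPU_EXCLUSIVE, cs, "1").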
 */

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
{
	int turning_on;
	struct cpuset trialcs;
	int err, cpu_exclusive_changed;

	turning_on = (simple_strtoul(buf, NULL, 10) != 0);

	trialcs = *cs;
	if (turning_on)
		set_bit(bit, &trialcs.flags);
	else
		clear_bit(bit, &trialcs.flags);

	err = validate_change(cs, &trialcs);
	if (err < 0)
		return err;
	cpu_exclusive_changed =
		(is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
	mutex_lock(&callback_mutex);
	cs->flags = trialcs.flags;
	mutex_unlock(&callback_mutex);

	if (cpu_exclusive_changed)
		update_cpu_domains(cs);
	return 0;
}

/*
 * Frequency meter - How fast is some event occurring?
 *
 * These routines manage a digitally filtered, constant time based,
 * event frequency meter.  There are four routines:
 *   fmeter_init() - initialize a frequency meter.
 *   fmeter_markevent() - called each time the event happens.
 *   fmeter_getrate() - returns the recent rate of such events.
 *   fmeter_update() - internal routine used to update fmeter.
 *
 * A common data structure is passed to each of these routines,
 * which is used to keep track of the state required to manage the
 * frequency meter and its digital filter.
 *
 * The filter works on the number of events marked per unit time.
 * The filter is single-pole low-pass recursive (IIR).  The time unit
 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 * simulate 3 decimal digits of precision (multiplied by 1000).
 *
 * With an FM_COEF of 933, and a time base of 1 second, the filter
 * has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from the fmeter_getrate()
 * will be cut in half each 10 seconds, until it converges to zero.
 *
 * It is not worth doing a real infinitely recursive filter.  If more
 * than FM_MAXTICKS ticks have elapsed since the last filter event,
 * just compute FM_MAXTICKS ticks worth, by which point the level
 * will be stable.
 *
 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 * arithmetic overflow in the fmeter_update() routine.
 *
 * Given the simple 32 bit integer arithmetic used, this meter works
 * best for reporting rates between one per millisecond (msec) and
 * one per 32 (approx) seconds.  At constant rates faster than one
 * per msec it maxes out at values just under 1,000,000.  At constant
 * rates between one per msec, and one per second it will stabilize
 * to a value N*1000, where N is the rate of events per second.
 * At constant rates between one per second and one per 32 seconds,
 * it will be choppy, moving up on the seconds that have an event,
 * and then decaying until the next event.  At rates slower than
 * about one in 32 seconds, it decays all the way back to zero between
 * each event.
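 *
 * (Sanity check on the half-life claim: each one second tick scales
 * the stored value by FM_COEF/FM_SCALE = 0.933, and 0.933^10 is about
 * 0.50, so the value is indeed roughly halved every 10 ticks.)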
 */

#define FM_COEF 933		/* coefficient for half-life of 10 secs */
#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
#define FM_SCALE 1000		/* faux fixed point scale */

/* Initialize a frequency meter */
static void fmeter_init(struct fmeter *fmp)
{
	fmp->cnt = 0;
	fmp->val = 0;
	fmp->time = 0;
	spin_lock_init(&fmp->lock);
}

/* Internal meter update - process cnt events and update value */
static void fmeter_update(struct fmeter *fmp)
{
	time_t now = get_seconds();
	time_t ticks = now - fmp->time;

	if (ticks == 0)
		return;

	ticks = min(FM_MAXTICKS, ticks);
	while (ticks-- > 0)
		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
	fmp->time = now;

	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
	fmp->cnt = 0;
}

/* Process any previous ticks, then bump cnt by one (times scale). */
static void fmeter_markevent(struct fmeter *fmp)
{
	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
	spin_unlock(&fmp->lock);
}

/* Process any previous ticks, then return current value. */
static int fmeter_getrate(struct fmeter *fmp)
{
	int val;

	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	val = fmp->val;
	spin_unlock(&fmp->lock);
	return val;
}

/*
 * Attach task specified by pid in 'pidbuf' to cpuset 'cs', possibly
 * writing the path of the old cpuset in 'ppathbuf' if it needs to be
 * notified on release.
 *
 * Call holding manage_mutex.  May take callback_mutex and task_lock of
 * the task 'pid' during call.
 */

static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
{
	pid_t pid;
	struct task_struct *tsk;
	struct cpuset *oldcs;
	cpumask_t cpus;
	nodemask_t from, to;
	struct mm_struct *mm;
	int retval;

	if (sscanf(pidbuf, "%d", &pid) != 1)
		return -EIO;
	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
		return -ENOSPC;

	if (pid) {
		read_lock(&tasklist_lock);

		tsk = find_task_by_pid(pid);
		if (!tsk || tsk->flags & PF_EXITING) {
			read_unlock(&tasklist_lock);
			return -ESRCH;
		}

		get_task_struct(tsk);
		read_unlock(&tasklist_lock);

		if ((current->euid) && (current->euid != tsk->uid)
		    && (current->euid != tsk->suid)) {
			put_task_struct(tsk);
			return -EACCES;
		}
	} else {
		tsk = current;
		get_task_struct(tsk);
	}

	retval = security_task_setscheduler(tsk, 0, NULL);
	if (retval) {
		put_task_struct(tsk);
		return retval;
	}

	mutex_lock(&callback_mutex);

	task_lock(tsk);
	oldcs = tsk->cpuset;
	/*
	 * After getting 'oldcs' cpuset ptr, be sure still not exiting.
	 * If 'oldcs' might be the top_cpuset due to the_top_cpuset_hack
	 * then fail this attach_task(), to avoid breaking top_cpuset.count.
	 */
	if (tsk->flags & PF_EXITING) {
		task_unlock(tsk);
		mutex_unlock(&callback_mutex);
		put_task_struct(tsk);
		return -ESRCH;
	}
	atomic_inc(&cs->count);
	rcu_assign_pointer(tsk->cpuset, cs);
	task_unlock(tsk);

	guarantee_online_cpus(cs, &cpus);
	set_cpus_allowed(tsk, cpus);

	from = oldcs->mems_allowed;
	to = cs->mems_allowed;

	mutex_unlock(&callback_mutex);

	mm = get_task_mm(tsk);
	if (mm) {
		mpol_rebind_mm(mm, &to);
		if (is_memory_migrate(cs))
			cpuset_migrate_mm(mm, &from, &to);
		mmput(mm);
	}

	put_task_struct(tsk);
	synchronize_rcu();
	if (atomic_dec_and_test(&oldcs->count))
		check_for_release(oldcs, ppathbuf);
	return 0;
}

/* The various types of files and directories in a cpuset file system */

typedef enum {
	FILE_ROOT,
	FILE_DIR,
	FILE_MEMORY_MIGRATE,
	FILE_CPULIST,
	FILE_MEMLIST,
	FILE_CPU_EXCLUSIVE,
	FILE_MEM_EXCLUSIVE,
	FILE_NOTIFY_ON_RELEASE,
	FILE_MEMORY_PRESSURE_ENABLED,
	FILE_MEMORY_PRESSURE,
	FILE_SPREAD_PAGE,
	FILE_SPREAD_SLAB,
	FILE_TASKLIST,
} cpuset_filetype_t;

static ssize_t cpuset_common_file_write(struct file *file,
					const char __user *userbuf,
					size_t nbytes, loff_t *unused_ppos)
{
	struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
	struct cftype *cft = __d_cft(file->f_path.dentry);
	cpuset_filetype_t type = cft->private;
	char *buffer;
	char *pathbuf = NULL;
	int retval = 0;

	/* Crude upper limit on largest legitimate cpulist user might write. */
	if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES))
		return -E2BIG;

	/* +1 for nul-terminator */
	if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
		return -ENOMEM;

	if (copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out1;
	}
	buffer[nbytes] = 0;	/* nul-terminate */

	mutex_lock(&manage_mutex);

	if (is_removed(cs)) {
		retval = -ENODEV;
		goto out2;
	}

	switch (type) {
	case FILE_CPULIST:
		retval = update_cpumask(cs, buffer);
		break;
	case FILE_MEMLIST:
		retval = update_nodemask(cs, buffer);
		break;
	case FILE_CPU_EXCLUSIVE:
		retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
		break;
	case FILE_MEM_EXCLUSIVE:
		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
		break;
	case FILE_NOTIFY_ON_RELEASE:
		retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
		break;
	case FILE_MEMORY_MIGRATE:
		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
		break;
	case FILE_MEMORY_PRESSURE_ENABLED:
		retval = update_memory_pressure_enabled(cs, buffer);
		break;
	case FILE_MEMORY_PRESSURE:
		retval = -EACCES;
		break;
	case FILE_SPREAD_PAGE:
		retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
		cs->mems_generation = cpuset_mems_generation++;
		break;
	case FILE_SPREAD_SLAB:
		retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
		cs->mems_generation = cpuset_mems_generation++;
		break;
	case FILE_TASKLIST:
		retval = attach_task(cs, buffer, &pathbuf);
		break;
	default:
		retval = -EINVAL;
		goto out2;
	}

	if (retval == 0)
		retval = nbytes;
out2:
	mutex_unlock(&manage_mutex);
	cpuset_release_agent(pathbuf);
out1:
	kfree(buffer);
	return retval;
}

static ssize_t
cpuset_file_write(struct file *file, const char __user *buf,
						size_t nbytes, loff_t *ppos)
{
	ssize_t retval = 0;
	struct cftype *cft = __d_cft(file->f_path.dentry);
	if (!cft)
		return -ENODEV;

	/* special function ? */
	if (cft->write)
		retval = cft->write(file, buf, nbytes, ppos);
	else
		retval = cpuset_common_file_write(file, buf, nbytes, ppos);

	return retval;
}

/*
 * These ascii lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map.  If read in smaller
 * chunks, there is no guarantee of atomicity.  Since the display format
 * used, list of ranges of sequential numbers, is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
 * A single large read to a buffer that crosses a page boundary is
 * ok, because the result being copied to user land is not recomputed
 * across a page fault.
 */

static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
{
	cpumask_t mask;

	mutex_lock(&callback_mutex);
	mask = cs->cpus_allowed;
	mutex_unlock(&callback_mutex);

	return cpulist_scnprintf(page, PAGE_SIZE, mask);
}

static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
{
	nodemask_t mask;

	mutex_lock(&callback_mutex);
	mask = cs->mems_allowed;
	mutex_unlock(&callback_mutex);

	return nodelist_scnprintf(page, PAGE_SIZE, mask);
}

static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
						size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_path.dentry);
	struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
	cpuset_filetype_t type = cft->private;
	char *page;
	ssize_t retval = 0;
	char *s;

	if (!(page = (char *)__get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	s = page;

	switch (type) {
	case FILE_CPULIST:
		s += cpuset_sprintf_cpulist(s, cs);
		break;
	case FILE_MEMLIST:
		s += cpuset_sprintf_memlist(s, cs);
		break;
	case FILE_CPU_EXCLUSIVE:
		*s++ = is_cpu_exclusive(cs) ? '1' : '0';
		break;
	case FILE_MEM_EXCLUSIVE:
		*s++ = is_mem_exclusive(cs) ? '1' : '0';
		break;
	case FILE_NOTIFY_ON_RELEASE:
		*s++ = notify_on_release(cs) ? '1' : '0';
		break;
	case FILE_MEMORY_MIGRATE:
		*s++ = is_memory_migrate(cs) ? '1' : '0';
		break;
	case FILE_MEMORY_PRESSURE_ENABLED:
		*s++ = cpuset_memory_pressure_enabled ? '1' : '0';
		break;
	case FILE_MEMORY_PRESSURE:
		s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
		break;
	case FILE_SPREAD_PAGE:
		*s++ = is_spread_page(cs) ? '1' : '0';
		break;
	case FILE_SPREAD_SLAB:
		*s++ = is_spread_slab(cs) ? '1' : '0';
		break;
	default:
		retval = -EINVAL;
		goto out;
	}
	*s++ = '\n';

	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
out:
	free_page((unsigned long)page);
	return retval;
}

static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes,
						loff_t *ppos)
{
	ssize_t retval = 0;
	struct cftype *cft = __d_cft(file->f_path.dentry);
	if (!cft)
		return -ENODEV;

	/* special function ?
	 */
	if (cft->read)
		retval = cft->read(file, buf, nbytes, ppos);
	else
		retval = cpuset_common_file_read(file, buf, nbytes, ppos);

	return retval;
}

static int cpuset_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct cftype *cft;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	cft = __d_cft(file->f_path.dentry);
	if (!cft)
		return -ENODEV;
	if (cft->open)
		err = cft->open(inode, file);
	else
		err = 0;

	return err;
}

static int cpuset_file_release(struct inode *inode, struct file *file)
{
	struct cftype *cft = __d_cft(file->f_path.dentry);
	if (cft->release)
		return cft->release(inode, file);
	return 0;
}

/*
 * cpuset_rename - Only allow simple rename of directories in place.
 */
static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry)
{
	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;
	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
}

static const struct file_operations cpuset_file_operations = {
	.read = cpuset_file_read,
	.write = cpuset_file_write,
	.llseek = generic_file_llseek,
	.open = cpuset_file_open,
	.release = cpuset_file_release,
};

static const struct inode_operations cpuset_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = cpuset_mkdir,
	.rmdir = cpuset_rmdir,
	.rename = cpuset_rename,
};

static int cpuset_create_file(struct dentry *dentry, int mode)
{
	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cpuset_new_inode(mode);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cpuset_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cpuset_file_operations;
	}

	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

/*
 * cpuset_create_dir - create a directory for an object.
 * cs:	the cpuset we create the directory for.
 *	It must have a valid ->parent field
 *	And we are going to fill its ->dentry field.
 * name:	The name to give to the cpuset directory.  Will be copied.
 * mode:	mode to set on new directory.
 */

static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
{
	struct dentry *dentry = NULL;
	struct dentry *parent;
	int error = 0;

	parent = cs->parent->dentry;
	dentry = cpuset_get_dentry(parent, name);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	error = cpuset_create_file(dentry, S_IFDIR | mode);
	if (!error) {
		dentry->d_fsdata = cs;
		inc_nlink(parent->d_inode);
		cs->dentry = dentry;
	}
	dput(dentry);

	return error;
}

static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
{
	struct dentry *dentry;
	int error;

	mutex_lock(&dir->d_inode->i_mutex);
	dentry = cpuset_get_dentry(dir, cft->name);
	if (!IS_ERR(dentry)) {
		error = cpuset_create_file(dentry, 0644 | S_IFREG);
		if (!error)
			dentry->d_fsdata = (void *)cft;
		dput(dentry);
	} else
		error = PTR_ERR(dentry);
	mutex_unlock(&dir->d_inode->i_mutex);
	return error;
}

/*
 * Stuff for reading the 'tasks' file.
 *
 * Reading this file can return large amounts of data if a cpuset has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 * Upon tasks file open(), a struct ctr_struct is allocated, that
 * will have a pointer to an array (also allocated here).  The struct
 * ctr_struct * is stored in file->private_data.  Its resources will
 * be freed by release() when the file is closed.  The array is used
 * to sprintf the PIDs and then used by read().
 */

/* cpusets_tasks_read array */

struct ctr_struct {
	char *buf;
	int bufsz;
};

/*
 * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
 * Return actual number of pids loaded.  No need to task_lock(p)
 * when reading out p->cpuset, as we don't really care if it changes
 * on the next cycle, and we are not going to try to dereference it.
 */
static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
{
	int n = 0;
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);

	do_each_thread(g, p) {
		if (p->cpuset == cs) {
			if (unlikely(n == npids))
				goto array_full;
			pidarray[n++] = p->pid;
		}
	} while_each_thread(g, p);

array_full:
	read_unlock(&tasklist_lock);
	return n;
}

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

/*
 * Convert array 'a' of 'npids' pid_t's to a string of newline separated
 * decimal pids in 'buf'.  Don't write more than 'sz' chars, but return
 * count 'cnt' of how many chars would be written if buf were large enough.
 */
static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
{
	int cnt = 0;
	int i;

	for (i = 0; i < npids; i++)
		cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
	return cnt;
}

/*
 * Handle an open on 'tasks' file.  Prepare a buffer listing the
 * process id's of tasks currently attached to the cpuset being opened.
 *
 * Does not require any specific cpuset mutexes, and does not take any.
 */
static int cpuset_tasks_open(struct inode *unused, struct file *file)
{
	struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
	struct ctr_struct *ctr;
	pid_t *pidarray;
	int npids;
	char c;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		goto err0;

	/*
	 * If cpuset gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cpuset users didn't
	 * show up until sometime later on.
	 */
	npids = atomic_read(&cs->count);
	pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
	if (!pidarray)
		goto err1;

	npids = pid_array_load(pidarray, npids, cs);
	sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);

	/* Call pid_array_to_buf() twice, first just to get bufsz */
	ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
	ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
	if (!ctr->buf)
		goto err2;
	ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);

	kfree(pidarray);
	file->private_data = ctr;
	return 0;

err2:
	kfree(pidarray);
err1:
	kfree(ctr);
err0:
	return -ENOMEM;
}

static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
						size_t nbytes, loff_t *ppos)
{
	struct ctr_struct *ctr = file->private_data;

	return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
}

static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
{
	struct ctr_struct *ctr;

	if (file->f_mode & FMODE_READ) {
		ctr = file->private_data;
		kfree(ctr->buf);
		kfree(ctr);
	}
	return 0;
}

/*
 * for the common functions, 'private' gives the type of file
 */

static struct cftype cft_tasks = {
	.name = "tasks",
	.open = cpuset_tasks_open,
	.read = cpuset_tasks_read,
	.release = cpuset_tasks_release,
	.private = FILE_TASKLIST,
};

static struct cftype cft_cpus = {
	.name = "cpus",
	.private = FILE_CPULIST,
};

static struct cftype cft_mems = {
	.name = "mems",
	.private = FILE_MEMLIST,
};

static struct cftype cft_cpu_exclusive = {
	.name = "cpu_exclusive",
	.private = FILE_CPU_EXCLUSIVE,
};

static struct cftype cft_mem_exclusive = {
	.name = "mem_exclusive",
	.private = FILE_MEM_EXCLUSIVE,
};

static struct cftype cft_notify_on_release = {
	.name = "notify_on_release",
	.private = FILE_NOTIFY_ON_RELEASE,
};

static struct cftype cft_memory_migrate = {
	.name = "memory_migrate",
	.private = FILE_MEMORY_MIGRATE,
};

static struct cftype cft_memory_pressure_enabled = {
	.name = "memory_pressure_enabled",
	.private = FILE_MEMORY_PRESSURE_ENABLED,
};

static struct cftype cft_memory_pressure = {
	.name = "memory_pressure",
	.private = FILE_MEMORY_PRESSURE,
};

static struct cftype cft_spread_page = {
	.name = "memory_spread_page",
	.private = FILE_SPREAD_PAGE,
};

static struct cftype cft_spread_slab = {
	.name = "memory_spread_slab",
	.private = FILE_SPREAD_SLAB,
};

static int cpuset_populate_dir(struct dentry *cs_dentry)
{
	int err;

	if ((err =
static int cpuset_populate_dir(struct dentry *cs_dentry)
{
        int err;

        if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_spread_page)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_spread_slab)) < 0)
                return err;
        if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
                return err;
        return 0;
}

/*
 * cpuset_create - create a cpuset
 * parent: cpuset that will be parent of the new cpuset.
 * name: name of the new cpuset.  Will be strcpy'ed.
 * mode: mode to set on the new inode.
 *
 * Must be called with the mutex on the parent inode held.
 */

static long cpuset_create(struct cpuset *parent, const char *name, int mode)
{
        struct cpuset *cs;
        int err;

        cs = kmalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return -ENOMEM;

        mutex_lock(&manage_mutex);
        cpuset_update_task_memory_state();
        cs->flags = 0;
        if (notify_on_release(parent))
                set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
        if (is_spread_page(parent))
                set_bit(CS_SPREAD_PAGE, &cs->flags);
        if (is_spread_slab(parent))
                set_bit(CS_SPREAD_SLAB, &cs->flags);
        cs->cpus_allowed = CPU_MASK_NONE;
        cs->mems_allowed = NODE_MASK_NONE;
        atomic_set(&cs->count, 0);
        INIT_LIST_HEAD(&cs->sibling);
        INIT_LIST_HEAD(&cs->children);
        cs->mems_generation = cpuset_mems_generation++;
        fmeter_init(&cs->fmeter);

        cs->parent = parent;

        mutex_lock(&callback_mutex);
        list_add(&cs->sibling, &cs->parent->children);
        number_of_cpusets++;
        mutex_unlock(&callback_mutex);

        err = cpuset_create_dir(cs, name, mode);
        if (err < 0)
                goto err;

        /*
         * Release manage_mutex before cpuset_populate_dir() because it
         * will down() this new directory's i_mutex and if we race with
         * another mkdir, we might deadlock.
         */
        mutex_unlock(&manage_mutex);

        err = cpuset_populate_dir(cs->dentry);
        /* If err < 0, we have a half-filled directory - oh well ;) */
        return 0;
err:
        /* Undo the list_add and cpuset count under callback_mutex */
        mutex_lock(&callback_mutex);
        list_del(&cs->sibling);
        number_of_cpusets--;
        mutex_unlock(&callback_mutex);
        mutex_unlock(&manage_mutex);
        kfree(cs);
        return err;
}

static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct cpuset *c_parent = dentry->d_parent->d_fsdata;

        /* the vfs holds inode->i_mutex already */
        return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
}
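/*
 * For illustration only (not part of the kernel build): a userspace
 * sketch of the mkdir(2)-driven interface that cpuset_mkdir() backs.
 * Creating a cpuset is just making a directory; configuring it is
 * writing the files that cpuset_populate_dir() created.  The mount
 * point /dev/cpuset, the cpuset name "foo" and the masks written are
 * assumptions for this sketch.
 *
 *      #include <fcntl.h>
 *      #include <string.h>
 *      #include <sys/stat.h>
 *      #include <unistd.h>
 *
 *      static void write_str(const char *path, const char *val)
 *      {
 *              int fd = open(path, O_WRONLY);
 *
 *              if (fd >= 0) {
 *                      write(fd, val, strlen(val));
 *                      close(fd);
 *              }
 *      }
 *
 *      void make_cpuset(void)
 *      {
 *              mkdir("/dev/cpuset/foo", 0755);         // cpuset_mkdir()
 *              write_str("/dev/cpuset/foo/cpus", "0-3");
 *              write_str("/dev/cpuset/foo/mems", "0");
 *              write_str("/dev/cpuset/foo/tasks", "0"); // pid 0: attach self
 *      }
 */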
/*
 * Locking note on the strange update_flag() call below:
 *
 * If the cpuset being removed is marked cpu_exclusive, then simulate
 * turning cpu_exclusive off, which will call update_cpu_domains().
 * The lock_cpu_hotplug() call in update_cpu_domains() must not be
 * made while holding callback_mutex.  Elsewhere the kernel nests
 * callback_mutex inside lock_cpu_hotplug() calls.  So the reverse
 * nesting would risk an ABBA deadlock.
 */

static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
        struct cpuset *cs = dentry->d_fsdata;
        struct dentry *d;
        struct cpuset *parent;
        char *pathbuf = NULL;

        /* the vfs holds both the parent's and this inode's i_mutex already */

        mutex_lock(&manage_mutex);
        cpuset_update_task_memory_state();
        if (atomic_read(&cs->count) > 0) {
                mutex_unlock(&manage_mutex);
                return -EBUSY;
        }
        if (!list_empty(&cs->children)) {
                mutex_unlock(&manage_mutex);
                return -EBUSY;
        }
        if (is_cpu_exclusive(cs)) {
                int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
                if (retval < 0) {
                        mutex_unlock(&manage_mutex);
                        return retval;
                }
        }
        parent = cs->parent;
        mutex_lock(&callback_mutex);
        set_bit(CS_REMOVED, &cs->flags);
        list_del(&cs->sibling); /* delete my sibling from parent->children */
        spin_lock(&cs->dentry->d_lock);
        d = dget(cs->dentry);
        cs->dentry = NULL;
        spin_unlock(&d->d_lock);
        cpuset_d_remove_dir(d);
        dput(d);
        number_of_cpusets--;
        mutex_unlock(&callback_mutex);
        if (list_empty(&parent->children))
                check_for_release(parent, &pathbuf);
        mutex_unlock(&manage_mutex);
        cpuset_release_agent(pathbuf);
        return 0;
}

/*
 * cpuset_init_early - just enough so that the calls to
 * cpuset_update_task_memory_state() in early init code
 * are harmless.
 */

int __init cpuset_init_early(void)
{
        struct task_struct *tsk = current;

        tsk->cpuset = &top_cpuset;
        tsk->cpuset->mems_generation = cpuset_mems_generation++;
        return 0;
}

/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system.
 **/

int __init cpuset_init(void)
{
        struct dentry *root;
        int err;

        top_cpuset.cpus_allowed = CPU_MASK_ALL;
        top_cpuset.mems_allowed = NODE_MASK_ALL;

        fmeter_init(&top_cpuset.fmeter);
        top_cpuset.mems_generation = cpuset_mems_generation++;

        init_task.cpuset = &top_cpuset;

        err = register_filesystem(&cpuset_fs_type);
        if (err < 0)
                goto out;
        cpuset_mount = kern_mount(&cpuset_fs_type);
        if (IS_ERR(cpuset_mount)) {
                printk(KERN_ERR "cpuset: could not mount!\n");
                err = PTR_ERR(cpuset_mount);
                cpuset_mount = NULL;
                goto out;
        }
        root = cpuset_mount->mnt_sb->s_root;
        root->d_fsdata = &top_cpuset;
        inc_nlink(root->d_inode);
        top_cpuset.dentry = root;
        root->d_inode->i_op = &cpuset_dir_inode_operations;
        number_of_cpusets = 1;
        err = cpuset_populate_dir(root);
        /* memory_pressure_enabled is in the root cpuset only */
        if (err == 0)
                err = cpuset_add_file(root, &cft_memory_pressure_enabled);
out:
        return err;
}
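/*
 * For illustration only (not part of the kernel build): although
 * cpuset_init() kern_mount()s the filesystem internally, userspace
 * still needs its own mount to reach the cpuset hierarchy.  A sketch,
 * assuming the conventional /dev/cpuset mount point:
 *
 *      #include <sys/mount.h>
 *      #include <sys/stat.h>
 *
 *      int mount_cpusets(void)
 *      {
 *              mkdir("/dev/cpuset", 0755);     // ignore EEXIST
 *              return mount("cpuset", "/dev/cpuset", "cpuset", 0, NULL);
 *      }
 *
 * Equivalent to "mount -t cpuset cpuset /dev/cpuset".
 */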
/*
 * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
 * or memory nodes, we need to walk over the cpuset hierarchy,
 * removing that CPU or node from all cpusets.  If this removes the
 * last CPU or node from a cpuset, then the guarantee_online_cpus()
 * or guarantee_online_mems() code will use that emptied cpuset's
 * parent's online CPUs or nodes.  Cpusets that were already empty
 * of CPUs or nodes are left empty.
 *
 * This routine is intentionally inefficient in a couple of regards.
 * It will check all cpusets in a subtree even if the top cpuset of
 * the subtree has no offline CPUs or nodes.  It checks both CPUs and
 * nodes, even though the caller could have been coded to know that
 * only one of CPUs or nodes needed to be checked on a given call.
 * This was done to minimize text size rather than cpu cycles.
 *
 * Call with both manage_mutex and callback_mutex held.
 *
 * Recursive, on depth of cpuset subtree.
 */

static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
{
        struct cpuset *c;

        /* Each of our child cpusets' cpus and mems must be online */
        list_for_each_entry(c, &cur->children, sibling) {
                guarantee_online_cpus_mems_in_subtree(c);
                if (!cpus_empty(c->cpus_allowed))
                        guarantee_online_cpus(c, &c->cpus_allowed);
                if (!nodes_empty(c->mems_allowed))
                        guarantee_online_mems(c, &c->mems_allowed);
        }
}

/*
 * The cpus_allowed and mems_allowed masks in the top_cpuset track
 * cpu_online_map and node_online_map.  Force the top cpuset to track
 * what's online after any CPU or memory node hotplug or unplug event.
 *
 * To ensure that we don't remove a CPU or node from the top cpuset
 * that is currently in use by a child cpuset (which would violate
 * the rule that cpusets must be subsets of their parent), we first
 * call the recursive routine guarantee_online_cpus_mems_in_subtree().
 *
 * Since there are two callers of this routine, one for CPU hotplug
 * events and one for memory node hotplug events, we could have coded
 * two separate routines here.  We code it as a single common routine
 * in order to minimize text size.
 */

static void common_cpu_mem_hotplug_unplug(void)
{
        mutex_lock(&manage_mutex);
        mutex_lock(&callback_mutex);

        guarantee_online_cpus_mems_in_subtree(&top_cpuset);
        top_cpuset.cpus_allowed = cpu_online_map;
        top_cpuset.mems_allowed = node_online_map;

        mutex_unlock(&callback_mutex);
        mutex_unlock(&manage_mutex);
}

/*
 * The top_cpuset tracks what CPUs and Memory Nodes are online,
 * period.  This is necessary in order to make cpusets transparent
 * (of no effect) on systems that are actively using CPU hotplug
 * but making no active use of cpusets.
 *
 * This routine ensures that top_cpuset.cpus_allowed tracks
 * cpu_online_map on each CPU hotplug (cpuhp) event.
 */

static int cpuset_handle_cpuhp(struct notifier_block *nb,
                                unsigned long phase, void *cpu)
{
        common_cpu_mem_hotplug_unplug();
        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Keep top_cpuset.mems_allowed tracking node_online_map.
 * Call this routine anytime after you change node_online_map.
 * See also the previous routine cpuset_handle_cpuhp().
 */

void cpuset_track_online_nodes(void)
{
        common_cpu_mem_hotplug_unplug();
}
#endif

/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized.
 **/

void __init cpuset_init_smp(void)
{
        top_cpuset.cpus_allowed = cpu_online_map;
        top_cpuset.mems_allowed = node_online_map;

        hotcpu_notifier(cpuset_handle_cpuhp, 0);
}
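/*
 * A worked example (illustrative) of the hotplug walk done by
 * guarantee_online_cpus_mems_in_subtree() above.  Suppose CPUs 0-3
 * are online and the hierarchy is:
 *
 *      top_cpuset      cpus_allowed = 0-3
 *        `-- foo       cpus_allowed = 2-3
 *
 * If CPUs 2 and 3 are then unplugged, the recursive walk runs first:
 * foo's mask 2-3 no longer contains an online CPU, so
 * guarantee_online_cpus() falls back up the hierarchy and foo becomes
 * 0-1.  Only afterwards is top_cpuset trimmed to the new
 * cpu_online_map (0-1), so the subset rule (child within parent) is
 * never violated in between.
 */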
/**
 * cpuset_fork - attach newly forked task to its parent's cpuset.
 * @child: pointer to task_struct of the newly forked child.
 *
 * Description: A task inherits its parent's cpuset at fork().
 *
 * A pointer to the shared cpuset was automatically copied in fork.c
 * by dup_task_struct().  However, we ignore that copy, since it was
 * not made under the protection of task_lock(), so might no longer be
 * a valid cpuset pointer.  attach_task() might have already changed
 * current->cpuset, allowing the previously referenced cpuset to
 * be removed and freed.  Instead, we task_lock(current) and copy
 * its present value of current->cpuset for our freshly forked child.
 *
 * At the point that cpuset_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 **/

void cpuset_fork(struct task_struct *child)
{
        task_lock(current);
        child->cpuset = current->cpuset;
        atomic_inc(&child->cpuset->count);
        task_unlock(current);
}
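/*
 * For illustration only (not part of the kernel build): a userspace
 * sketch showing the inheritance that cpuset_fork() implements.  A
 * child forked by a task in cpuset "foo" starts out attached to "foo"
 * as well, which /proc/<pid>/cpuset makes visible:
 *
 *      #include <stdio.h>
 *      #include <unistd.h>
 *      #include <sys/wait.h>
 *
 *      static void show_my_cpuset(const char *who)
 *      {
 *              char path[64];
 *              FILE *f = fopen("/proc/self/cpuset", "r");
 *
 *              if (f && fgets(path, sizeof(path), f))
 *                      printf("%s: %s", who, path);    // e.g. "/foo"
 *              if (f)
 *                      fclose(f);
 *      }
 *
 *      int main(void)
 *      {
 *              show_my_cpuset("parent");
 *              if (fork() == 0) {
 *                      show_my_cpuset("child");        // same cpuset path
 *                      _exit(0);
 *              }
 *              wait(NULL);
 *              return 0;
 *      }
 */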
/**
 * cpuset_exit - detach cpuset from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cpuset from @tsk and release it.
 *
 * Note that cpusets marked notify_on_release force every task in
 * them to take the global manage_mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cpusets where very high task exit scaling
 * is required on large systems.
 *
 * Don't even think about dereferencing 'cs' after the cpuset use count
 * goes to zero, except inside a critical section guarded by manage_mutex
 * or callback_mutex.  Otherwise a zero cpuset use count is a license to
 * any other task to nuke the cpuset immediately, via cpuset_rmdir().
 *
 * This routine has to take manage_mutex, not callback_mutex, because
 * it is holding that mutex while calling check_for_release(),
 * which calls kmalloc(), and so cannot be called while holding
 * callback_mutex.
 *
 * the_top_cpuset_hack:
 *
 * Set the exiting task's cpuset to the root cpuset (top_cpuset).
 *
 * Don't leave a task unable to allocate memory, as that is an
 * accident waiting to happen should someone add a callout in
 * do_exit() after the cpuset_exit() call that might allocate.
 * If a task tries to allocate memory with an invalid cpuset,
 * it will oops in cpuset_update_task_memory_state().
 *
 * We call cpuset_exit() while the task is still competent to
 * handle notify_on_release(), then leave the task attached to
 * the root cpuset (top_cpuset) for the remainder of its exit.
 *
 * To do this properly, we would increment the reference count on
 * top_cpuset, and near the very end of the kernel/exit.c do_exit()
 * code we would add a second cpuset function call, to drop that
 * reference.  This would just create an unnecessary hot spot on
 * the top_cpuset reference count, to no avail.
 *
 * Normally, holding a reference to a cpuset without bumping its
 * count is unsafe.  The cpuset could go away, or someone could
 * attach us to a different cpuset, decrementing the count on
 * the first cpuset that we never incremented.  But in this case,
 * top_cpuset isn't going away, and either the task has PF_EXITING
 * set, which wards off any attach_task() attempts, or the task is
 * a failed fork, never visible to attach_task().
 *
 * Another way to do this would be to set the cpuset pointer
 * to NULL here, and check in cpuset_update_task_memory_state()
 * for a NULL pointer.  This hack avoids that NULL check, for no
 * cost (other than this way too long comment ;).
 **/

void cpuset_exit(struct task_struct *tsk)
{
        struct cpuset *cs;

        task_lock(current);
        cs = tsk->cpuset;
        tsk->cpuset = &top_cpuset;      /* the_top_cpuset_hack - see above */
        task_unlock(current);

        if (notify_on_release(cs)) {
                char *pathbuf = NULL;

                mutex_lock(&manage_mutex);
                if (atomic_dec_and_test(&cs->count))
                        check_for_release(cs, &pathbuf);
                mutex_unlock(&manage_mutex);
                cpuset_release_agent(pathbuf);
        } else {
                atomic_dec(&cs->count);
        }
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 *
 * Description: Returns the cpumask_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_map, even if this means going outside the
 * task's cpuset.
 **/

cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
{
        cpumask_t mask;

        mutex_lock(&callback_mutex);
        task_lock(tsk);
        guarantee_online_cpus(tsk->cpuset, &mask);
        task_unlock(tsk);
        mutex_unlock(&callback_mutex);

        return mask;
}

void cpuset_init_current_mems_allowed(void)
{
        current->mems_allowed = NODE_MASK_ALL;
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_online_map, even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
        nodemask_t mask;

        mutex_lock(&callback_mutex);
        task_lock(tsk);
        guarantee_online_mems(tsk->cpuset, &mask);
        task_unlock(tsk);
        mutex_unlock(&callback_mutex);

        return mask;
}

/**
 * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
 * @zl: the zonelist to be checked
 *
 * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
 */
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
{
        int i;

        for (i = 0; zl->zones[i]; i++) {
                int nid = zone_to_nid(zl->zones[i]);

                if (node_isset(nid, current->mems_allowed))
                        return 1;
        }
        return 0;
}

/*
 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
 * ancestor of the specified cpuset.  Call holding callback_mutex.
 * If no ancestor is mem_exclusive (an unusual configuration), then
 * returns the root cpuset.
 */
static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
{
        while (!is_mem_exclusive(cs) && cs->parent)
                cs = cs->parent;
        return cs;
}
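/*
 * A worked example (illustrative) of nearest_exclusive_ancestor().
 * Given the hierarchy below, with mem_exclusive set only on "batch":
 *
 *      top_cpuset
 *        `-- batch             (mem_exclusive)
 *              `-- job1
 *                    `-- step1
 *
 * nearest_exclusive_ancestor(step1) walks step1 -> job1 -> batch and
 * returns batch.  Called on a cpuset that is itself mem_exclusive, it
 * returns that cpuset; called where no ancestor is mem_exclusive, the
 * loop stops at the root (cs->parent == NULL) and returns top_cpuset.
 */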
/**
 * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
 * @z: is this zone on an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If
 * __GFP_THISNODE is set, yes, we can always allocate.  If zone
 * z's node is in our task's mems_allowed, yes.  If it's not a
 * __GFP_HARDWALL request and this zone's node is in the nearest
 * mem_exclusive cpuset ancestor of this task's cpuset, yes.
 * If the task has been OOM killed and has access to memory reserves
 * as specified by the TIF_MEMDIE flag, yes.
 * Otherwise, no.
 *
 * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
 * reduces to cpuset_zone_allowed_hardwall().  Otherwise,
 * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
 * from an enclosing cpuset.
 *
 * cpuset_zone_allowed_hardwall() only handles the simpler case of
 * hardwall cpusets, and never sleeps.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed and is marked TIF_MEMDIE.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing mem_exclusive ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_mutex.  The
 * __alloc_pages() routine only calls here with the __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking callback_mutex.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags.  That logic and the checks below have the combined
 * effect that:
 *      in_interrupt - any node ok (current task context irrelevant)
 *      GFP_ATOMIC   - any node ok
 *      TIF_MEMDIE   - any node ok
 *      GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok
 *      GFP_USER     - only nodes in current task's mems_allowed ok.
 *
 * Rule:
 *    Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
 *    pass in the __GFP_HARDWALL flag set in gfp_mask, which disables
 *    the code that might scan up ancestor cpusets and sleep.
 */

int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
        int node;                       /* node that zone z is on */
        const struct cpuset *cs;        /* current cpuset ancestors */
        int allowed;                    /* is allocation in zone z allowed? */

        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
                return 1;
        node = zone_to_nid(z);
        might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
        if (node_isset(node, current->mems_allowed))
                return 1;
        /*
         * Allow tasks that have access to memory reserves because they have
         * been OOM killed to get memory anywhere.
         */
        if (unlikely(test_thread_flag(TIF_MEMDIE)))
                return 1;
        if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
                return 0;

        if (current->flags & PF_EXITING) /* Let dying task have memory */
                return 1;

        /* Not hardwall and node outside mems_allowed: scan up cpusets */
        mutex_lock(&callback_mutex);

        task_lock(current);
        cs = nearest_exclusive_ancestor(current->cpuset);
        task_unlock(current);

        allowed = node_isset(node, cs->mems_allowed);
        mutex_unlock(&callback_mutex);
        return allowed;
}
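/*
 * A worked example (illustrative) of the decision table above.
 * Suppose a task runs in a cpuset with mems_allowed = node 1, whose
 * nearest mem_exclusive ancestor allows nodes 0-1, and the allocator
 * asks about a zone on node 0:
 *
 *      GFP_USER   (__GFP_HARDWALL set):   node 0 not in mems_allowed,
 *                                         hardwall stops the scan -> no.
 *      GFP_KERNEL (__GFP_HARDWALL clear): the scan finds node 0 in the
 *                                         mem_exclusive ancestor -> yes.
 *
 * A zone on node 1 is allowed either way, and a task marked
 * TIF_MEMDIE is allowed any node.
 */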
/**
 * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
 * @z: is this zone on an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.
 * If __GFP_THISNODE is set, yes, we can always allocate.  If zone
 * z's node is in our task's mems_allowed, yes.  If the task has been
 * OOM killed and has access to memory reserves as specified by the
 * TIF_MEMDIE flag, yes.  Otherwise, no.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * Unlike the cpuset_zone_allowed_softwall() variant, above,
 * this variant requires that the zone be in the current task's
 * mems_allowed or that we're in interrupt.  It does not scan up the
 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
 * It never sleeps.
 */

int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
        int node;                       /* node that zone z is on */

        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
                return 1;
        node = zone_to_nid(z);
        if (node_isset(node, current->mems_allowed))
                return 1;
        /*
         * Allow tasks that have access to memory reserves because they have
         * been OOM killed to get memory anywhere.
         */
        if (unlikely(test_thread_flag(TIF_MEMDIE)))
                return 1;
        return 0;
}

/**
 * cpuset_lock - lock out any changes to cpuset structures
 *
 * The out of memory (oom) code needs to keep cpusets
 * from being changed while it scans the tasklist looking for a
 * task in an overlapping cpuset.  Expose callback_mutex via this
 * cpuset_lock() routine, so the oom code can lock it, before
 * locking the task list.  The tasklist_lock is a spinlock, so
 * must be taken inside callback_mutex.
 */

void cpuset_lock(void)
{
        mutex_lock(&callback_mutex);
}

/**
 * cpuset_unlock - release lock on cpuset changes
 *
 * Undo the lock taken in a previous cpuset_lock() call.
 */

void cpuset_unlock(void)
{
        mutex_unlock(&callback_mutex);
}
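/*
 * For illustration only (not part of the kernel build): the lock
 * ordering that the cpuset_lock() comment above prescribes for an
 * oom-style tasklist scan.  callback_mutex (via cpuset_lock()) is
 * taken first, the tasklist_lock spinlock strictly inside it.  The
 * caller name scan_tasks_locked() is hypothetical.
 *
 *      void scan_tasks_locked(void)
 *      {
 *              cpuset_lock();                  // callback_mutex
 *              read_lock(&tasklist_lock);      // spinlock, nested inside
 *
 *              // ... walk tasks, compare cpusets ...
 *
 *              read_unlock(&tasklist_lock);
 *              cpuset_unlock();
 *      }
 *
 * Taking them in the other order would mean acquiring a sleeping
 * mutex while holding a spinlock, which is not allowed.
 */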
/**
 * cpuset_mem_spread_node() - On which node to begin search for a page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as those used for
 * file system buffers and inode caches, then instead of starting
 * on the local node to look for a free page, the starting node
 * is rotated around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online.  So it
 * should not be possible for the following code to return an
 * offline node.  But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start.  The zonelist passed to
 * __alloc_pages() will include all nodes.  If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */

int cpuset_mem_spread_node(void)
{
        int node;

        node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
        if (node == MAX_NUMNODES)
                node = first_node(current->mems_allowed);
        current->cpuset_mem_spread_rotor = node;
        return node;
}
EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);

/**
 * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
 * @p: pointer to task_struct of some other task.
 *
 * Description: Return true if the nearest mem_exclusive ancestor
 * cpusets of tasks @p and current overlap.  Used by the oom killer to
 * determine if task @p's memory usage might impact the memory
 * available to the current task.
 *
 * Call while holding callback_mutex.
 **/

int cpuset_excl_nodes_overlap(const struct task_struct *p)
{
        const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
        int overlap = 1;                /* do cpusets overlap? */

        task_lock(current);
        if (current->flags & PF_EXITING) {
                task_unlock(current);
                goto done;
        }
        cs1 = nearest_exclusive_ancestor(current->cpuset);
        task_unlock(current);

        task_lock((struct task_struct *)p);
        if (p->flags & PF_EXITING) {
                task_unlock((struct task_struct *)p);
                goto done;
        }
        cs2 = nearest_exclusive_ancestor(p->cpuset);
        task_unlock((struct task_struct *)p);

        overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
done:
        return overlap;
}

/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;
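/*
 * For illustration only (not part of the kernel build): a userspace
 * sketch of enabling and sampling memory_pressure.  The /dev/cpuset
 * mount point and cpuset name "foo" are assumptions for this sketch.
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      void sample_memory_pressure(void)
 *      {
 *              char buf[32];
 *              int n, fd;
 *
 *              // One-time, root cpuset only: enable collection
 *              fd = open("/dev/cpuset/memory_pressure_enabled", O_WRONLY);
 *              if (fd >= 0) {
 *                      write(fd, "1", 1);
 *                      close(fd);
 *              }
 *
 *              // Then poll the per-cpuset direct reclaim rate
 *              fd = open("/dev/cpuset/foo/memory_pressure", O_RDONLY);
 *              if (fd >= 0) {
 *                      n = read(fd, buf, sizeof(buf) - 1);
 *                      if (n > 0) {
 *                              buf[n] = '\0';
 *                              printf("pressure: %s", buf);
 *                      }
 *                      close(fd);
 *              }
 *      }
 */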
/**
 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure".  Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 **/

void __cpuset_memory_pressure_bump(void)
{
        struct cpuset *cs;

        task_lock(current);
        cs = current->cpuset;
        fmeter_markevent(&cs->fmeter);
        task_unlock(current);
}

/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take manage_mutex, keeping attach_task() from changing it
 *    anyway.  No need to check that tsk->cpuset != NULL, thanks to
 *    the_top_cpuset_hack in cpuset_exit(), which sets an exiting
 *    task's cpuset to top_cpuset.
 */
static int proc_cpuset_show(struct seq_file *m, void *v)
{
        struct pid *pid;
        struct task_struct *tsk;
        char *buf;
        int retval;

        retval = -ENOMEM;
        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                goto out;

        retval = -ESRCH;
        pid = m->private;
        tsk = get_pid_task(pid, PIDTYPE_PID);
        if (!tsk)
                goto out_free;

        retval = -EINVAL;
        mutex_lock(&manage_mutex);

        retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE);
        if (retval < 0)
                goto out_unlock;
        seq_puts(m, buf);
        seq_putc(m, '\n');
out_unlock:
        mutex_unlock(&manage_mutex);
        put_task_struct(tsk);
out_free:
        kfree(buf);
out:
        return retval;
}

static int cpuset_open(struct inode *inode, struct file *file)
{
        struct pid *pid = PROC_I(inode)->pid;

        return single_open(file, proc_cpuset_show, pid);
}

const struct file_operations proc_cpuset_operations = {
        .open = cpuset_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
{
        buffer += sprintf(buffer, "Cpus_allowed:\t");
        buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed);
        buffer += sprintf(buffer, "\n");
        buffer += sprintf(buffer, "Mems_allowed:\t");
        buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed);
        buffer += sprintf(buffer, "\n");
        return buffer;
}