Lines Matching defs:node (mm/hugetlb.c)

47 #include <linux/node.h>
1356 int node = NUMA_NO_NODE;
1368 * no need to ask again on the same node. Pool is node rather than
1371 if (zone_to_nid(zone) == node)
1373 node = zone_to_nid(zone);
1375 folio = dequeue_hugetlb_folio_node_exact(h, node);
1445 * node for alloc or free.
1463 * returns the previously saved node ["this node"] from which to
1465 * next node from which to allocate, handling wrap at end of node
1483 * node ["this node"] from which to free a huge page. Advance the
1484 * next node id whether or not we find a free huge page to free so
1485 * that the next attempt to free addresses the next node.
1499 #define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask) \
1502 ((node = hstate_next_node_to_alloc(next_node, mask)) || 1); \
1505 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1508 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
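
The comments above describe hugetlb's round-robin node walk: a saved "next node" cursor is returned as "this node" and then advanced with wrap-around, so successive allocations and frees interleave across the allowed nodes. Below is a minimal userspace sketch of that idea (illustrative only, not the kernel code): a plain bitmask stands in for nodemask_t, MAX_NODES for MAX_NUMNODES, and the kernel additionally re-checks that the saved node is still in the mask.

    /*
     * Illustrative round-robin node walk: return the saved "this node",
     * then advance the cursor to the next allowed node, wrapping at the
     * end of the mask.
     */
    #include <stdio.h>

    #define MAX_NODES 8

    static int next_allowed(int node, unsigned int allowed)
    {
        for (int i = 1; i <= MAX_NODES; i++) {
            int n = (node + i) % MAX_NODES;
            if (allowed & (1u << n))
                return n;
        }
        return -1;                      /* empty mask */
    }

    static int get_and_advance(int *cursor, unsigned int allowed)
    {
        int node = *cursor;             /* "this node" */
        *cursor = next_allowed(node, allowed);
        return node;
    }

    int main(void)
    {
        unsigned int allowed = 0x5;     /* nodes 0 and 2 allowed */
        int cursor = 0;

        for (int i = 0; i < 4; i++)
            printf("allocate on node %d\n", get_and_advance(&cursor, allowed));
        return 0;
    }
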
1575 int node;
1585 for_each_node_mask(node, *nodemask) {
1586 if (node == nid || !hugetlb_cma[node])
1589 page = cma_alloc(hugetlb_cma[node], nr_pages,
1818 struct llist_node *node;
1820 node = llist_del_all(&hpage_freelist);
1822 while (node) {
1826 folio = container_of((struct address_space **)node,
1828 node = node->next;
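
The hpage_freelist lines above are the lockless deferred-free path: freed pages are pushed onto an llist whose link is overlaid on the folio's otherwise-unused mapping field, and the draining code recovers each folio with container_of(). A self-contained userspace sketch of that overlay trick, with hypothetical struct names rather than the kernel types:

    /*
     * Userspace sketch of the list-link overlay: a singly linked list is
     * threaded through an unused pointer field and each element is
     * recovered with container_of().
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fake_folio {
        int id;
        void *mapping;                  /* reused as the list link while queued */
    };

    int main(void)
    {
        struct fake_folio a = { .id = 1 }, b = { .id = 2 };
        void *head = NULL;

        /* push, like llist_add(): store the old head in ->mapping */
        a.mapping = head; head = &a.mapping;
        b.mapping = head; head = &b.mapping;

        /* drain, like the worker: walk the links and recover each folio */
        for (void *node = head; node; ) {
            struct fake_folio *f = container_of((void **)node,
                                                struct fake_folio, mapping);
            node = *(void **)node;      /* node = node->next */
            printf("freeing folio %d\n", f->id);
        }
        return 0;
    }
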
2195 * failed, do not continue to try hard on the same node. Use the
2333 * Allocates a fresh hugetlb page in a node interleaved manner. The page
2342 int nr_nodes, node;
2344 for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
2347 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2357 * Remove huge page from pool from next node to free. Attempt to keep
2366 int nr_nodes, node;
2370 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2375 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2376 !list_empty(&h->hugepage_freelists[node])) {
2377 folio = list_entry(h->hugepage_freelists[node].next,
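
The two loops above add and remove pool pages in node-interleaved order to keep the per-node counts balanced. On the remove side, a node is only picked if its free list is non-empty and, when the removal is charged against surplus, only if that node actually has surplus pages. A minimal userspace sketch of that selection rule (not the kernel code):

    /*
     * Illustrative node-selection rule for removing a page from the pool:
     * walk the nodes round-robin and take the first one with a free page,
     * additionally requiring surplus pages on that node when the removal
     * is accounted as surplus.
     */
    #include <stdio.h>
    #include <stdbool.h>

    #define NODES 2

    static int free_pages[NODES]    = { 3, 1 };
    static int surplus_pages[NODES] = { 0, 1 };

    static int pick_node(int start, bool acct_surplus)
    {
        for (int i = 0; i < NODES; i++) {
            int node = (start + i) % NODES;
            if ((!acct_surplus || surplus_pages[node]) && free_pages[node])
                return node;
        }
        return -1;                      /* nothing to remove */
    }

    int main(void)
    {
        printf("plain removal from node %d\n", pick_node(0, false));   /* node 0 */
        printf("surplus removal from node %d\n", pick_node(0, true));  /* node 1 */
        return 0;
    }
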
3291 int nr_nodes, node = nid;
3293 /* do node specific alloc */
3301 /* allocate from next node when distributing huge pages */
3302 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) {
3305 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3323 * subsystem like zone id and node id.
3329 list_add(&m->list, &huge_boot_pages[node]);
3493 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n",
3534 /* Bit mask controlling how hard we retry per-node allocations.*/
3589 * | 256G 2node | 358ms | 215ms | 157ms | 134ms | 126ms |
3590 * | 2T 4node | 979ms | 679ms | 543ms | 489ms | 481ms |
3591 * | 50G 2node | 71ms | 44ms | 37ms | 30ms | 31ms |
3632 /* do node specific alloc */
3636 /* below will do all node balanced alloc */
3731 * Increment or decrement surplus_huge_pages. Keep node-specific counters
3738 int nr_nodes, node;
3744 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
3745 if (h->surplus_huge_pages_node[node])
3749 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3750 if (h->surplus_huge_pages_node[node] <
3751 h->nr_huge_pages_node[node])
3759 h->surplus_huge_pages_node[node] += delta;
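
The surplus-adjustment lines above implement the balancing rule spelled out in the comment: when the surplus count is lowered, pick a node that still has surplus pages to give back; when it is raised, pick a node whose surplus has not yet caught up with its total huge page count. A hedged userspace sketch, with a simple linear walk standing in for the for_each_node_mask_to_alloc/_free round-robin iteration:

    /* Illustrative surplus balancing, not the kernel code. */
    #include <stdio.h>

    #define NODES 2

    static int nr_huge[NODES]      = { 4, 4 };
    static int surplus_node[NODES] = { 2, 0 };
    static int surplus_total       = 2;

    static int adjust_surplus(int delta)        /* delta is +1 or -1 */
    {
        for (int node = 0; node < NODES; node++) {
            if (delta < 0 && !surplus_node[node])
                continue;               /* nothing to give back here */
            if (delta > 0 && surplus_node[node] >= nr_huge[node])
                continue;               /* every page here is already surplus */
            surplus_node[node] += delta;
            surplus_total      += delta;
            return 1;
        }
        return 0;                       /* no eligible node */
    }

    int main(void)
    {
        adjust_surplus(+1);
        adjust_surplus(-1);
        printf("surplus: node0=%d node1=%d total=%d\n",
               surplus_node[0], surplus_node[1], surplus_total);
        return 0;
    }
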
3774 * Bit mask controlling how hard we retry per-node allocations.
3792 * Check for a node specific request.
3793 * Changing node specific huge page count may require a corresponding
3794 * change to the global count. In any case, the passed node mask
3795 * (nodes_allowed) will restrict alloc/free to the specified node.
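
The comments above note that a node-specific request still has to be reconciled with the global pool size, while nodes_allowed keeps the actual alloc/free confined to that node. A rough worked example of the reconciliation follows; the exact formula is an assumption, the gist being "keep the persistent pages the other nodes already hold and retarget only this node":

    /* Illustrative arithmetic only. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long global_persistent = 100;  /* persistent pages, all nodes */
        unsigned long node_persistent   = 30;   /* persistent pages on node 1  */
        unsigned long node_request      = 50;   /* new target for node 1       */

        unsigned long global_target = node_request +
                                      (global_persistent - node_persistent);

        printf("global pool target: %lu\n", global_target);    /* 120 */
        return 0;
    }
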
4012 int nr_nodes, node;
4023 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
4024 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
4396 * node_hstate/s - associate per node hstate attributes, via their kobjects,
4397 * with node devices in node_devices[] using a parallel array. The array
4398 * index of a node device or _hstate == node id.
4399 * This is here to avoid any static dependency of the node device driver, in
4409 * A subset of global hstate attributes for node devices
4423 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
4424 * Returns node id via non-NULL nidp.
4446 * Unregister hstate attributes from a single node device.
4449 void hugetlb_unregister_node(struct node *node)
4452 struct node_hstate *nhs = &node_hstates[node->dev.id];
4476 * Register hstate attributes for a single node device.
4479 void hugetlb_register_node(struct node *node)
4482 struct node_hstate *nhs = &node_hstates[node->dev.id];
4492 &node->dev.kobj);
4501 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
4502 h->name, node->dev.id);
4503 hugetlb_unregister_node(node);
4510 * hugetlb init time: register hstate attributes for all registered node
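
The node_hstate/s comment above describes a parallel array indexed by node id, which is what lets kobj_to_node_hstate() recover both the hstate and the node id from an attribute kobject. A userspace model of that reverse lookup, with stand-in types instead of the kernel structures:

    /*
     * Parallel-array lookup: find which (node, hstate) slot a kobject
     * pointer lives in; the outer index of the slot is the node id.
     */
    #include <stdio.h>

    #define MAX_NODES        2
    #define HUGE_MAX_HSTATE  2

    struct kobj { int dummy; };

    static struct kobj *hstate_kobjs[MAX_NODES][HUGE_MAX_HSTATE];

    static int kobj_to_hstate_idx(struct kobj *kobj, int *nidp)
    {
        for (int nid = 0; nid < MAX_NODES; nid++)
            for (int i = 0; i < HUGE_MAX_HSTATE; i++)
                if (hstate_kobjs[nid][i] == kobj) {
                    *nidp = nid;
                    return i;           /* hstate index */
                }
        return -1;
    }

    int main(void)
    {
        static struct kobj k;
        int nid;

        hstate_kobjs[1][0] = &k;        /* registered for node 1 */
        printf("hstate %d on node %d\n", kobj_to_hstate_idx(&k, &nid), nid);
        return 0;
    }
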
4704 int node = NUMA_NO_NODE;
4735 /* Parameter is node format */
4738 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4743 node = array_index_nospec(tmp, MAX_NUMNODES);
4749 default_hugepages_in_node[node] = tmp;
4751 parsed_hstate->max_huge_pages_node[node] = tmp;
4753 /* Go to parse next node*/
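
The lines above parse the node-format variant of the hugepages= boot parameter, filling per-node counts and clamping the node index with array_index_nospec() against speculative out-of-bounds use. A userspace sketch of that parse loop, assuming the documented <node>:<count>[,<node>:<count>] form, with a plain bounds check standing in for array_index_nospec():

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_NUMNODES 4

    int main(void)
    {
        char buf[] = "0:64,1:128";      /* e.g. hugepages=0:64,1:128 */
        unsigned long per_node[MAX_NUMNODES] = { 0 };
        char *p = buf;

        while (*p) {
            unsigned long node = strtoul(p, &p, 10);

            if (*p != ':' || node >= MAX_NUMNODES) {
                fprintf(stderr, "invalid node spec\n");
                break;
            }
            per_node[node] = strtoul(p + 1, &p, 10);
            if (*p == ',')
                p++;                    /* go to parse the next node */
        }

        for (int n = 0; n < MAX_NUMNODES; n++)
            printf("node %d: %lu huge pages\n", n, per_node[n]);
        return 0;
    }
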
4903 int node;
4910 for_each_node_mask(node, cpuset_current_mems_allowed) {
4911 if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4912 nr += array[node];
5140 * task or memory node can be dynamically moved between cpusets.
5149 * also determines from which node the kernel will allocate memory
6642 int node;
6645 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
6646 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
7662 * Also note that we have to transfer the per-node surplus state
7664 * the per-node's.
7675 * There is no need to transfer the per-node surplus state
7676 * when we do not cross the node.
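
The comments above, from the migration path, say the per-node surplus accounting has to follow the page when it lands on a different node and can be skipped when it does not cross nodes. A minimal sketch of that hand-off (illustrative, not the kernel code): the global surplus is unchanged, only the node it is attributed to moves.

    #include <stdio.h>

    #define NODES 2

    static int surplus_node[NODES] = { 1, 0 };

    static void transfer_surplus(int old_nid, int new_nid)
    {
        if (old_nid == new_nid)
            return;                     /* did not cross a node */
        if (surplus_node[old_nid]) {
            surplus_node[old_nid]--;
            surplus_node[new_nid]++;
        }
    }

    int main(void)
    {
        transfer_surplus(0, 1);         /* page migrated from node 0 to node 1 */
        printf("surplus: node0=%d node1=%d\n", surplus_node[0], surplus_node[1]);
        return 0;
    }
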
7810 pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
7817 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
7843 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7874 pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
7880 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
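
The hugetlb_cma messages above come from carving the requested CMA reservation into per-node areas, where each node's share must be large enough for at least one gigantic page to be useful. A rough sketch of that sizing check; the even split is an assumption (the kernel also accepts explicit per-node sizes):

    /* Illustrative sizing arithmetic only. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long total_mib    = 5120;      /* e.g. hugetlb_cma=5G  */
        unsigned long gigantic_mib = 1024;      /* 1 GiB gigantic page  */
        int           nr_nodes     = 2;

        unsigned long per_node = total_mib / nr_nodes;  /* remainder handling omitted */

        if (per_node < gigantic_mib)
            printf("cma area per node should be at least %lu MiB\n", gigantic_mib);
        else
            printf("reserve %lu MiB total, up to %lu MiB per node\n",
                   total_mib, per_node);
        return 0;
    }
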