1/* 2 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org> 3 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: --- 35 unchanged lines hidden (view full) --- 44#include <sys/mutex.h> 45#include <sys/ktr.h> 46#include <vm/uma.h> 47#include <vm/vm.h> 48#include <vm/vm_param.h> 49#include <vm/vm_extern.h> 50#include <vm/vm_kern.h> 51#include <vm/vm_page.h> |
52#ifndef UMA_MD_SMALL_ALLOC 53#include <vm/vm_map.h> 54#endif |
55#include <vm/vm_radix.h> 56#include <vm/vm_object.h> 57 58#include <sys/kdb.h> 59 |
#ifndef UMA_MD_SMALL_ALLOC
/*
 * No direct map: radix nodes are carved out of a dedicated kernel submap.
 * VM_RADIX_RNODE_MAP_SCALE is the default cap on the number of nodes that
 * submap may back; it can be overridden at boot via the
 * "hw.rnode_map_scale" tunable (see vm_radix_init()).
 */
#define	VM_RADIX_RNODE_MAP_SCALE	(1024 * 1024 / 2)
/* log2 of children per node: 16-way trie in the submap case. */
#define	VM_RADIX_WIDTH	4

/*
 * Mask for the bits of the root pointer that encode the tree height:
 * the smallest all-ones mask large enough to hold VM_RADIX_LIMIT
 * (enforced by the CTASSERT below).
 */
#define	VM_RADIX_HEIGHT	0x1f
#else
/* Direct map available: a wider, 32-way trie is used. */
#define	VM_RADIX_WIDTH	5

/* See the comment above. */
#define	VM_RADIX_HEIGHT	0xf
#endif

#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)	/* Children per node. */
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)	/* Per-level slot mask. */
#define	VM_RADIX_MAXVAL	((vm_pindex_t)-1)	/* Largest page index. */
/* Levels needed so that VM_RADIX_WIDTH-bit digits cover a vm_pindex_t. */
#define	VM_RADIX_LIMIT	howmany((sizeof(vm_pindex_t) * NBBY), VM_RADIX_WIDTH)

/* Flag bits stored in node pointers. */
#define	VM_RADIX_FLAGS	0x3

/* Calculates maximum value for a tree of height h. */
#define	VM_RADIX_MAX(h)	\
	((h) == VM_RADIX_LIMIT ? VM_RADIX_MAXVAL :	\
	(((vm_pindex_t)1 << ((h) * VM_RADIX_WIDTH)) - 1))

/* The height field must be able to represent a full-depth tree. */
CTASSERT(VM_RADIX_HEIGHT >= VM_RADIX_LIMIT);
/* A u_int must provide at least one bit position per tree level. */
CTASSERT((sizeof(u_int) * NBBY) >= VM_RADIX_LIMIT);

struct vm_radix_node {
	void *rn_child[VM_RADIX_COUNT];	/* Child nodes. */
	/*
	 * Valid children.  volatile: apparently read without the tree
	 * lock held — NOTE(review): confirm against the lookup paths,
	 * which are not visible in this chunk.
	 */
	volatile uint32_t rn_count;
};

/* The custom allocator below backs each node with a single page. */
CTASSERT(sizeof(struct vm_radix_node) < PAGE_SIZE);

static uma_zone_t vm_radix_node_zone;

#ifndef UMA_MD_SMALL_ALLOC
static vm_map_t rnode_map;		/* Submap backing radix nodes. */
static u_long rnode_map_scale;		/* Max nodes; boot-time tunable. */

/*
 * UMA backend allocator for radix-node slabs on architectures without
 * UMA_MD_SMALL_ALLOC: reserve a page-sized VA range in the dedicated
 * rnode_map submap and back it with one wired physical page.
 * Returns the mapped address, or NULL if either the VA reservation or
 * the page allocation fails.
 */
static void *
vm_radix_node_zone_allocf(uma_zone_t zone, int size, uint8_t *flags, int wait)
{
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	/* Inform UMA that this allocator uses rnode_map. */
	*flags = UMA_SLAB_KERNEL;

	pflags = VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;

	/*
	 * kmem_alloc_nofault() below can fail regardless of the caller's
	 * wait flags, so act as if M_NOWAIT were set: derive the page
	 * allocation class from the flags, but do not sleep here.
	 */
	pflags |= ((wait & M_USE_RESERVE) != 0) ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((wait & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	addr = kmem_alloc_nofault(rnode_map, size);
	if (addr == 0)
		return (NULL);

	/*
	 * Just one page allocation is assumed here: the CTASSERT above
	 * guarantees a node fits in a single page.
	 */
	m = vm_page_alloc(NULL, OFF_TO_IDX(addr - VM_MIN_KERNEL_ADDRESS),
	    pflags);
	if (m == NULL) {
		/* Give the unused VA range back to the submap. */
		kmem_free(rnode_map, addr, size);
134 return (NULL); 135 } 136 if ((wait & M_ZERO) != 0 && (m->flags & PG_ZERO) == 0) 137 pmap_zero_page(m); 138 pmap_qenter(addr, &m, 1); 139 return ((void *)addr); 140} 141 --- 4 unchanged lines hidden (view full) --- 146 vm_offset_t voitem; 147 148 MPASS((flags & UMA_SLAB_KERNEL) != 0); 149 150 /* Just one page allocation is assumed here. */ 151 voitem = (vm_offset_t)item; 152 m = PHYS_TO_VM_PAGE(pmap_kextract(voitem)); 153 pmap_qremove(voitem, 1); |
	/* Undo the wiring taken at allocation time and free the page. */
	vm_page_lock(m);
	vm_page_unwire(m, 0);
	vm_page_free(m);
	vm_page_unlock(m);
	/* Release the VA range back to the radix-node submap. */
	kmem_free(rnode_map, voitem, size);
}

/*
 * SYSINIT hook: cap the radix-node zone at the configured submap scale
 * and install the custom submap-backed allocator/free routines.
 */
static void
init_vm_radix_alloc(void *dummy __unused)
{

	/* Cap the zone so it cannot outgrow the rnode_map submap. */
	uma_zone_set_max(vm_radix_node_zone, rnode_map_scale);
166 uma_zone_set_allocf(vm_radix_node_zone, vm_radix_node_zone_allocf); 167 uma_zone_set_freef(vm_radix_node_zone, vm_radix_node_zone_freef); 168} 169SYSINIT(vm_radix, SI_SUB_KMEM, SI_ORDER_SECOND, init_vm_radix_alloc, NULL); 170#endif 171 172/* 173 * Radix node zone destructor. --- 36 unchanged lines hidden (view full) --- 210 */ 211static __inline int 212vm_radix_slot(vm_pindex_t index, int level) 213{ 214 215 return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK); 216} 217 |
/*
 * Initialize the radix node submap (for architectures not supporting
 * direct-mapping) and the radix node zone.
 *
 * WITNESS reports a lock order reversal, for architectures not
 * supporting direct-mapping, between the "system map" lock
 * and the "vm object" lock.  This is because the well established ordering
 * "system map" -> "vm object" is not honoured in this case as allocating
 * from the radix node submap ends up adding a mapping entry to it, meaning
 * it is necessary to lock the submap.  However, the radix node submap is
 * a leaf and self-contained, thus a deadlock cannot happen here and
 * adding MTX_NOWITNESS to all map locks would be largely suboptimal.
 */
void
vm_radix_init(void)
{
#ifndef UMA_MD_SMALL_ALLOC
	vm_offset_t maxaddr, minaddr;

	/*
	 * Size the radix-node submap from the default scale, allowing a
	 * boot-time override, and mark it as a system map.
	 */
	rnode_map_scale = VM_RADIX_RNODE_MAP_SCALE;
	TUNABLE_ULONG_FETCH("hw.rnode_map_scale", &rnode_map_scale);
	rnode_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    rnode_map_scale * sizeof(struct vm_radix_node), FALSE);
	rnode_map->system_map = 1;
#endif

244 vm_radix_node_zone = uma_zcreate("RADIX NODE", 245 sizeof(struct vm_radix_node), NULL, 246#ifdef INVARIANTS 247 vm_radix_node_zone_dtor, 248#else 249 NULL, 250#endif 251 NULL, NULL, VM_RADIX_HEIGHT, UMA_ZONE_VM); --- 639 unchanged lines hidden --- |