--- vm_radix.c	(232631)
+++ vm_radix.c	(233034)
 /*
  * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
  * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:

--- 35 unchanged lines hidden ---

 #include <sys/mutex.h>
 #include <sys/ktr.h>
 #include <vm/uma.h>
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_page.h>
+#ifndef UMA_MD_SMALL_ALLOC
+#include <vm/vm_map.h>
+#endif
 #include <vm/vm_radix.h>
 #include <vm/vm_object.h>
 
 #include <sys/kdb.h>
 
+#ifndef UMA_MD_SMALL_ALLOC
+#define	VM_RADIX_RNODE_MAP_SCALE	(1024 * 1024 / 2)
+#define	VM_RADIX_WIDTH	4
+
+/*
+ * Bits of height in root.
+ * The mask of smaller power of 2 containing VM_RADIX_LIMIT.
+ */
+#define	VM_RADIX_HEIGHT	0x1f
+#else
 #define	VM_RADIX_WIDTH	5
+
+/* See the comment above. */
+#define	VM_RADIX_HEIGHT	0xf
+#endif
+
 #define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
 #define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
 #define	VM_RADIX_MAXVAL	((vm_pindex_t)-1)
 #define	VM_RADIX_LIMIT	howmany((sizeof(vm_pindex_t) * NBBY), VM_RADIX_WIDTH)
 
 /* Flag bits stored in node pointers. */
 #define	VM_RADIX_FLAGS	0x3
 
-/* Bits of height in root. */
-#define	VM_RADIX_HEIGHT	0xf
-
 /* Calculates maximum value for a tree of height h. */
 #define	VM_RADIX_MAX(h)						\
 	((h) == VM_RADIX_LIMIT ? VM_RADIX_MAXVAL :		\
 	(((vm_pindex_t)1 << ((h) * VM_RADIX_WIDTH)) - 1))
 
 CTASSERT(VM_RADIX_HEIGHT >= VM_RADIX_LIMIT);
 CTASSERT((sizeof(u_int) * NBBY) >= VM_RADIX_LIMIT);
 
 struct vm_radix_node {
 	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
 	volatile uint32_t rn_count;			/* Valid children. */
 };
 
 CTASSERT(sizeof(struct vm_radix_node) < PAGE_SIZE);
 
 static uma_zone_t vm_radix_node_zone;
 
 #ifndef UMA_MD_SMALL_ALLOC
+static vm_map_t rnode_map;
+static u_long rnode_map_scale;
+
 static void *
 vm_radix_node_zone_allocf(uma_zone_t zone, int size, uint8_t *flags, int wait)
 {
 	vm_offset_t addr;
 	vm_page_t m;
 	int pflags;
 
-	/* Inform UMA that this allocator uses kernel_map. */
+	/* Inform UMA that this allocator uses rnode_map. */
 	*flags = UMA_SLAB_KERNEL;
 
 	pflags = VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
 
 	/*
 	 * As kmem_alloc_nofault() can however fail, let just assume that
 	 * M_NOWAIT is on and act accordingly.
 	 */
 	pflags |= ((wait & M_USE_RESERVE) != 0) ? VM_ALLOC_INTERRUPT :
 	    VM_ALLOC_SYSTEM;
 	if ((wait & M_ZERO) != 0)
 		pflags |= VM_ALLOC_ZERO;
-	addr = kmem_alloc_nofault(kernel_map, size);
+	addr = kmem_alloc_nofault(rnode_map, size);
 	if (addr == 0)
 		return (NULL);
 
 	/* Just one page allocation is assumed here. */
 	m = vm_page_alloc(NULL, OFF_TO_IDX(addr - VM_MIN_KERNEL_ADDRESS),
 	    pflags);
 	if (m == NULL) {
-		kmem_free(kernel_map, addr, size);
+		kmem_free(rnode_map, addr, size);
 		return (NULL);
 	}
 	if ((wait & M_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
 	pmap_qenter(addr, &m, 1);
 	return ((void *)addr);
 }
 
--- 4 unchanged lines hidden ---

 	vm_offset_t voitem;
 
 	MPASS((flags & UMA_SLAB_KERNEL) != 0);
 
 	/* Just one page allocation is assumed here. */
 	voitem = (vm_offset_t)item;
 	m = PHYS_TO_VM_PAGE(pmap_kextract(voitem));
 	pmap_qremove(voitem, 1);
+	vm_page_lock(m);
+	vm_page_unwire(m, 0);
 	vm_page_free(m);
-	kmem_free(kernel_map, voitem, size);
+	vm_page_unlock(m);
+	kmem_free(rnode_map, voitem, size);
 }
 
 static void
 init_vm_radix_alloc(void *dummy __unused)
 {
 
+	uma_zone_set_max(vm_radix_node_zone, rnode_map_scale);
 	uma_zone_set_allocf(vm_radix_node_zone, vm_radix_node_zone_allocf);
 	uma_zone_set_freef(vm_radix_node_zone, vm_radix_node_zone_freef);
 }
 SYSINIT(vm_radix, SI_SUB_KMEM, SI_ORDER_SECOND, init_vm_radix_alloc, NULL);
 #endif
 
 /*
  * Radix node zone destructor.

--- 36 unchanged lines hidden ---

  */
 static __inline int
 vm_radix_slot(vm_pindex_t index, int level)
 {
 
 	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
 }
 
+/*
+ * Initialize the radix node submap (for architectures not supporting
+ * direct-mapping) and the radix node zone.
+ *
+ * WITNESS reports a lock order reversal, for architectures not
+ * supporting direct-mapping, between the "system map" lock and the
+ * "vm object" lock.  This is because the well-established ordering
+ * "system map" -> "vm object" is not honoured in this case, as allocating
+ * from the radix node submap ends up adding a mapping entry to it, meaning
+ * it is necessary to lock the submap.  However, the radix node submap is
+ * a leaf and self-contained, thus a deadlock cannot happen here, and
+ * adding MTX_NOWITNESS to all map locks would be largely sub-optimal.
+ */
 void
 vm_radix_init(void)
 {
+#ifndef UMA_MD_SMALL_ALLOC
+	vm_offset_t maxaddr, minaddr;
 
+	rnode_map_scale = VM_RADIX_RNODE_MAP_SCALE;
+	TUNABLE_ULONG_FETCH("hw.rnode_map_scale", &rnode_map_scale);
+	rnode_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+	    rnode_map_scale * sizeof(struct vm_radix_node), FALSE);
+	rnode_map->system_map = 1;
+#endif
+
 	vm_radix_node_zone = uma_zcreate("RADIX NODE",
 	    sizeof(struct vm_radix_node), NULL,
 #ifdef INVARIANTS
 	    vm_radix_node_zone_dtor,
 #else
 	    NULL,
 #endif
 	    NULL, NULL, VM_RADIX_HEIGHT, UMA_ZONE_VM);

--- 639 unchanged lines hidden ---
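
The slot arithmetic quoted above is compact enough to check in isolation. The following is a minimal standalone sketch, not code from either revision: it re-creates vm_radix_slot() and VM_RADIX_MAX() in userspace with the VM_RADIX_WIDTH = 4 value the new revision selects when UMA_MD_SMALL_ALLOC is not defined. Each level consumes VM_RADIX_WIDTH bits of the pindex, least-significant bits first, and VM_RADIX_MAX(h) is the largest index a tree of height h can hold.

#include <stdio.h>
#include <stdint.h>

/*
 * Userspace stand-ins for the kernel definitions quoted in the diff;
 * a 64-bit vm_pindex_t is assumed.
 */
typedef uint64_t vm_pindex_t;
#define	NBBY		8
#define	howmany(x, y)	(((x) + ((y) - 1)) / (y))

#define	VM_RADIX_WIDTH	4	/* The !UMA_MD_SMALL_ALLOC value chosen above. */
#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT	howmany((sizeof(vm_pindex_t) * NBBY), VM_RADIX_WIDTH)
#define	VM_RADIX_MAXVAL	((vm_pindex_t)-1)
#define	VM_RADIX_MAX(h)						\
	((h) == VM_RADIX_LIMIT ? VM_RADIX_MAXVAL :		\
	(((vm_pindex_t)1 << ((h) * VM_RADIX_WIDTH)) - 1))

/* Same computation as vm_radix_slot() in the diff. */
static int
vm_radix_slot(vm_pindex_t index, int level)
{

	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
}

int
main(void)
{
	vm_pindex_t index = 0xABCD;
	int level;

	/* Each level extracts one VM_RADIX_WIDTH-bit digit of the index. */
	for (level = 0; level < 4; level++)
		printf("level %d -> slot %#x\n", level,
		    vm_radix_slot(index, level));
	/* A tree of height 2 covers pindexes 0 .. VM_RADIX_MAX(2). */
	printf("VM_RADIX_MAX(2) = %#jx\n", (uintmax_t)VM_RADIX_MAX(2));
	return (0);
}

For index 0xABCD this prints slots 0xd, 0xc, 0xb, 0xa at levels 0 through 3, exactly the nibble decomposition WIDTH = 4 implies, and VM_RADIX_MAX(2) = 0xff.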
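
The sizing added to vm_radix_init() can be sanity-checked the same way. The submap reserves rnode_map_scale * sizeof(struct vm_radix_node) bytes of kernel virtual address space, while uma_zone_set_max() caps the zone at rnode_map_scale items; both limits derive from the single hw.rnode_map_scale tunable, so the zone cannot outgrow its backing submap. The sketch below, again hypothetical userspace code rather than kernel code, reproduces the arithmetic for the default scale.

#include <stdio.h>
#include <stdint.h>

/* Mirror of the node layout for the VM_RADIX_WIDTH == 4 case. */
#define	VM_RADIX_COUNT	(1 << 4)
struct vm_radix_node {
	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
	volatile uint32_t rn_count;			/* Valid children. */
};

int
main(void)
{
	unsigned long scale = 1024 * 1024 / 2;	/* VM_RADIX_RNODE_MAP_SCALE */

	/*
	 * On LP64 the node is 136 bytes (16 pointers plus the padded
	 * counter), so the default reservation works out to 68 MB of KVA.
	 */
	printf("node size: %zu bytes\n", sizeof(struct vm_radix_node));
	printf("submap reservation: %lu bytes\n",
	    scale * (unsigned long)sizeof(struct vm_radix_node));
	return (0);
}

Overriding hw.rnode_map_scale at boot, as the TUNABLE_ULONG_FETCH() call in the diff allows, scales the KVA reservation and the zone item cap together.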