Deleted: vm_radix.c (247233)
Added: vm_radix.c (247742)
1/*
2 * Copyright (c) 2013 EMC Corp.
3 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
4 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 31 unchanged lines hidden ---

40 * and vice-versa.
41 * - On average there are not many complete levels, so level
42 * compression may just complicate things.
43 */
44
45#include <sys/cdefs.h>
46
47#include "opt_ddb.h"
1/*
2 * Copyright (c) 2013 EMC Corp.
3 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
4 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 31 unchanged lines hidden (view full) ---

40 * and vice-versa.
41 * - On average there are not many complete levels, so level
42 * compression may just complicate things.
43 */
44
45#include <sys/cdefs.h>
46
47#include "opt_ddb.h"
48#include "opt_vm.h"
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/kernel.h>
53#include <sys/vmmeter.h>
54
55#include <vm/uma.h>
56#include <vm/vm.h>
57#include <vm/vm_param.h>
58#include <vm/vm_page.h>
59#include <vm/vm_radix.h>
60
61#ifdef DDB
62#include <ddb/ddb.h>
63#endif
64
48
49#include <sys/param.h>
50#include <sys/systm.h>
51#include <sys/kernel.h>
52#include <sys/vmmeter.h>
53
54#include <vm/uma.h>
55#include <vm/vm.h>
56#include <vm/vm_param.h>
57#include <vm/vm_page.h>
58#include <vm/vm_radix.h>
59
60#ifdef DDB
61#include <ddb/ddb.h>
62#endif
63
65#ifndef VM_RADIX_BOOT_CACHE
66#define VM_RADIX_BOOT_CACHE 150
67#endif
68
69/*
70 * These sizes should keep a node's children contained within a single
71 * cache line, or at least keep them from spanning many cache lines.
72 * Sparse tries, in any case, should be compressed properly, so that
73 * the occasional extra level is not a big deal.
74 */
75#ifdef __LP64__
76#define VM_RADIX_WIDTH 4

--- 20 unchanged lines hidden ---

97 vm_pindex_t rn_owner; /* Owner of record. */
98 uint16_t rn_count; /* Valid children. */
99 uint16_t rn_clev; /* Current level. */
100};
101
102static uma_zone_t vm_radix_node_zone;
103
104/*
64/*
65 * These sizes should keep a node's children contained within a single
66 * cache line, or at least keep them from spanning many cache lines.
67 * Sparse tries, in any case, should be compressed properly, so that
68 * the occasional extra level is not a big deal.
69 */
70#ifdef __LP64__
71#define VM_RADIX_WIDTH 4
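The 20 hidden lines below presumably derive the per-node constants from VM_RADIX_WIDTH. A minimal sketch of that arithmetic, assuming the conventional macro names VM_RADIX_COUNT and VM_RADIX_MASK (used elsewhere in this file but not visible in this hunk):

#define VM_RADIX_COUNT (1 << VM_RADIX_WIDTH) /* 16 children per node on LP64. */
#define VM_RADIX_MASK (VM_RADIX_COUNT - 1) /* 0xf, masks one 4-bit digit. */

/*
 * On LP64 this gives 16 child pointers * 8 bytes = 128 bytes of children
 * per node, i.e. two 64-byte cache lines, which is what the sizing
 * comment above is aiming for.
 */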

--- 20 unchanged lines hidden ---

92 vm_pindex_t rn_owner; /* Owner of record. */
93 uint16_t rn_count; /* Valid children. */
94 uint16_t rn_clev; /* Current level. */
95};
96
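Only the tail of struct vm_radix_node survives this hunk; the child-pointer array sits in the hidden lines above. A sketch of the likely complete layout, with the rn_child member's name and position assumed rather than taken from this diff:

struct vm_radix_node {
	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes (assumed). */
	vm_pindex_t	 rn_owner;			/* Owner of record. */
	uint16_t	 rn_count;			/* Valid children. */
	uint16_t	 rn_clev;			/* Current level. */
};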
97static uma_zone_t vm_radix_node_zone;
98
99/*
105 * Boot-time cache of struct vm_radix_node objects.
106 * This cache serves node allocations issued before the UMA zone is
107 * actually set up and pre-allocated (i.e. before pmap_init()).
108 */
109static u_int boot_cache_cnt;
110static struct vm_radix_node boot_cache[VM_RADIX_BOOT_CACHE];
111
112static struct vm_radix_node *
113vm_radix_carve_bootcache(void)
114{
115 struct vm_radix_node *rnode;
116
117 if (boot_cache_cnt == VM_RADIX_BOOT_CACHE)
118 panic("%s: Increase VM_RADIX_BOOT_CACHE (%u)", __func__,
119 VM_RADIX_BOOT_CACHE);
120 rnode = &boot_cache[boot_cache_cnt];
121 boot_cache_cnt++;
122 return (rnode);
123}
124
125/*
126 * Allocate a radix node. Pre-allocation ensures that the request will
127 * always be satisfied successfully.
128 */
129static __inline struct vm_radix_node *
130vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
131{
132 struct vm_radix_node *rnode;
133
100 * Allocate a radix node. Pre-allocation ensures that the request will
101 * always be satisfied successfully.
102 */
103static __inline struct vm_radix_node *
104vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
105{
106 struct vm_radix_node *rnode;
107
134 if (__predict_false(boot_cache_cnt <= VM_RADIX_BOOT_CACHE))
135 rnode = vm_radix_carve_bootcache();
136 else {
137 rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO);
108 rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO);
138
109
139 /*
140 * The required number of nodes should already have been
141 * pre-allocated in vm_radix_init(). However, UMA can reserve
142 * a few nodes in per-CPU buckets, which will not be accessible
143 * from the current CPU. The allocation may therefore return
144 * NULL when the pre-allocation pool is close to exhaustion.
145 * In practice this should never be a problem: a new node is
146 * not always required for an insert, so the pre-allocation
147 * pool should already contain some slack that indirectly
148 * covers this situation.
149 */
150 if (rnode == NULL)
151 panic("%s: uma_zalloc() returned NULL for a new node",
152 __func__);
153 }
110 /*
111 * The required number of nodes should already have been
112 * pre-allocated by vm_radix_prealloc(). However, UMA can
113 * reserve a few nodes in per-CPU buckets, which will not be
114 * accessible from the current CPU. The allocation may therefore
115 * return NULL when the pre-allocation pool is close to
116 * exhaustion. In practice this should never be a problem: a
117 * new node is not always required for an insert, so the
118 * pre-allocation pool should already contain some slack that
119 * indirectly covers this situation.
120 */
121 if (rnode == NULL)
122 panic("%s: uma_zalloc() returned NULL for a new node",
123 __func__);
154 rnode->rn_owner = owner;
155 rnode->rn_count = count;
156 rnode->rn_clev = clevel;
157 return (rnode);
158}
159
160/*
161 * Free radix node.
162 */
163static __inline void
164vm_radix_node_put(struct vm_radix_node *rnode)
165{
166
124 rnode->rn_owner = owner;
125 rnode->rn_count = count;
126 rnode->rn_clev = clevel;
127 return (rnode);
128}
129
130/*
131 * Free radix node.
132 */
133static __inline void
134vm_radix_node_put(struct vm_radix_node *rnode)
135{
136
167 if (__predict_false(rnode >= boot_cache &&
168 rnode < &boot_cache[VM_RADIX_BOOT_CACHE]))
169 return;
170 uma_zfree(vm_radix_node_zone, rnode);
171}
172
173/*
174 * Return the position in the array for a given level.
175 */
176static __inline int
177vm_radix_slot(vm_pindex_t index, uint16_t level)

--- 184 unchanged lines hidden ---

362 rnode->rn_count));
363}
364#endif
365
366/*
367 * Pre-allocate intermediate nodes from the UMA slab zone.
368 */
369static void
137 uma_zfree(vm_radix_node_zone, rnode);
138}
139
140/*
141 * Return the position in the array for a given level.
142 */
143static __inline int
144vm_radix_slot(vm_pindex_t index, uint16_t level)

--- 184 unchanged lines hidden ---
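The body of vm_radix_slot() falls inside the hidden lines above. Given VM_RADIX_WIDTH and a conventional VM_RADIX_MASK, it plausibly reduces to a shift-and-mask; this is a sketch only, since the hidden code may orient levels differently:

static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{

	/* Select the VM_RADIX_WIDTH-bit digit of 'index' for this level. */
	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
}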

329 rnode->rn_count));
330}
331#endif
332
333/*
334 * Pre-allocate intermediate nodes from the UMA slab zone.
335 */
336static void
370vm_radix_init(void *arg __unused)
337vm_radix_prealloc(void *arg __unused)
371{
372
338{
339
340 if (!uma_zone_reserve_kva(vm_radix_node_zone, cnt.v_page_count))
341 panic("%s: unable to reserve KVA for the zone", __func__);
342 uma_prealloc(vm_radix_node_zone, cnt.v_page_count);
343}
344SYSINIT(vm_radix_prealloc, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_prealloc,
345 NULL);
346
347/*
348 * Initialize the UMA slab zone.
349 * Until vm_radix_prealloc() is called, the zone will be served by the
350 * UMA boot-time pre-allocated pool of pages.
351 */
352void
353vm_radix_init(void)
354{
355
373 vm_radix_node_zone = uma_zcreate("RADIX NODE",
374 sizeof(struct vm_radix_node), NULL,
375#ifdef INVARIANTS
376 vm_radix_node_zone_dtor,
377#else
378 NULL,
379#endif
380 NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_NOFREE);
356 vm_radix_node_zone = uma_zcreate("RADIX NODE",
357 sizeof(struct vm_radix_node), NULL,
358#ifdef INVARIANTS
359 vm_radix_node_zone_dtor,
360#else
361 NULL,
362#endif
363 NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_NOFREE);
381 if (!uma_zone_reserve_kva(vm_radix_node_zone, cnt.v_page_count))
382 panic("%s: unable to reserve KVA for the zone", __func__);
383 uma_prealloc(vm_radix_node_zone, cnt.v_page_count);
384 boot_cache_cnt = VM_RADIX_BOOT_CACHE + 1; /* Retire the boot cache. */
385}
364}
386SYSINIT(vm_radix_init, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_init, NULL);
387
388/*
389 * Inserts the key-value pair into the trie.
390 * Panics if the key already exists.
391 */
392void
393vm_radix_insert(struct vm_radix *rtree, vm_pindex_t index, vm_page_t page)
394{

--- 378 unchanged lines hidden ---
365
366/*
367 * Inserts the key-value pair into the trie.
368 * Panics if the key already exists.
369 */
370void
371vm_radix_insert(struct vm_radix *rtree, vm_pindex_t index, vm_page_t page)
372{

--- 378 unchanged lines hidden ---
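The insert path itself is among the hidden lines, but the visible prototype supports a short usage sketch. The vm_object field name and the locking context below are illustrative assumptions, not part of this diff:

/*
 * Hypothetical caller: link a page into an object's radix trie at
 * 'pindex'. Assumes the caller holds the object lock and that the
 * object embeds its trie root as 'rtree'.
 */
static void
example_object_insert(vm_object_t object, vm_pindex_t pindex, vm_page_t m)
{

	/* Panics if 'pindex' is already present, per the comment above. */
	vm_radix_insert(&object->rtree, pindex, m);
}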