Results limited to /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/mm/

Lines Matching defs:l3 (occurrences of the per-node kmem_list3 pointer l3 in the slab allocator, mm/slab.c)
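Every hit below touches the per-node bookkeeping structure of the 2.6 slab allocator. For orientation, here is a sketch of struct kmem_list3 reconstructed from the fields the matched lines use (slabs_full/slabs_partial/slabs_free, free_objects, free_limit, colour_next, list_lock, shared, alien, next_reap, free_touched); the actual declaration in this tree lives in mm/slab.c and may differ in comments or field order.

struct kmem_list3 {
	struct list_head slabs_partial;	/* slabs with some objects still free */
	struct list_head slabs_full;	/* slabs with every object allocated */
	struct list_head slabs_free;	/* completely free slabs, trim candidates */
	unsigned long free_objects;	/* free objects on this node */
	unsigned int free_limit;	/* trim slabs_free once this is exceeded */
	unsigned int colour_next;	/* per-node cache colouring cursor */
	spinlock_t list_lock;		/* protects the three lists above */
	struct array_cache *shared;	/* objects shared by CPUs of this node */
	struct array_cache **alien;	/* objects that belong to other nodes */
	unsigned long next_reap;	/* jiffies of the next cache_reap() pass */
	int free_touched;		/* slabs_free was used since the last reap */
};

Each struct kmem_cache keeps one of these per NUMA node in cachep->nodelists[], which is why nearly every match below first indexes nodelists[node] and then takes l3->list_lock before walking the slab lists.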

315 			struct kmem_list3 *l3, int tofree);
704 struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
705 if (!l3 || OFF_SLAB(s->cs_cachep))
707 lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
708 alc = l3->alien;
986 #define reap_alien(cachep, l3) do { } while (0)
1081 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1085 if (l3->alien) {
1086 struct array_cache *ac = l3->alien[node];
1116 struct kmem_list3 *l3;
1129 l3 = cachep->nodelists[node];
1131 if (l3->alien && l3->alien[nodeid]) {
1132 alien = l3->alien[nodeid];
1154 struct kmem_list3 *l3 = NULL;
1178 l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1179 if (!l3)
1181 kmem_list3_init(l3);
1182 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1190 cachep->nodelists[node] = l3;
1226 l3 = cachep->nodelists[node];
1227 BUG_ON(!l3);
1229 spin_lock_irq(&l3->list_lock);
1230 if (!l3->shared) {
1235 l3->shared = shared;
1239 if (!l3->alien) {
1240 l3->alien = alien;
1244 spin_unlock_irq(&l3->list_lock);
1294 l3 = cachep->nodelists[node];
1296 if (!l3)
1299 spin_lock_irq(&l3->list_lock);
1302 l3->free_limit -= cachep->batchcount;
1307 spin_unlock_irq(&l3->list_lock);
1311 shared = l3->shared;
1315 l3->shared = NULL;
1318 alien = l3->alien;
1319 l3->alien = NULL;
1321 spin_unlock_irq(&l3->list_lock);
1337 l3 = cachep->nodelists[node];
1338 if (!l3)
1340 drain_freelist(cachep, l3, l3->free_objects);
1945 struct kmem_list3 *l3;
1952 l3 = cachep->nodelists[i];
1953 if (l3) {
1954 kfree(l3->shared);
1955 free_alien_cache(l3->alien);
1956 kfree(l3);
2403 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2423 struct kmem_list3 *l3;
2429 l3 = cachep->nodelists[node];
2430 if (l3 && l3->alien)
2431 drain_alien_cache(cachep, l3->alien);
2435 l3 = cachep->nodelists[node];
2436 if (l3)
2437 drain_array(cachep, l3, l3->shared, 1, node);
2448 struct kmem_list3 *l3, int tofree)
2455 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2457 spin_lock_irq(&l3->list_lock);
2458 p = l3->slabs_free.prev;
2459 if (p == &l3->slabs_free) {
2460 spin_unlock_irq(&l3->list_lock);
2473 l3->free_objects -= cache->num;
2474 spin_unlock_irq(&l3->list_lock);
2486 struct kmem_list3 *l3;
2492 l3 = cachep->nodelists[i];
2493 if (!l3)
2496 drain_freelist(cachep, l3, l3->free_objects);
2498 ret += !list_empty(&l3->slabs_full) ||
2499 !list_empty(&l3->slabs_partial);
2733 struct kmem_list3 *l3;
2742 /* Take the l3 list lock to change the colour_next on this node */
2744 l3 = cachep->nodelists[nodeid];
2745 spin_lock(&l3->list_lock);
2748 offset = l3->colour_next;
2749 l3->colour_next++;
2750 if (l3->colour_next >= cachep->colour)
2751 l3->colour_next = 0;
2752 spin_unlock(&l3->list_lock);
2790 spin_lock(&l3->list_lock);
2793 list_add_tail(&slabp->list, &(l3->slabs_free));
2795 l3->free_objects += cachep->num;
2796 spin_unlock(&l3->list_lock);
2925 struct kmem_list3 *l3;
2943 l3 = cachep->nodelists[node];
2945 BUG_ON(ac->avail > 0 || !l3);
2946 spin_lock(&l3->list_lock);
2949 if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2956 entry = l3->slabs_partial.next;
2957 if (entry == &l3->slabs_partial) {
2958 l3->free_touched = 1;
2959 entry = l3->slabs_free.next;
2960 if (entry == &l3->slabs_free)
2988 list_add(&slabp->list, &l3->slabs_full);
2990 list_add(&slabp->list, &l3->slabs_partial);
2994 l3->free_objects -= ac->avail;
2996 spin_unlock(&l3->list_lock);
3280 struct kmem_list3 *l3;
3284 l3 = cachep->nodelists[nodeid];
3285 BUG_ON(!l3);
3289 spin_lock(&l3->list_lock);
3290 entry = l3->slabs_partial.next;
3291 if (entry == &l3->slabs_partial) {
3292 l3->free_touched = 1;
3293 entry = l3->slabs_free.next;
3294 if (entry == &l3->slabs_free)
3310 l3->free_objects--;
3315 list_add(&slabp->list, &l3->slabs_full);
3317 list_add(&slabp->list, &l3->slabs_partial);
3319 spin_unlock(&l3->list_lock);
3323 spin_unlock(&l3->list_lock);
3446 struct kmem_list3 *l3;
3453 l3 = cachep->nodelists[node];
3459 l3->free_objects++;
3464 if (l3->free_objects > l3->free_limit) {
3465 l3->free_objects -= cachep->num;
3474 list_add(&slabp->list, &l3->slabs_free);
3481 list_add_tail(&slabp->list, &l3->slabs_partial);
3489 struct kmem_list3 *l3;
3497 l3 = cachep->nodelists[node];
3498 spin_lock(&l3->list_lock);
3499 if (l3->shared) {
3500 struct array_cache *shared_array = l3->shared;
3519 p = l3->slabs_free.next;
3520 while (p != &(l3->slabs_free)) {
3532 spin_unlock(&l3->list_lock);
3831 struct kmem_list3 *l3;
3854 l3 = cachep->nodelists[node];
3855 if (l3) {
3856 struct array_cache *shared = l3->shared;
3858 spin_lock_irq(&l3->list_lock);
3864 l3->shared = new_shared;
3865 if (!l3->alien) {
3866 l3->alien = new_alien;
3869 l3->free_limit = (1 + nr_cpus_node(node)) *
3871 spin_unlock_irq(&l3->list_lock);
3876 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
3877 if (!l3) {
3883 kmem_list3_init(l3);
3884 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3886 l3->shared = new_shared;
3887 l3->alien = new_alien;
3888 l3->free_limit = (1 + nr_cpus_node(node)) *
3890 cachep->nodelists[node] = l3;
3900 l3 = cachep->nodelists[node];
3902 kfree(l3->shared);
3903 free_alien_cache(l3->alien);
3904 kfree(l3);
4028 * Drain an array if it contains any elements taking the l3 lock only if
4029 * necessary. Note that the l3 listlock also protects the array_cache
4032 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4042 spin_lock_irq(&l3->list_lock);
4052 spin_unlock_irq(&l3->list_lock);
4071 struct kmem_list3 *l3;
4084 * We only take the l3 lock if absolutely necessary and we
4088 l3 = searchp->nodelists[node];
4090 reap_alien(searchp, l3);
4092 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
4098 if (time_after(l3->next_reap, jiffies))
4101 l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
4103 drain_array(searchp, l3, l3->shared, 0, node);
4105 if (l3->free_touched)
4106 l3->free_touched = 0;
4110 freed = drain_freelist(searchp, l3, (l3->free_limit +
4191 struct kmem_list3 *l3;
4196 l3 = cachep->nodelists[node];
4197 if (!l3)
4201 spin_lock_irq(&l3->list_lock);
4203 list_for_each_entry(slabp, &l3->slabs_full, list) {
4209 list_for_each_entry(slabp, &l3->slabs_partial, list) {
4217 list_for_each_entry(slabp, &l3->slabs_free, list) {
4222 free_objects += l3->free_objects;
4223 if (l3->shared)
4224 shared_avail += l3->shared->avail;
4226 spin_unlock_irq(&l3->list_lock);
4428 struct kmem_list3 *l3;
4444 l3 = cachep->nodelists[node];
4445 if (!l3)
4449 spin_lock_irq(&l3->list_lock);
4451 list_for_each_entry(slabp, &l3->slabs_full, list)
4453 list_for_each_entry(slabp, &l3->slabs_partial, list)
4455 spin_unlock_irq(&l3->list_lock);
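The fragments at source lines 2448-2474 outline drain_freelist(), the helper that several paths above call to trim l3->slabs_free (see the hits at 1340, 2496 and 4110). A reconstruction assembled from those fragments and upstream 2.6 slab.c is sketched below; the copy in this tree may differ in minor details.

/*
 * Remove up to 'tofree' completely free slabs from a node's slabs_free
 * list. The list_lock is retaken on every iteration and dropped before
 * slab_destroy(), so it is never held while pages are freed.
 */
static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree)
{
	struct list_head *p;
	int nr_freed = 0;
	struct slab *slabp;

	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {

		spin_lock_irq(&l3->list_lock);
		p = l3->slabs_free.prev;
		if (p == &l3->slabs_free) {
			/* Raced with another CPU emptying the list. */
			spin_unlock_irq(&l3->list_lock);
			goto out;
		}

		slabp = list_entry(p, struct slab, list);
		list_del(&slabp->list);
		/*
		 * Safe to drop the lock: the slab is no longer reachable
		 * through the cache, only 'slabp' points to it.
		 */
		l3->free_objects -= cache->num;
		spin_unlock_irq(&l3->list_lock);
		slab_destroy(cache, slabp);
		nr_freed++;
	}
out:
	return nr_freed;
}

The same pattern of short, per-iteration critical sections shows up throughout the hits: look up cachep->nodelists[node], take l3->list_lock with interrupts disabled, move slabs between the three lists or adjust free_objects, and release the lock before any page-level work.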