/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/infiniband/core/fmr_pool.c

Lines Matching refs:pool

56  * its pool's free_list (if the FMR can be mapped again; that is,
57  * remap_count < pool->max_remaps) or its pool's dirty_list (if the
94 void (*flush_function)(struct ib_fmr_pool *pool,
113 static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
122 if (!pool->cache_bucket)
125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
137 static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
144 spin_lock_irq(&pool->pool_lock);
146 list_for_each_entry(fmr, &pool->dirty_list, list) {
159 list_splice_init(&pool->dirty_list, &unmap_list);
160 pool->dirty_len = 0;
162 spin_unlock_irq(&pool->pool_lock);
172 spin_lock_irq(&pool->pool_lock);
173 list_splice(&unmap_list, &pool->free_list);
174 spin_unlock_irq(&pool->pool_lock);
179 struct ib_fmr_pool *pool = pool_ptr;
182 if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
183 ib_fmr_batch_release(pool);
185 atomic_inc(&pool->flush_ser);
186 wake_up_interruptible(&pool->force_wait);
188 if (pool->flush_function)
189 pool->flush_function(pool, pool->flush_arg);
193 if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
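
The matches above show the flush handshake: callers bump req_ser to request work, and the cleanup thread bumps flush_ser after each ib_fmr_batch_release, waking waiters on force_wait. A sketch of that loop, reconstructed from the fragments above (illustrative, not the verbatim source):

    /* Reconstruction of ib_fmr_cleanup_thread's main loop. */
    do {
            /* A flush was requested that has not yet been completed. */
            if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                    ib_fmr_batch_release(pool);       /* unmap dirty_list in one batch */
                    atomic_inc(&pool->flush_ser);     /* publish completion */
                    wake_up_interruptible(&pool->force_wait);
                    if (pool->flush_function)         /* optional user callback */
                            pool->flush_function(pool, pool->flush_arg);
            }
            set_current_state(TASK_INTERRUPTIBLE);
            if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                !kthread_should_stop())
                    schedule();                       /* sleep until the next request */
            __set_current_state(TASK_RUNNING);
    } while (!kthread_should_stop());
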
203 * ib_create_fmr_pool - Create an FMR pool
205 * @params:FMR pool parameters
207 * Create a pool of FMRs. Return value is pointer to new pool or
214 struct ib_fmr_pool *pool;
251 pool = kmalloc(sizeof *pool, GFP_KERNEL);
252 if (!pool) {
253 printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
257 pool->cache_bucket = NULL;
259 pool->flush_function = params->flush_function;
260 pool->flush_arg = params->flush_arg;
262 INIT_LIST_HEAD(&pool->free_list);
263 INIT_LIST_HEAD(&pool->dirty_list);
266 pool->cache_bucket =
267 kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
269 if (!pool->cache_bucket) {
270 printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
276 INIT_HLIST_HEAD(pool->cache_bucket + i);
279 pool->pool_size = 0;
280 pool->max_pages = params->max_pages_per_fmr;
281 pool->max_remaps = max_remaps;
282 pool->dirty_watermark = params->dirty_watermark;
283 pool->dirty_len = 0;
284 spin_lock_init(&pool->pool_lock);
285 atomic_set(&pool->req_ser, 0);
286 atomic_set(&pool->flush_ser, 0);
287 init_waitqueue_head(&pool->force_wait);
289 pool->thread = kthread_run(ib_fmr_cleanup_thread,
290 pool,
293 if (IS_ERR(pool->thread)) {
295 ret = PTR_ERR(pool->thread);
303 .max_maps = pool->max_remaps,
308 if (pool->cache_bucket)
319 fmr->pool = pool;
332 list_add_tail(&fmr->list, &pool->free_list);
333 ++pool->pool_size;
337 return pool;
340 kfree(pool->cache_bucket);
341 kfree(pool);
346 ib_destroy_fmr_pool(pool);
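
For orientation, a minimal creation sketch. It assumes the ib_fmr_pool_param layout from <rdma/ib_fmr_pool.h> in this kernel generation; pd and all numeric values are hypothetical:

    #include <rdma/ib_fmr_pool.h>

    struct ib_fmr_pool_param params = {
            .max_pages_per_fmr = 64,          /* becomes pool->max_pages */
            .page_shift        = PAGE_SHIFT,
            .access            = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
            .pool_size         = 32,          /* FMRs preallocated onto free_list */
            .dirty_watermark   = 8,           /* dirty_len that wakes the thread */
            .flush_function    = NULL,        /* no flush callback */
            .cache             = 1,           /* allocates pool->cache_bucket */
    };
    struct ib_fmr_pool *fmr_pool = ib_create_fmr_pool(pd, &params);

    if (IS_ERR(fmr_pool))                     /* ERR_PTR on failure, as above */
            return PTR_ERR(fmr_pool);
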
353 * ib_destroy_fmr_pool - Free FMR pool
354 * @pool:FMR pool to free
356 * Destroy an FMR pool and free all associated resources.
358 void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
365 kthread_stop(pool->thread);
366 ib_fmr_batch_release(pool);
369 list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
381 if (i < pool->pool_size)
382 printk(KERN_WARNING PFX "pool still has %d regions registered\n",
383 pool->pool_size - i);
385 kfree(pool->cache_bucket);
386 kfree(pool);
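
Teardown is a single call; as lines 381-383 show, the pool warns if regions are still mapped when it is destroyed, so every map should be balanced by an unmap first:

    ib_destroy_fmr_pool(fmr_pool);    /* stops the thread, releases remaining FMRs */
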
392 * @pool:FMR pool to flush
396 int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
407 spin_lock_irq(&pool->pool_lock);
408 list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
410 list_move(&fmr->list, &pool->dirty_list);
412 spin_unlock_irq(&pool->pool_lock);
414 serial = atomic_inc_return(&pool->req_ser);
415 wake_up_process(pool->thread);
417 if (wait_event_interruptible(pool->force_wait,
418 atomic_read(&pool->flush_ser) - serial >= 0))
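
ib_flush_fmr_pool forces a batch release without waiting for dirty_watermark: it moves used entries to dirty_list, bumps req_ser, wakes the thread, and sleeps until flush_ser catches up to its serial. A hedged usage sketch (the nonzero return follows the interruptible wait shown above):

    int ret = ib_flush_fmr_pool(fmr_pool);
    if (ret)        /* interrupted while waiting for the cleanup thread */
            printk(KERN_WARNING "fmr pool flush interrupted: %d\n", ret);
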
427 * @pool:FMR pool to allocate FMR from
432 * Map an FMR from an FMR pool.
439 struct ib_fmr_pool *pool = pool_handle;
444 if (list_len < 1 || list_len > pool->max_pages)
447 spin_lock_irqsave(&pool->pool_lock, flags);
448 fmr = ib_fmr_cache_lookup(pool,
459 spin_unlock_irqrestore(&pool->pool_lock, flags);
464 if (list_empty(&pool->free_list)) {
465 spin_unlock_irqrestore(&pool->pool_lock, flags);
469 fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
472 spin_unlock_irqrestore(&pool->pool_lock, flags);
478 spin_lock_irqsave(&pool->pool_lock, flags);
479 list_add(&fmr->list, &pool->free_list);
480 spin_unlock_irqrestore(&pool->pool_lock, flags);
490 if (pool->cache_bucket) {
495 spin_lock_irqsave(&pool->pool_lock, flags);
497 pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
498 spin_unlock_irqrestore(&pool->pool_lock, flags);
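
Mapping first tries ib_fmr_cache_lookup (when the pool was created with cache = 1) and only falls back to a real remap from free_list on a miss. A hypothetical caller, assuming the later-2.6 signature with io_virtual_address passed by value; dma_page0, dma_page1, and iova stand in for real page-aligned DMA addresses:

    u64 page_list[2] = { dma_page0, dma_page1 };   /* hypothetical DMA pages */
    struct ib_pool_fmr *fmr;

    fmr = ib_fmr_pool_map_phys(fmr_pool, page_list, 2, iova);
    if (IS_ERR(fmr))                               /* no free FMR, or map failed */
            return PTR_ERR(fmr);
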
514 struct ib_fmr_pool *pool;
517 pool = fmr->pool;
519 spin_lock_irqsave(&pool->pool_lock, flags);
523 if (fmr->remap_count < pool->max_remaps) {
524 list_add_tail(&fmr->list, &pool->free_list);
526 list_add_tail(&fmr->list, &pool->dirty_list);
527 if (++pool->dirty_len >= pool->dirty_watermark) {
528 atomic_inc(&pool->req_ser);
529 wake_up_process(pool->thread);
540 spin_unlock_irqrestore(&pool->pool_lock, flags);
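
Unmapping returns the FMR to the pool rather than to the HCA: while remap_count < max_remaps it goes straight back onto free_list; otherwise it lands on dirty_list, and crossing dirty_watermark wakes the cleanup thread (lines 523-529 above). Completing the sketch:

    ib_fmr_pool_unmap(fmr);    /* usually just a list move under pool_lock */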