ib_fmr_pool.c revision 331769
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>
#include <linux/wait.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
	IB_FMR_MAX_REMAPS = 32,

	IB_FMR_HASH_BITS  = 8,
	IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
	IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */
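
/*
 * Illustrative lifecycle of a single pool FMR (a sketch summarizing
 * the rules above, not code): map -> ref_count becomes 1; unmap ->
 * ref_count drops to 0 and the FMR moves to free_list (if
 * remap_count < max_remaps) or dirty_list (otherwise); a cache hit
 * on a free_list entry revives it (ref_count back to 1); the cleanup
 * thread unmaps everything on dirty_list, resets remap_count to 0,
 * and returns those FMRs to free_list.
 */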

struct ib_fmr_pool {
	spinlock_t                pool_lock;

	int                       pool_size;
	int                       max_pages;
	int                       max_remaps;
	int                       dirty_watermark;
	int                       dirty_len;
	struct list_head          free_list;
	struct list_head          dirty_list;
	struct hlist_head        *cache_bucket;

	void                     (*flush_function)(struct ib_fmr_pool *pool,
						   void *              arg);
	void                     *flush_arg;

	struct task_struct       *thread;

	atomic_t                  req_ser;
	atomic_t                  flush_ser;

	wait_queue_head_t         force_wait;
};

static inline u32 ib_fmr_hash(u64 first_page)
{
	return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
		IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
						      u64 *page_list,
						      int  page_list_len,
						      u64  io_virtual_address)
{
	struct hlist_head *bucket;
	struct ib_pool_fmr *fmr;

	if (!pool->cache_bucket)
		return NULL;

	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

	hlist_for_each_entry(fmr, bucket, cache_node)
		if (io_virtual_address == fmr->io_virtual_address &&
		    page_list_len      == fmr->page_list_len      &&
		    !memcmp(page_list, fmr->page_list,
			    page_list_len * sizeof *page_list))
			return fmr;

	return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
	int                 ret;
	struct ib_pool_fmr *fmr;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);

	spin_lock_irq(&pool->pool_lock);

	list_for_each_entry(fmr, &pool->dirty_list, list) {
		hlist_del_init(&fmr->cache_node);
		fmr->remap_count = 0;
		list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
		if (fmr->ref_count != 0) {
			pr_warn(PFX "Unmapping FMR %p with ref count %d\n",
				fmr, fmr->ref_count);
		}
#endif
	}

	list_splice_init(&pool->dirty_list, &unmap_list);
	pool->dirty_len = 0;

	spin_unlock_irq(&pool->pool_lock);

	if (list_empty(&unmap_list))
		return;

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);

	spin_lock_irq(&pool->pool_lock);
	list_splice(&unmap_list, &pool->free_list);
	spin_unlock_irq(&pool->pool_lock);
}

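/*
 * Flushes are coordinated with two serial numbers: a requester bumps
 * req_ser and wakes the thread; the thread bumps flush_ser after each
 * batch release.  Comparing them by signed subtraction (flush_ser -
 * req_ser < 0 means work is pending) stays correct even if the
 * counters wrap around.
 */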
static int ib_fmr_cleanup_thread(void *pool_ptr)
{
	struct ib_fmr_pool *pool = pool_ptr;

	do {
		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
			ib_fmr_batch_release(pool);

			atomic_inc(&pool->flush_ser);
			wake_up_interruptible(&pool->force_wait);

			if (pool->flush_function)
				pool->flush_function(pool, pool->flush_arg);
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
		    !kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	return 0;
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
				       struct ib_fmr_pool_param *params)
{
	struct ib_device   *device;
	struct ib_fmr_pool *pool;
	int i;
	int ret;
	int max_remaps;

	if (!params)
		return ERR_PTR(-EINVAL);

	device = pd->device;
	if (!device->alloc_fmr    || !device->dealloc_fmr  ||
	    !device->map_phys_fmr || !device->unmap_fmr) {
		pr_info(PFX "Device %s does not support FMRs\n", device->name);
		return ERR_PTR(-ENOSYS);
	}

	if (!device->attrs.max_map_per_fmr)
		max_remaps = IB_FMR_MAX_REMAPS;
	else
		max_remaps = device->attrs.max_map_per_fmr;

	pool = kmalloc(sizeof *pool, GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->cache_bucket   = NULL;
	pool->flush_function = params->flush_function;
	pool->flush_arg      = params->flush_arg;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->dirty_list);

	if (params->cache) {
		pool->cache_bucket =
			kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
				GFP_KERNEL);
		if (!pool->cache_bucket) {
			pr_warn(PFX "Failed to allocate cache in pool\n");
			ret = -ENOMEM;
			goto out_free_pool;
		}

		for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
			INIT_HLIST_HEAD(pool->cache_bucket + i);
	}

	pool->pool_size       = 0;
	pool->max_pages       = params->max_pages_per_fmr;
	pool->max_remaps      = max_remaps;
	pool->dirty_watermark = params->dirty_watermark;
	pool->dirty_len       = 0;
	spin_lock_init(&pool->pool_lock);
	atomic_set(&pool->req_ser,   0);
	atomic_set(&pool->flush_ser, 0);
	init_waitqueue_head(&pool->force_wait);

	pool->thread = kthread_run(ib_fmr_cleanup_thread,
				   pool,
				   "ib_fmr(%s)",
				   device->name);
	if (IS_ERR(pool->thread)) {
		pr_warn(PFX "couldn't start cleanup thread\n");
		ret = PTR_ERR(pool->thread);
		goto out_free_pool;
	}

	{
		struct ib_pool_fmr *fmr;
		struct ib_fmr_attr fmr_attr = {
			.max_pages  = params->max_pages_per_fmr,
			.max_maps   = pool->max_remaps,
			.page_shift = params->page_shift
		};
		int bytes_per_fmr = sizeof *fmr;

		if (pool->cache_bucket)
			bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);

		for (i = 0; i < params->pool_size; ++i) {
			fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
			if (!fmr)
				goto out_fail;

			fmr->pool             = pool;
			fmr->remap_count      = 0;
			fmr->ref_count        = 0;
			INIT_HLIST_NODE(&fmr->cache_node);

			fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
			if (IS_ERR(fmr->fmr)) {
				pr_warn(PFX "fmr_create failed for FMR %d\n",
					i);
				kfree(fmr);
				goto out_fail;
			}

			list_add_tail(&fmr->list, &pool->free_list);
			++pool->pool_size;
		}
	}

	return pool;

 out_free_pool:
	kfree(pool->cache_bucket);
	kfree(pool);

	return ERR_PTR(ret);

 out_fail:
	ib_destroy_fmr_pool(pool);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
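
/*
 * Example (an illustrative sketch, not part of this file): creating
 * a pool on an already-allocated PD.  The PD, pool geometry, and
 * flush callback here are hypothetical.
 *
 *	static void my_flush_done(struct ib_fmr_pool *pool, void *arg)
 *	{
 *		pr_info("fmr pool %p flushed\n", pool);
 *	}
 *
 *	struct ib_fmr_pool_param params = {
 *		.max_pages_per_fmr = 64,
 *		.page_shift        = PAGE_SHIFT,
 *		.access            = IB_ACCESS_LOCAL_WRITE |
 *				     IB_ACCESS_REMOTE_READ,
 *		.pool_size         = 32,
 *		.dirty_watermark   = 8,
 *		.flush_function    = my_flush_done,
 *		.flush_arg         = NULL,
 *		.cache             = 1,
 *	};
 *	struct ib_fmr_pool *pool = ib_create_fmr_pool(pd, &params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */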

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
	struct ib_pool_fmr *fmr;
	struct ib_pool_fmr *tmp;
	LIST_HEAD(fmr_list);
	int                 i;

	kthread_stop(pool->thread);
	ib_fmr_batch_release(pool);

	i = 0;
	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
		if (fmr->remap_count) {
			INIT_LIST_HEAD(&fmr_list);
			list_add_tail(&fmr->fmr->list, &fmr_list);
			ib_unmap_fmr(&fmr_list);
		}
		ib_dealloc_fmr(fmr->fmr);
		list_del(&fmr->list);
		kfree(fmr);
		++i;
	}

	if (i < pool->pool_size)
		pr_warn(PFX "pool still has %d regions registered\n",
			pool->pool_size - i);

	kfree(pool->cache_bucket);
	kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
	int serial;
	struct ib_pool_fmr *fmr, *next;

	/*
	 * The free_list holds FMRs that may have been used
	 * but have not been remapped enough times to be dirty.
	 * Put them on the dirty list now so that the cleanup
	 * thread will reap them too.
	 */
	spin_lock_irq(&pool->pool_lock);
	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
		if (fmr->remap_count > 0)
			list_move(&fmr->list, &pool->dirty_list);
	}
	spin_unlock_irq(&pool->pool_lock);

	serial = atomic_inc_return(&pool->req_ser);
	wake_up_process(pool->thread);

	if (wait_event_interruptible(pool->force_wait,
				     atomic_read(&pool->flush_ser) - serial >= 0))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
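
/*
 * Example (illustrative): force all unmapped FMRs to be invalidated
 * before their memory is handed back, e.g. in an error or teardown
 * path.  "pool" is assumed to come from ib_create_fmr_pool() above.
 *
 *	if (ib_flush_fmr_pool(pool))
 *		pr_warn("fmr pool flush interrupted\n");
 */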

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
					 u64                *page_list,
					 int                 list_len,
					 u64                 io_virtual_address)
{
	struct ib_fmr_pool *pool = pool_handle;
	struct ib_pool_fmr *fmr;
	unsigned long       flags;
	int                 result;

	if (list_len < 1 || list_len > pool->max_pages)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&pool->pool_lock, flags);
	fmr = ib_fmr_cache_lookup(pool,
				  page_list,
				  list_len,
				  io_virtual_address);
	if (fmr) {
		/* found in cache */
		++fmr->ref_count;
		if (fmr->ref_count == 1)
			list_del(&fmr->list);

		spin_unlock_irqrestore(&pool->pool_lock, flags);

		return fmr;
	}

	if (list_empty(&pool->free_list)) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return ERR_PTR(-EAGAIN);
	}

	fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
	list_del(&fmr->list);
	hlist_del_init(&fmr->cache_node);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
				 io_virtual_address);

	if (result) {
		spin_lock_irqsave(&pool->pool_lock, flags);
		list_add(&fmr->list, &pool->free_list);
		spin_unlock_irqrestore(&pool->pool_lock, flags);

		pr_warn(PFX "fmr_map returns %d\n", result);

		return ERR_PTR(result);
	}

	++fmr->remap_count;
	fmr->ref_count = 1;

	if (pool->cache_bucket) {
		fmr->io_virtual_address = io_virtual_address;
		fmr->page_list_len      = list_len;
		memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

		spin_lock_irqsave(&pool->pool_lock, flags);
		hlist_add_head(&fmr->cache_node,
			       pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
		spin_unlock_irqrestore(&pool->pool_lock, flags);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
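
/*
 * Example (illustrative): mapping a small page list and using the
 * resulting keys.  "pool", "iova", and the DMA addresses are
 * hypothetical; real callers obtain the addresses from the DMA API.
 *
 *	u64 pages[2] = { dma_addr0, dma_addr1 };
 *	struct ib_pool_fmr *fmr;
 *
 *	fmr = ib_fmr_pool_map_phys(pool, pages, 2, iova);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *
 * On success, fmr->fmr->lkey and fmr->fmr->rkey are valid for work
 * requests until the FMR is unmapped and flushed or reused.
 */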

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
	struct ib_fmr_pool *pool;
	unsigned long flags;

	pool = fmr->pool;

	spin_lock_irqsave(&pool->pool_lock, flags);

	--fmr->ref_count;
	if (!fmr->ref_count) {
		if (fmr->remap_count < pool->max_remaps) {
			list_add_tail(&fmr->list, &pool->free_list);
		} else {
			list_add_tail(&fmr->list, &pool->dirty_list);
			if (++pool->dirty_len >= pool->dirty_watermark) {
				atomic_inc(&pool->req_ser);
				wake_up_process(pool->thread);
			}
		}
	}

#ifdef DEBUG
	if (fmr->ref_count < 0)
		pr_warn(PFX "FMR %p has ref count %d < 0\n",
			fmr, fmr->ref_count);
#endif

	spin_unlock_irqrestore(&pool->pool_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);
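
/*
 * Example (illustrative): releasing a mapping once I/O has completed.
 * The FMR stays in the cache and may be revived by a later
 * ib_fmr_pool_map_phys() with the same page list, so pair every
 * successful map with exactly one unmap.
 *
 *	ib_fmr_pool_unmap(fmr);
 *
 * Full teardown is then an optional ib_flush_fmr_pool(pool) followed
 * by ib_destroy_fmr_pool(pool).
 */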