/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
	MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

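/*
 * Per-context table of userspace doorbell pages.  Each entry tracks
 * the userspace virtual address, the pinned page's scatterlist entry
 * and a count of the doorbell records in use on that page.
 */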
struct mthca_user_db_table {
	struct mutex mutex;
	struct {
		u64                uvirt;
		struct scatterlist mem;
		int                refcount;
	} page[];
};

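/*
 * Undo mthca_alloc_icm_pages(): unmap the chunk's scatterlist from
 * the device (if it was mapped) and free the page blocks.
 */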
static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
			     DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
	}
}

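/*
 * Free an entire ICM allocation, chunk by chunk, using whichever
 * method (coherent or not) it was allocated with.
 */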
void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
	struct mthca_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mthca_free_icm_coherent(dev, chunk);
		else
			mthca_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

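/*
 * Allocate one block of 2^order pages and record it in a single
 * scatterlist entry.
 */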
static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	/*
	 * Use __GFP_ZERO because buggy firmware assumes ICM pages are
	 * cleared, and subtle failures are seen if they aren't.
	 */
	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

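/*
 * Allocate one coherent block of 2^order pages and record both its
 * kernel virtual address and its bus address in the scatterlist entry.
 */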
static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				    int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
				       gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

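/*
 * Allocate npages worth of ICM, built up as a list of chunks whose
 * scatterlists are mapped for DMA.  We try the largest block order
 * that still fits and back off one order each time an allocation
 * fails.
 */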
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask, int coherent)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return icm;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
						       &chunk->mem[chunk->npages],
						       cur_order, gfp_mask);
		else
			ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
						    cur_order, gfp_mask);

		if (!ret) {
			++chunk->npages;

			if (coherent)
				++chunk->nsg;
			else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				chunk->nsg =
					dma_map_sg(&dev->pdev->dev, chunk->mem,
						   chunk->npages,
						   DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;
			}

			if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
				chunk = NULL;

			npages -= 1 << cur_order;
		} else {
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	if (!coherent && chunk) {
		chunk->nsg = dma_map_sg(&dev->pdev->dev, chunk->mem,
					chunk->npages, DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mthca_free_icm(dev, icm, coherent);
	return NULL;
}

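/*
 * Take a reference on the ICM chunk backing table object @obj,
 * allocating the chunk and mapping it into the device on first use.
 */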
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
					(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					__GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mthca_MAP_ICM(dev, table->icm[i],
			  table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

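/*
 * Drop a reference on the ICM chunk backing table object @obj, and
 * unmap and free the chunk when the last reference goes away.
 */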
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

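/*
 * Return the lowmem virtual address of table object @obj, and
 * optionally its bus address through @dma_handle.  Only valid for
 * lowmem tables.
 */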
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mthca_icm_chunk *chunk;
	struct mthca_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

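/*
 * Take references on all ICM chunks covering objects @start through
 * @end, unwinding on failure.
 */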
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end)
{
	int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mthca_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mthca_table_put(dev, table, i);
	}

	return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			   int start, int end)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
		mthca_table_put(dev, table, i);
}

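/*
 * Create a table mapped at ICM virtual address @virt with room for
 * @nobj objects.  Chunks covering the first @reserved objects are
 * populated immediately and pinned with an extra reference so they
 * are never freed.
 */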
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem, int use_coherent)
{
	struct mthca_icm_table *table;
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;

	obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
	num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

	table = kmalloc(struct_size(table, icm, num_icm), GFP_KERNEL);
	if (!table)
		return NULL;

	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i < num_icm; ++i)
		table->icm[i] = NULL;

	for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MTHCA_TABLE_CHUNK_SIZE;
		if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

		table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
						(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
						__GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mthca_MAP_ICM(dev, table->icm[i],
				  virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
			mthca_free_icm(dev, table->icm[i], table->coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return table;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);

	return NULL;
}

void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev,
					table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);
}

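/*
 * ICM virtual address of page @page of @uar's UAR context (UARC)
 * memory.
 */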
static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
	return dev->uar_table.uarc_base +
		uar->index * dev->uar_table.uarc_size +
		page * MTHCA_ICM_PAGE_SIZE;
}

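/*
 * Map one page of userspace doorbell records into the device's UAR
 * context: pin the user page, DMA-map it and point the UARC entry at
 * it.  Subsequent calls for the same page just bump the refcount.
 */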
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
		      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
	struct page *pages[1];
	int ret = 0;
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	/*
	 * Each doorbell record is 8 bytes, so the last valid index
	 * is uarc_size / 8 - 1.
	 */
	if (index < 0 || index >= dev->uar_table.uarc_size / 8)
		return -EINVAL;

	mutex_lock(&db_tab->mutex);

	i = index / MTHCA_DB_REC_PER_PAGE;

	if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE)       ||
	    (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
	    (uaddr & 4095)) {
		ret = -EINVAL;
		goto out;
	}

	if (db_tab->page[i].refcount) {
		++db_tab->page[i].refcount;
		goto out;
	}

	ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
				  FOLL_WRITE | FOLL_LONGTERM, pages);
	if (ret < 0)
		goto out;

	sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
		    uaddr & ~PAGE_MASK);

	ret = dma_map_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
			 DMA_TO_DEVICE);
	if (ret <= 0) {
		/* dma_map_sg() returns 0 on failure, never a negative value */
		ret = -ENOMEM;
		unpin_user_page(pages[0]);
		goto out;
	}

	ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
				 mthca_uarc_virt(dev, uar, i));
	if (ret) {
		dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
			     DMA_TO_DEVICE);
		unpin_user_page(sg_page(&db_tab->page[i].mem));
		goto out;
	}

	db_tab->page[i].uvirt    = uaddr;
	db_tab->page[i].refcount = 1;

out:
	mutex_unlock(&db_tab->mutex);
	return ret;
}

void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
			 struct mthca_user_db_table *db_tab, int index)
{
	if (!mthca_is_memfree(dev))
		return;

	/*
	 * To make our bookkeeping simpler, we don't unmap DB
	 * pages until we clean up the whole db table.
	 */

	mutex_lock(&db_tab->mutex);

	--db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

	mutex_unlock(&db_tab->mutex);
}

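/*
 * Allocate the per-context bookkeeping for userspace doorbell pages.
 */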
struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
	struct mthca_user_db_table *db_tab;
	int npages;
	int i;

	if (!mthca_is_memfree(dev))
		return NULL;

	npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	db_tab = kmalloc(struct_size(db_tab, page, npages), GFP_KERNEL);
	if (!db_tab)
		return ERR_PTR(-ENOMEM);

	mutex_init(&db_tab->mutex);
	for (i = 0; i < npages; ++i) {
		db_tab->page[i].refcount = 0;
		db_tab->page[i].uvirt    = 0;
		sg_init_table(&db_tab->page[i].mem, 1);
	}

	return db_tab;
}

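/*
 * Tear down a context's doorbell pages: unmap each mapped page from
 * the device and release the pinned user page.
 */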
void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
			       struct mthca_user_db_table *db_tab)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
		if (db_tab->page[i].uvirt) {
			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
			dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
				     DMA_TO_DEVICE);
			unpin_user_page(sg_page(&db_tab->page[i].mem));
		}
	}

	kfree(db_tab);
}

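/*
 * Allocate a kernel doorbell record and return its index.  Group 1
 * records (CQ arm, SQ) are handed out from the bottom of the UARC
 * page range growing up; group 2 records (CQ set_ci, RQ, SRQ) from
 * the top growing down.  New pages are mapped into the UARC on
 * demand.
 */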
int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
		   u32 qn, __be32 **db)
{
	int group;
	int start, end, dir;
	int i, j;
	struct mthca_db_page *page;
	int ret = 0;

	mutex_lock(&dev->db_tab->mutex);

	switch (type) {
	case MTHCA_DB_TYPE_CQ_ARM:
	case MTHCA_DB_TYPE_SQ:
		group = 0;
		start = 0;
		end   = dev->db_tab->max_group1;
		dir   = 1;
		break;

	case MTHCA_DB_TYPE_CQ_SET_CI:
	case MTHCA_DB_TYPE_RQ:
	case MTHCA_DB_TYPE_SRQ:
		group = 1;
		start = dev->db_tab->npages - 1;
		end   = dev->db_tab->min_group2;
		dir   = -1;
		break;

	default:
		ret = -EINVAL;
		goto out;
	}

	for (i = start; i != end; i += dir)
		if (dev->db_tab->page[i].db_rec &&
		    !bitmap_full(dev->db_tab->page[i].used,
				 MTHCA_DB_REC_PER_PAGE)) {
			page = dev->db_tab->page + i;
			goto found;
		}

	for (i = start; i != end; i += dir)
		if (!dev->db_tab->page[i].db_rec) {
			page = dev->db_tab->page + i;
			goto alloc;
		}

	if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
		ret = -ENOMEM;
		goto out;
	}

	if (group == 0)
		++dev->db_tab->max_group1;
	else
		--dev->db_tab->min_group2;

	page = dev->db_tab->page + end;

alloc:
	page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
					  MTHCA_ICM_PAGE_SIZE, &page->mapping,
					  GFP_KERNEL);
	if (!page->db_rec) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mthca_MAP_ICM_page(dev, page->mapping,
				 mthca_uarc_virt(dev, &dev->driver_uar, i));
	if (ret) {
		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		goto out;
	}

	bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
	j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
	set_bit(j, page->used);

	if (group == 1)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;

	ret = i * MTHCA_DB_REC_PER_PAGE + j;

	page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

	*db = (__be32 *) &page->db_rec[j];

out:
	mutex_unlock(&dev->db_tab->mutex);

	return ret;
}

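/*
 * Free a kernel doorbell record.  If its page becomes empty and is
 * not buried inside group 1, the page is unmapped and freed and the
 * group boundaries are pulled back where possible.
 */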
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
	int i, j;
	struct mthca_db_page *page;

	i = db_index / MTHCA_DB_REC_PER_PAGE;
	j = db_index % MTHCA_DB_REC_PER_PAGE;

	page = dev->db_tab->page + i;

	mutex_lock(&dev->db_tab->mutex);

	page->db_rec[j] = 0;
	if (i >= dev->db_tab->min_group2)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;
	clear_bit(j, page->used);

	if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
	    i >= dev->db_tab->max_group1 - 1) {
		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		page->db_rec = NULL;

		if (i == dev->db_tab->max_group1) {
			--dev->db_tab->max_group1;
			/* XXX may be able to unmap more pages now */
		}
		if (i == dev->db_tab->min_group2)
			++dev->db_tab->min_group2;
	}

	mutex_unlock(&dev->db_tab->mutex);
}

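/*
 * Allocate the kernel doorbell table and initialize the group
 * boundaries; actual pages are mapped lazily by mthca_alloc_db().
 */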
int mthca_init_db_tab(struct mthca_dev *dev)
{
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
	if (!dev->db_tab)
		return -ENOMEM;

	mutex_init(&dev->db_tab->mutex);

	dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	dev->db_tab->max_group1 = 0;
	dev->db_tab->min_group2 = dev->db_tab->npages - 1;

	dev->db_tab->page = kmalloc_array(dev->db_tab->npages,
					  sizeof(*dev->db_tab->page),
					  GFP_KERNEL);
	if (!dev->db_tab->page) {
		kfree(dev->db_tab);
		return -ENOMEM;
	}

	for (i = 0; i < dev->db_tab->npages; ++i)
		dev->db_tab->page[i].db_rec = NULL;

	return 0;
}

void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	/*
	 * To keep mthca_free_db() simple, we don't always free UARC
	 * pages when they become empty, so we need to sweep through
	 * the doorbell pages and free any leftover pages now.
	 */
	for (i = 0; i < dev->db_tab->npages; ++i) {
		if (!dev->db_tab->page[i].db_rec)
			continue;

		if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
			mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  dev->db_tab->page[i].db_rec,
				  dev->db_tab->page[i].mapping);
	}

	kfree(dev->db_tab->page);
	kfree(dev->db_tab);
}