/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c 323223 2017-09-06 15:33:23Z hselasky $
 */

#include <linux/compiler.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>

#include "mlx5_ib.h"

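/*
 * One pinned userspace page that holds doorbell records.  A single page
 * can back several doorbells, so entries are refcounted and kept on the
 * per-ucontext db_page_list.
 */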
struct mlx5_ib_user_db_page {
	struct list_head	list;
	struct ib_umem	       *umem;
	uintptr_t		user_virt;
	int			refcnt;
};

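/*
 * Map the userspace doorbell record at 'virt' for DMA.  The containing
 * page is pinned with ib_umem_get() on first use and shared, via the
 * reference count, with any later doorbells that land on the same page.
 * Returns 0 on success or a negative errno value.
 */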
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, uintptr_t virt,
			struct mlx5_db *db)
{
	struct mlx5_ib_user_db_page *page;
	struct ib_umem_chunk *chunk;
	int err = 0;

	mutex_lock(&context->db_page_mutex);

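	/* Reuse an existing entry if this page is already pinned. */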
	list_for_each_entry(page, &context->db_page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
			goto found;

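	/* Page not pinned yet: allocate a tracking entry and pin it. */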
	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	page->user_virt = (virt & PAGE_MASK);
	page->refcnt    = 0;
	page->umem      = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
				      PAGE_SIZE, 0, 0);
	if (IS_ERR(page->umem)) {
		err = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->db_page_list);

found:
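	/* DMA address of the record: page base plus offset within the page. */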
	chunk = list_entry(page->umem->chunk_list.next,
	    struct ib_umem_chunk, list);
	db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
	db->u.user_page = page;
	++page->refcnt;

out:
	mutex_unlock(&context->db_page_mutex);

	return err;
}

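/*
 * Drop one reference on the doorbell page set up by mlx5_ib_db_map_user().
 */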
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
{
	mutex_lock(&context->db_page_mutex);

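	/* Unpin and free the page when its last doorbell record goes away. */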
	if (!--db->u.user_page->refcnt) {
		list_del(&db->u.user_page->list);
		ib_umem_release(db->u.user_page->umem);
		kfree(db->u.user_page);
	}

	mutex_unlock(&context->db_page_mutex);
}