/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  userspace support verbs
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/current.h>

#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "ehca_tools.h"
#include "hcp_if.h"

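/*
 * Allocate the per-process user context. The eHCA context currently holds
 * no state beyond the embedded struct ib_ucontext, so a zeroed allocation
 * is all that is required.
 */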
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
					struct ib_udata *udata)
{
	struct ehca_ucontext *my_context;

	my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
	if (!my_context) {
		ehca_err(device, "Out of memory device=%p", device);
		return ERR_PTR(-ENOMEM);
	}

	return &my_context->ib_ucontext;
}

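/* Free the user context allocated in ehca_alloc_ucontext(). */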
int ehca_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
	return 0;
}

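/*
 * vm_operations callbacks: vm_private_data points to the mmap use counter
 * (mm_count_*) of the mapped resource; ehca_mm_open()/ehca_mm_close()
 * increment and decrement it as mappings are created and destroyed.
 */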
static void ehca_mm_open(struct vm_area_struct *vma)
{
	u32 *count = (u32 *)vma->vm_private_data;

	if (!count) {
		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
			     vma->vm_start, vma->vm_end);
		return;
	}
	(*count)++;
	if (!(*count))
		ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
			     vma->vm_start, vma->vm_end);
	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
		     vma->vm_start, vma->vm_end, *count);
}

static void ehca_mm_close(struct vm_area_struct *vma)
{
	u32 *count = (u32 *)vma->vm_private_data;

	if (!count) {
		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
			     vma->vm_start, vma->vm_end);
		return;
	}
	(*count)--;
	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
		     vma->vm_start, vma->vm_end, *count);
}

static struct vm_operations_struct vm_ops = {
	.open = ehca_mm_open,
	.close = ehca_mm_close,
};

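/*
 * Map the firmware context page (user galpa) of a CQ or QP into userspace:
 * exactly one non-cached EHCA page, established with remap_pfn_range().
 */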
static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
			u32 *mm_count)
{
	int ret;
	u64 vsize, physical;

	vsize = vma->vm_end - vma->vm_start;
	if (vsize != EHCA_PAGESIZE) {
		ehca_gen_err("invalid vsize=%lx", vsize);
		return -EINVAL;
	}

	physical = galpas->user.fw_handle;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
	/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
	ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
			      vsize, vma->vm_page_prot);
	if (unlikely(ret)) {
		ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
		return -ENOMEM;
	}

	vma->vm_private_data = mm_count;
	(*mm_count)++;
	vma->vm_ops = &vm_ops;

	return 0;
}

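/*
 * Map a kernel-space ipz queue (CQ/QP ring buffer) into userspace by
 * inserting its pages one by one with vm_insert_page().
 */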
static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
			   u32 *mm_count)
{
	int ret;
	u64 start, ofs;
	struct page *page;

	vma->vm_flags |= VM_RESERVED;
	start = vma->vm_start;
	for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
		u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
		page = virt_to_page(virt_addr);
		ret = vm_insert_page(vma, start, page);
		if (unlikely(ret)) {
			ehca_gen_err("vm_insert_page() failed rc=%x", ret);
			return ret;
		}
		start += PAGE_SIZE;
	}
	vma->vm_private_data = mm_count;
	(*mm_count)++;
	vma->vm_ops = &vm_ops;

	return 0;
}

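/* Dispatch a CQ mmap request to the firmware page or queue mapping helper. */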
static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
			u32 rsrc_type)
{
	int ret;

	switch (rsrc_type) {
	case 1: /* galpa fw handle */
		ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
		ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
		if (unlikely(ret)) {
			ehca_err(cq->ib_cq.device,
				 "ehca_mmap_fw() failed rc=%x cq_num=%x",
				 ret, cq->cq_number);
			return ret;
		}
		break;

	case 2: /* cq queue_addr */
		ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
		ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
		if (unlikely(ret)) {
			ehca_err(cq->ib_cq.device,
				 "ehca_mmap_queue() failed rc=%x cq_num=%x",
				 ret, cq->cq_number);
			return ret;
		}
		break;

	default:
		ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
			 rsrc_type, cq->cq_number);
		return -EINVAL;
	}

	return 0;
}

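/*
 * Dispatch a QP mmap request to the firmware page, receive queue or
 * send queue mapping helper.
 */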
static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
			u32 rsrc_type)
{
	int ret;

	switch (rsrc_type) {
	case 1: /* galpa fw handle */
		ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
		ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_fw() failed rc=%x qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	case 2: /* qp rqueue_addr */
		ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
			 qp->ib_qp.qp_num);
		ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
				      &qp->mm_count_rqueue);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	case 3: /* qp squeue_addr */
		ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
			 qp->ib_qp.qp_num);
		ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
				      &qp->mm_count_squeue);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	default:
		ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
			 rsrc_type, qp->ib_qp.qp_num);
		return -EINVAL;
	}

	return 0;
}

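/*
 * mmap entry point. The mmap offset encodes the idr handle (bits 63..32),
 * the queue type (bits 31..28: CQ or QP) and the resource type (bits 27..24:
 * firmware page, receive queue or send queue). The object is looked up,
 * checked against the calling process and user context, and then mapped.
 */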
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
	u32 idr_handle = fileoffset >> 32;
	u32 q_type = (fileoffset >> 28) & 0xF;	  /* CQ, QP,...        */
	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
	u32 cur_pid = current->tgid;
	int ret;
	unsigned long flags;
	struct ehca_cq *cq;
	struct ehca_qp *qp;
	struct ehca_pd *pd;

	switch (q_type) {
	case 1: /* CQ */
		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
		cq = idr_find(&ehca_cq_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!cq)
			return -EINVAL;

		if (cq->ownpid != cur_pid) {
			ehca_err(cq->ib_cq.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, cq->ownpid);
			return -ENOMEM;
		}

		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
			return -EINVAL;

		ret = ehca_mmap_cq(vma, cq, rsrc_type);
		if (unlikely(ret)) {
			ehca_err(cq->ib_cq.device,
				 "ehca_mmap_cq() failed rc=%x cq_num=%x",
				 ret, cq->cq_number);
			return ret;
		}
		break;

	case 2: /* QP */
		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
		qp = idr_find(&ehca_qp_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!qp)
			return -EINVAL;

		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
		if (pd->ownpid != cur_pid) {
			ehca_err(qp->ib_qp.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, pd->ownpid);
			return -ENOMEM;
		}

		if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
			return -EINVAL;

		ret = ehca_mmap_qp(vma, qp, rsrc_type);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_qp() failed rc=%x qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	default:
		ehca_gen_err("bad queue type %x", q_type);
		return -EINVAL;
	}

	return 0;
}