/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rdma/ib_umem.h>

#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me) {
		spin_lock_init(&me->mrlock);
	} else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me) {
		spin_lock_init(&me->mwlock);
	} else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}
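
/*
 * The caches above are created once at module load and destroyed at unload.
 * A minimal sketch of that setup (the real init code lives elsewhere in this
 * driver and the exact kmem_cache_create() signature varies across kernel
 * versions, so treat this as an illustrative sketch only):
 *
 *	mr_cache = kmem_cache_create("ehca_cache_mr",
 *				     sizeof(struct ehca_mr), 0,
 *				     SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	if (!mr_cache)
 *		return -ENOMEM;
 */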

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE,
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exist!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */
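
/*
 * For reference: this entry point is reached through the core verbs layer
 * when a kernel consumer asks for a DMA MR covering all of memory, e.g.
 * (illustrative call, not part of this file):
 *
 *	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 */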

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);

	u64 size;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* determine number of MR pages */
	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
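
	/*
	 * Worked example for the page arithmetic above (an illustrative
	 * note, assuming PAGE_SIZE == EHCA_PAGESIZE == 4096): for
	 * iova_start = 0x1001 and size = 0x3000 the region touches pages
	 * 0x1000..0x4fff, so num_pages_mr = (1 + 0x3000 + 4095) / 4096 = 4.
	 */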

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		pginfo.type           = EHCA_MR_PGI_PHYS;
		pginfo.num_pages      = num_pages_mr;
		pginfo.num_4k         = num_pages_4k;
		pginfo.num_phys_buf   = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
					 EHCA_PAGESIZE);

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
			       int mr_access_flags, struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	int ret;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%lx "
			 "virt_base=%lx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *) e_mr->umem;
		goto reg_user_mr_exit1;
	}

	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_pages_mr = (((virt % PAGE_SIZE) + length + PAGE_SIZE - 1) /
			PAGE_SIZE);
	num_pages_4k = (((virt % EHCA_PAGESIZE) + length + EHCA_PAGESIZE - 1) /
			EHCA_PAGESIZE);

	/* register MR on HCA */
	pginfo.type       = EHCA_MR_PGI_USER;
	pginfo.num_pages  = num_pages_mr;
	pginfo.num_4k     = num_pages_4k;
	pginfo.region     = e_mr->umem;
	pginfo.next_4k	  = e_mr->umem->offset / EHCA_PAGESIZE;
	pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
					       (&e_mr->umem->chunk_list),
					       list);

	ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
			 " udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */
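
/*
 * For reference: ehca_reg_user_mr() is driven by the uverbs layer when a
 * userspace consumer registers memory through libibverbs, e.g.
 * (illustrative call, not part of this file):
 *
 *	mr = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);
 *
 * ib_umem_get() pins the user pages, which are then walked 4k-wise in
 * ehca_set_pagebuf() below.
 */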

/*----------------------------------------------------------------------*/

int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;

	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_pages_mr = 0;
	u32 num_pages_4k = 0; /* 4k portion "pages" */
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start;  /* new == old address */
	new_size  = e_mr->size;	  /* new == old length */
	new_acl   = e_mr->acl;	  /* new == old access control */
	new_pd    = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%lx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
				 PAGE_SIZE - 1) / PAGE_SIZE);
		num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
				 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
		pginfo.type           = EHCA_MR_PGI_PHYS;
		pginfo.num_pages      = num_pages_mr;
		pginfo.num_4k         = num_pages_4k;
		pginfo.num_phys_buf   = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
					 EHCA_PAGESIZE);
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/

int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca_mrmw_map_hrc_query_mr(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd               = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size             = hipzout.len;
	mr_attr->lkey             = hipzout.lkey;
	mr_attr->rkey             = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout = {{0},0};

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
			 "shca=%p hca_hndl=%lx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey    = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca_mrmw_map_hrc_free_mw(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
	    ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	if (IS_ERR(ib_fmr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
			 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
			 mr_access_flags, fmr_attr);
	return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	pginfo.type      = EHCA_MR_PGI_FMR;
	pginfo.num_pages = list_len;
	pginfo.num_4k    = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
	pginfo.page_list = page_list;
	pginfo.next_4k   = ((iova & (e_fmr->fmr_page_size-1)) /
			    EHCA_PAGESIZE);

	ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
			 "iova=%lx",
			 ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */
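
/*
 * For reference: kernel consumers such as SRP or iSER typically reach this
 * through the core FMR API, roughly (illustrative call, not part of this
 * file):
 *
 *	ret = ib_map_phys_fmr(fmr, page_list, list_len, iova);
 *
 * Every entry of page_list must be aligned to fmr_page_size, as enforced by
 * ehca_fmr_check_page_list() below.
 */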

/*----------------------------------------------------------------------*/

int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check that all FMRs belong to one SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
	if (ehca_use_hp_mr == 1)
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca_mrmw_map_hrc_alloc(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_pages = pginfo->num_pages;
	e_mr->num_4k    = pginfo->num_4k;
	e_mr->start     = iova_start;
	e_mr->size      = size;
	e_mr->acl       = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max 512 pages per shot */
	for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {

		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
			rnum = pginfo->num_4k % 512; /* last shot */
			if (rnum == 0)
				rnum = 512;      /* last shot is full */
		} else
			rnum = 512;

		if (rnum > 1) {
			ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
			if (ret) {
				ehca_err(&shca->ib_device, "ehca_set_pagebuf "
					 "bad rc, ret=%x rnum=%x kpage=%p",
					 ret, rnum, kpage);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else {  /* rnum==1 */
			ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
			if (ret) {
				ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
					 "bad rc, ret=%x i=%x", ret, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		}

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
						 0, /* pagesize 4k */
						 0, rpage, rnum);

		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lx "
					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca_mrmw_map_hrc_rrpg_last(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
				 "mr_hndl=%lx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
			break;
		} else
			ret = 0;
	} /* end for(i) */


ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr_rpages() */
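
/*
 * Worked example for the 512-pages-per-shot loop above: with
 * pginfo->num_4k = 1300, (1300 + 511) / 512 = 3 hCalls are issued carrying
 * rnum = 512, 512 and finally 1300 % 512 = 276 pages; only the last call is
 * expected to return H_SUCCESS, the earlier ones H_PAGE_REGISTERED.
 */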

/*----------------------------------------------------------------------*/

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
			 e_mr, pginfo, pginfo->type, pginfo->num_pages,
			 pginfo->num_4k,kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64*)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_pages = pginfo->num_pages;
		e_mr->num_4k    = pginfo->num_4k;
		e_mr->start     = iova_start;
		e_mr->size      = size;
		e_mr->acl       = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if ( ret && (ret != -EAGAIN) )
		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
	    (pginfo->num_4k > e_mr->num_4k)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
			 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				      e_pd, pginfo, lkey, rkey);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */
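
/*
 * For reference: with EHCA_PAGESIZE = 4k a single rereg hCall can carry at
 * most 512 * 4k = 2MB worth of pages in one control block, so regions
 * larger than that (and max-MRs) take the deregister/re-register path
 * above.
 */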

/*----------------------------------------------------------------------*/

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	/* first check if reregistration hCall can be used for unmap */
	if (e_fmr->fmr_max_pages > 512) {
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (rereg_1_hcall) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret != H_SUCCESS) {
			/*
			 * should not happen, because length checked above,
			 * FMRs are not shared and no MW bound to FMRs
			 */
			ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
				 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
				 "mr_hndl=%lx lkey=%x lkey_out=%x",
				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
				 e_fmr->ipz_mr_handle.handle,
				 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
			rereg_3_hcall = 1;
		} else {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
		}
	}

	if (rereg_3_hcall) {
		/* first free old FMR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
				 "lkey=%x",
				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
				 e_fmr->ipz_mr_handle.handle,
				 e_fmr->ib.ib_fmr.lkey);
			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
			goto ehca_unmap_one_fmr_exit0;
		}
		/* clean ehca_mr_t, without changing lock */
		save_fmr = *e_fmr;
		ehca_mr_deletenew(e_fmr);

		/* set some MR values */
		e_fmr->flags = save_fmr.flags;
		e_fmr->fmr_page_size = save_fmr.fmr_page_size;
		e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
		e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
		e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
		e_fmr->acl = save_fmr.acl;

		pginfo.type      = EHCA_MR_PGI_FMR;
		pginfo.num_pages = 0;
		pginfo.num_4k    = 0;
		ret = ehca_reg_mr(shca, e_fmr, NULL,
				  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
				  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
				  &tmp_rkey);
		if (ret) {
			u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
			memcpy(&e_fmr->flags, &(save_fmr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_unmap_one_fmr_exit0;
		}
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_pages     = e_origmr->num_pages;
	e_newmr->num_4k        = e_origmr->num_4k;
	e_newmr->start         = iova_start;
	e_newmr->size          = e_origmr->size;
	e_newmr->acl           = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	struct ib_phys_buf ib_pbuf;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = (u64)high_memory - PAGE_OFFSET;
	iova_start = (u64*)KERNELBASE;
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	pginfo.type           = EHCA_MR_PGI_PHYS;
	pginfo.num_pages      = num_pages_mr;
	pginfo.num_4k         = num_pages_4k;
	pginfo.num_phys_buf   = 1;
	pginfo.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
			 "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
			 num_pages_mr, num_pages_4k);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca_mrmw_map_hrc_reg_smr(h_ret);
	}
	/* successful registration */
	e_newmr->num_pages     = e_origmr->num_pages;
	e_newmr->num_4k        = e_origmr->num_4k;
	e_newmr->start         = iova_start;
	e_newmr->size          = e_origmr->size;
	e_newmr->acl           = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%x e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/

/*
 * check physical buffer array of MR verbs for validity and
 * calculate MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%lx pbuf->size=%lx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
				     "pbuf->size=%lx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&	/* not 1st */
		     (i < (num_phys_buf - 1)) &&	/* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */
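
/*
 * An illustrative buffer list that passes the checks above (assuming
 * PAGE_SIZE = 4096): two buffers {addr=0x10800, size=0x800} and
 * {addr=0x11000, size=0x2000} with iova_start = (u64 *)0x10800 give
 * *size = 0x2800; the first buffer ends page-aligned and the second starts
 * page-aligned, as required.
 */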

/*----------------------------------------------------------------------*/

/* check page list of map FMR verb for validity */
1649int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1650			     u64 *page_list,
1651			     int list_len)
1652{
1653	u32 i;
1654	u64 *page;
1655
1656	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1657		ehca_gen_err("bad list_len, list_len=%x "
1658			     "e_fmr->fmr_max_pages=%x fmr=%p",
1659			     list_len, e_fmr->fmr_max_pages, e_fmr);
1660		return -EINVAL;
1661	}
1662
1663	/* each page must be aligned */
1664	page = page_list;
1665	for (i = 0; i < list_len; i++) {
1666		if (*page % e_fmr->fmr_page_size) {
1667			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
1668				     "fmr_page_size=%x", i, *page, page, e_fmr,
1669				     e_fmr->fmr_page_size);
1670			return -EINVAL;
1671		}
1672		page++;
1673	}
1674
1675	return 0;
1676} /* end ehca_fmr_check_page_list() */
1677
1678/*----------------------------------------------------------------------*/
1679
1680/* setup page buffer from page info */
1681int ehca_set_pagebuf(struct ehca_mr *e_mr,
1682		     struct ehca_mr_pginfo *pginfo,
1683		     u32 number,
1684		     u64 *kpage)
1685{
1686	int ret = 0;
1687	struct ib_umem_chunk *prev_chunk;
1688	struct ib_umem_chunk *chunk;
1689	struct ib_phys_buf *pbuf;
1690	u64 *fmrlist;
1691	u64 num4k, pgaddr, offs4k;
1692	u32 i = 0;
1693	u32 j = 0;
1694
1695	if (pginfo->type == EHCA_MR_PGI_PHYS) {
1696		/* loop over desired phys_buf_array entries */
1697		while (i < number) {
1698			pbuf   = pginfo->phys_buf_array + pginfo->next_buf;
1699			num4k  = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
1700				  EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
1701			offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1702			while (pginfo->next_4k < offs4k + num4k) {
1703				/* sanity check */
1704				if ((pginfo->page_cnt >= pginfo->num_pages) ||
1705				    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
1706					ehca_gen_err("page_cnt >= num_pages, "
1707						     "page_cnt=%lx "
1708						     "num_pages=%lx "
1709						     "page_4k_cnt=%lx "
1710						     "num_4k=%lx i=%x",
1711						     pginfo->page_cnt,
1712						     pginfo->num_pages,
1713						     pginfo->page_4k_cnt,
1714						     pginfo->num_4k, i);
1715					ret = -EFAULT;
1716					goto ehca_set_pagebuf_exit0;
1717				}
1718				*kpage = phys_to_abs(
1719					(pbuf->addr & EHCA_PAGEMASK)
1720					+ (pginfo->next_4k * EHCA_PAGESIZE));
1721				if (!(*kpage) && pbuf->addr) {
1722					ehca_gen_err("pbuf->addr=%lx "
1723						     "pbuf->size=%lx "
1724						     "next_4k=%lx", pbuf->addr,
1725						     pbuf->size,
1726						     pginfo->next_4k);
1727					ret = -EFAULT;
1728					goto ehca_set_pagebuf_exit0;
1729				}
1730				(pginfo->page_4k_cnt)++;
1731				(pginfo->next_4k)++;
1732				if (pginfo->next_4k %
1733				    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1734					(pginfo->page_cnt)++;
1735				kpage++;
1736				i++;
1737				if (i >= number)
					break;
1738			}
1739			if (pginfo->next_4k >= offs4k + num4k) {
1740				(pginfo->next_buf)++;
1741				pginfo->next_4k = 0;
1742			}
1743		}
1744	} else if (pginfo->type == EHCA_MR_PGI_USER) {
1745		/* loop over desired chunk entries */
1746		chunk      = pginfo->next_chunk;
1747		prev_chunk = pginfo->next_chunk;
1748		list_for_each_entry_continue(chunk,
1749					     (&(pginfo->region->chunk_list)),
1750					     list) {
1751			for (i = pginfo->next_nmap; i < chunk->nmap; ) {
1752				pgaddr = page_to_pfn(chunk->page_list[i].page)
1753					 << PAGE_SHIFT;
1754				*kpage = phys_to_abs(pgaddr +
1755						     (pginfo->next_4k *
1756						      EHCA_PAGESIZE));
1757				if (!(*kpage)) {
1758					ehca_gen_err("pgaddr=%lx "
1759						     "chunk->page_list[i]=%lx "
1760						     "i=%x next_4k=%lx mr=%p",
1761						     pgaddr,
1762						     (u64)sg_dma_address(
1763							     &chunk->
1764							     page_list[i]),
1765						     i, pginfo->next_4k, e_mr);
1766					ret = -EFAULT;
1767					goto ehca_set_pagebuf_exit0;
1768				}
1769				(pginfo->page_4k_cnt)++;
1770				(pginfo->next_4k)++;
1771				kpage++;
1772				if (pginfo->next_4k %
1773				    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1774					(pginfo->page_cnt)++;
1775					(pginfo->next_nmap)++;
1776					pginfo->next_4k = 0;
1777					i++;
1778				}
1779				j++;
1780				if (j >= number)
					break;
1781			}
1782			if ((pginfo->next_nmap >= chunk->nmap) &&
1783			    (j >= number)) {
1784				pginfo->next_nmap = 0;
1785				prev_chunk = chunk;
1786				break;
1787			} else if (pginfo->next_nmap >= chunk->nmap) {
1788				pginfo->next_nmap = 0;
1789				prev_chunk = chunk;
1790			} else if (j >= number)
1791				break;
1792			else
1793				prev_chunk = chunk;
1794		}
1795		pginfo->next_chunk =
1796			list_prepare_entry(prev_chunk,
1797					   (&(pginfo->region->chunk_list)),
1798					   list);
1799	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
1800		/* loop over desired page_list entries */
1801		fmrlist = pginfo->page_list + pginfo->next_listelem;
1802		for (i = 0; i < number; i++) {
1803			*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1804					     pginfo->next_4k * EHCA_PAGESIZE);
1805			if (!(*kpage)) {
1806				ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1807					     "next_listelem=%lx next_4k=%lx",
1808					     *fmrlist, fmrlist,
1809					     pginfo->next_listelem,
1810					     pginfo->next_4k);
1811				ret = -EFAULT;
1812				goto ehca_set_pagebuf_exit0;
1813			}
1814			(pginfo->page_4k_cnt)++;
1815			(pginfo->next_4k)++;
1816			kpage++;
1817			if (pginfo->next_4k %
1818			    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
1819				(pginfo->page_cnt)++;
1820				(pginfo->next_listelem)++;
1821				fmrlist++;
1822				pginfo->next_4k = 0;
1823			}
1824		}
1825	} else {
1826		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1827		ret = -EFAULT;
1828		goto ehca_set_pagebuf_exit0;
1829	}
1830
1831ehca_set_pagebuf_exit0:
1832	if (ret)
1833		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
1834			     "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
1835			     "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
1836			     "next_listelem=%lx region=%p next_chunk=%p "
1837			     "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
1838			     pginfo->num_pages, pginfo->num_4k,
1839			     pginfo->next_buf, pginfo->next_4k, number, kpage,
1840			     pginfo->page_cnt, pginfo->page_4k_cnt, i,
1841			     pginfo->next_listelem, pginfo->region,
1842			     pginfo->next_chunk, pginfo->next_nmap);
1843	return ret;
1844} /* end ehca_set_pagebuf() */
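
#if 0	/* illustrative sketch only -- not compiled */
/*
 * Worked example of the PGI_PHYS arithmetic above with hypothetical
 * values: a buffer at addr 0x1100 of size 0x2000 spans three 4K
 * hardware pages (num4k == 3); offs4k is the first 4K slot within
 * the surrounding kernel page and so depends on PAGE_SIZE.
 */
static void example_phys_num4k(void)
{
	u64 addr = 0x1100, size = 0x2000;
	u64 num4k = ((addr % EHCA_PAGESIZE) + size + EHCA_PAGESIZE - 1)
		/ EHCA_PAGESIZE;
	u64 offs4k = (addr & ~PAGE_MASK) / EHCA_PAGESIZE;

	(void)offs4k;
	BUG_ON(num4k != 3);
}
#endif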
1845
1846/*----------------------------------------------------------------------*/
1847
1848	/* set up one page from the page info page buffer */
1849int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
1850		       struct ehca_mr_pginfo *pginfo,
1851		       u64 *rpage)
1852{
1853	int ret = 0;
1854	struct ib_phys_buf *tmp_pbuf;
1855	u64 *fmrlist;
1856	struct ib_umem_chunk *chunk;
1857	struct ib_umem_chunk *prev_chunk;
1858	u64 pgaddr, num4k, offs4k;
1859
1860	if (pginfo->type == EHCA_MR_PGI_PHYS) {
1861		/* sanity check */
1862		if ((pginfo->page_cnt >= pginfo->num_pages) ||
1863		    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
1864			ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
1865				     "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
1866				     pginfo->page_cnt, pginfo->num_pages,
1867				     pginfo->page_4k_cnt, pginfo->num_4k);
1868			ret = -EFAULT;
1869			goto ehca_set_pagebuf_1_exit0;
1870		}
1871		tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
1872		num4k  = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
1873			  EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
1874		offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1875		*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
1876				     (pginfo->next_4k * EHCA_PAGESIZE));
1877		if (!(*rpage) && tmp_pbuf->addr) {
1878			ehca_gen_err("tmp_pbuf->addr=%lx"
1879				     " tmp_pbuf->size=%lx next_4k=%lx",
1880				     tmp_pbuf->addr, tmp_pbuf->size,
1881				     pginfo->next_4k);
1882			ret = -EFAULT;
1883			goto ehca_set_pagebuf_1_exit0;
1884		}
1885		(pginfo->page_4k_cnt)++;
1886		(pginfo->next_4k)++;
1887		if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1888			(pginfo->page_cnt)++;
1889		if (pginfo->next_4k >= offs4k + num4k) {
1890			(pginfo->next_buf)++;
1891			pginfo->next_4k = 0;
1892		}
1893	} else if (pginfo->type == EHCA_MR_PGI_USER) {
1894		chunk      = pginfo->next_chunk;
1895		prev_chunk = pginfo->next_chunk;
1896		list_for_each_entry_continue(chunk,
1897					     (&(pginfo->region->chunk_list)),
1898					     list) {
1899			pgaddr = page_to_pfn(chunk->page_list[
1900					     pginfo->next_nmap].page)
1901				 << PAGE_SHIFT;
1902			*rpage = phys_to_abs(pgaddr +
1903					     (pginfo->next_4k * EHCA_PAGESIZE));
1904			if (!(*rpage)) {
1905				ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
1906					     " next_nmap=%lx next_4k=%lx mr=%p",
1907					     pgaddr, (u64)sg_dma_address(
1908						     &chunk->page_list[
1909							     pginfo->
1910							     next_nmap]),
1911					     pginfo->next_nmap, pginfo->next_4k,
1912					     e_mr);
1913				ret = -EFAULT;
1914				goto ehca_set_pagebuf_1_exit0;
1915			}
1916			(pginfo->page_4k_cnt)++;
1917			(pginfo->next_4k)++;
1918			if (pginfo->next_4k %
1919			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1920				(pginfo->page_cnt)++;
1921				(pginfo->next_nmap)++;
1922				pginfo->next_4k = 0;
1923			}
1924			if (pginfo->next_nmap >= chunk->nmap) {
1925				pginfo->next_nmap = 0;
1926				prev_chunk = chunk;
1927			}
1928			break;
1929		}
1930		pginfo->next_chunk =
1931			list_prepare_entry(prev_chunk,
1932					   (&(pginfo->region->chunk_list)),
1933					   list);
1934	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
1935		fmrlist = pginfo->page_list + pginfo->next_listelem;
1936		*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1937				     pginfo->next_4k * EHCA_PAGESIZE);
1938		if (!(*rpage)) {
1939			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1940				     "next_listelem=%lx next_4k=%lx",
1941				     *fmrlist, fmrlist, pginfo->next_listelem,
1942				     pginfo->next_4k);
1943			ret = -EFAULT;
1944			goto ehca_set_pagebuf_1_exit0;
1945		}
1946		(pginfo->page_4k_cnt)++;
1947		(pginfo->next_4k)++;
1948		if (pginfo->next_4k %
1949		    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
1950			(pginfo->page_cnt)++;
1951			(pginfo->next_listelem)++;
1952			pginfo->next_4k = 0;
1953		}
1954	} else {
1955		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1956		ret = -EFAULT;
1957		goto ehca_set_pagebuf_1_exit0;
1958	}
1959
1960ehca_set_pagebuf_1_exit0:
1961	if (ret)
1962		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
1963			     "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
1964			     "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
1965			     "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
1966			     pginfo, pginfo->type, pginfo->num_pages,
1967			     pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
1968			     rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
1969			     pginfo->next_listelem, pginfo->region,
1970			     pginfo->next_chunk, pginfo->next_nmap);
1971	return ret;
1972} /* end ehca_set_pagebuf_1() */
1973
1974/*----------------------------------------------------------------------*/
1975
1976/*
1977 * check whether the MR is a max-MR, i.e. covers the whole of memory;
1978 * returns 1 if it is a max-MR, else 0
1979 */
1980int ehca_mr_is_maxmr(u64 size,
1981		     u64 *iova_start)
1982{
1983	/* an MR is treated as a max-MR only if it satisfies both conditions: */
1984	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
1985	    (iova_start == (void*)KERNELBASE)) {
1986		ehca_gen_dbg("this is a max-MR");
1987		return 1;
1988	}
1989	return 0;
1990} /* end ehca_mr_is_maxmr() */
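
#if 0	/* illustrative sketch only -- not compiled */
/*
 * Sketch of the only combination classified as a max-MR: the whole
 * kernel linear mapping, starting at KERNELBASE.
 */
static int example_is_maxmr(void)
{
	u64 size = (u64)high_memory - PAGE_OFFSET;

	return ehca_mr_is_maxmr(size, (u64 *)KERNELBASE);	/* -> 1 */
}
#endif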
1991
1992/*----------------------------------------------------------------------*/
1993
1994/* map IB access control bits to HIPZ access control; used for both MR and MW */
1995void ehca_mrmw_map_acl(int ib_acl,
1996		       u32 *hipz_acl)
1997{
1998	*hipz_acl = 0;
1999	if (ib_acl & IB_ACCESS_REMOTE_READ)
2000		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
2001	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
2002		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
2003	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
2004		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
2005	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
2006		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
2007	if (ib_acl & IB_ACCESS_MW_BIND)
2008		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
2009} /* end ehca_mrmw_map_acl() */
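
#if 0	/* illustrative sketch only -- not compiled */
/*
 * Sketch: mapping the flags of a hypothetical RDMA-write-capable MR.
 * With the table above, the result is
 * HIPZ_ACCESSCTRL_L_WRITE | HIPZ_ACCESSCTRL_R_WRITE.
 */
static u32 example_map_acl(void)
{
	u32 hipz_acl;

	ehca_mrmw_map_acl(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE,
			  &hipz_acl);
	return hipz_acl;
}
#endif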
2010
2011/*----------------------------------------------------------------------*/
2012
2013/* sets page size in hipz access control for MR/MW. */
2014void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
2015{
2016	/* nothing to set: the HCA supports only 4K pages */
2017} /* end ehca_mrmw_set_pgsize_hipz_acl() */
2018
2019/*----------------------------------------------------------------------*/
2020
2021/*
2022 * reverse map HIPZ access control back to IB access flags.
2023 * This routine serves both MRs and MWs.
2024 */
2025void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
2026			       int *ib_acl) /*OUT*/
2027{
2028	*ib_acl = 0;
2029	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
2030		*ib_acl |= IB_ACCESS_REMOTE_READ;
2031	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
2032		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
2033	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
2034		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
2035	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
2036		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
2037	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
2038		*ib_acl |= IB_ACCESS_MW_BIND;
2039} /* end ehca_mrmw_reverse_map_acl() */
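
#if 0	/* illustrative sketch only -- not compiled */
/*
 * Sketch: on the five bits handled above, reverse_map_acl undoes
 * map_acl exactly, so a round trip preserves the IB flags.
 */
static void example_acl_roundtrip(void)
{
	int in = IB_ACCESS_REMOTE_READ | IB_ACCESS_MW_BIND;
	int out;
	u32 hipz_acl;

	ehca_mrmw_map_acl(in, &hipz_acl);
	ehca_mrmw_reverse_map_acl(&hipz_acl, &out);
	BUG_ON(in != out);
}
#endif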
2040
2042/*----------------------------------------------------------------------*/
2043
2044/*
2045 * map HIPZ rc to IB retcodes for MR/MW allocations
2046 * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
2047 */
2048int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
2049{
2050	switch (hipz_rc) {
2051	case H_SUCCESS:	             /* successful completion */
2052		return 0;
2053	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
2054	case H_CONSTRAINED:          /* resource constraint */
2055	case H_NO_MEM:
2056		return -ENOMEM;
2057	case H_BUSY:                 /* long busy */
2058		return -EBUSY;
2059	default:
2060		return -EINVAL;
2061	}
2062} /* end ehca_mrmw_map_hrc_alloc() */
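
#if 0	/* illustrative sketch only -- not compiled */
/*
 * Sketch of the mapping above, spelled out as assertions: callers
 * hand the raw hypervisor rc to this mapper and propagate the
 * resulting errno (0, -ENOMEM, -EBUSY or -EINVAL).
 */
static void example_map_alloc_rc(void)
{
	BUG_ON(ehca_mrmw_map_hrc_alloc(H_SUCCESS) != 0);
	BUG_ON(ehca_mrmw_map_hrc_alloc(H_NO_MEM) != -ENOMEM);
	BUG_ON(ehca_mrmw_map_hrc_alloc(H_BUSY) != -EBUSY);
}
#endif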
2063
2064/*----------------------------------------------------------------------*/
2065
2066/*
2067 * map HIPZ rc to IB retcodes for MR register rpage
2068 * Used for hipz_h_register_rpage_mr when registering the last page.
2069 */
2070int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
2071{
2072	switch (hipz_rc) {
2073	case H_SUCCESS:         /* registration complete */
2074		return 0;
2075	case H_PAGE_REGISTERED:	/* page registered */
2076	case H_ADAPTER_PARM:    /* invalid adapter handle */
2077	case H_RH_PARM:         /* invalid resource handle */
2078/*	case H_QT_PARM:            invalid queue type */
2079	case H_PARAMETER:       /*
2080				 * invalid logical address,
2081				 * or count zero or greater than 512
2082				 */
2083	case H_TABLE_FULL:      /* page table full */
2084	case H_HARDWARE:        /* HCA not operational */
2085		return -EINVAL;
2086	case H_BUSY:            /* long busy */
2087		return -EBUSY;
2088	default:
2089		return -EINVAL;
2090	}
2091} /* end ehca_mrmw_map_hrc_rrpg_last() */
2092
2093/*----------------------------------------------------------------------*/
2094
2095/*
2096 * map HIPZ rc to IB retcodes for MR register rpage
2097 * Used for hipz_h_register_rpage_mr when registering any page except the last.
2098 */
2099int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
2100{
2101	switch (hipz_rc) {
2102	case H_PAGE_REGISTERED:	/* page registered */
2103		return 0;
2104	case H_SUCCESS:         /* registration complete */
2105	case H_ADAPTER_PARM:    /* invalid adapter handle */
2106	case H_RH_PARM:         /* invalid resource handle */
2107/*	case H_QT_PARM:            invalid queue type */
2108	case H_PARAMETER:       /*
2109				 * invalid logical address,
2110				 * or count zero or greater than 512
2111				 */
2112	case H_TABLE_FULL:      /* page table full */
2113	case H_HARDWARE:        /* HCA not operational */
2114		return -EINVAL;
2115	case H_BUSY:            /* long busy */
2116		return -EBUSY;
2117	default:
2118		return -EINVAL;
2119	}
2120} /* end ehca_mrmw_map_hrc_rrpg_notlast() */
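
#if 0	/* illustrative sketch only -- not compiled */
/*
 * Sketch of why there are two rpage mappers, assuming a hypothetical
 * registration loop: for every page but the last, H_PAGE_REGISTERED
 * is the success code; for the last page it is H_SUCCESS.
 */
static int example_map_rpage_rc(u64 h_ret, u32 i, u32 nr_pages)
{
	if (i == nr_pages - 1)
		return ehca_mrmw_map_hrc_rrpg_last(h_ret);
	return ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
}
#endif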
2121
2122/*----------------------------------------------------------------------*/
2123
2124/* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
2125int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
2126{
2127	switch (hipz_rc) {
2128	case H_SUCCESS:	             /* successful completion */
2129		return 0;
2130	case H_ADAPTER_PARM:         /* invalid adapter handle */
2131	case H_RH_PARM:              /* invalid resource handle */
2132		return -EINVAL;
2133	case H_BUSY:                 /* long busy */
2134		return -EBUSY;
2135	default:
2136		return -EINVAL;
2137	}
2138} /* end ehca_mrmw_map_hrc_query_mr() */
2139
2140/*----------------------------------------------------------------------*/
2142
2143/*
2144 * map HIPZ rc to IB retcodes for freeing MR resource
2145 * Used for hipz_h_free_resource_mr
2146 */
2147int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
2148{
2149	switch (hipz_rc) {
2150	case H_SUCCESS:      /* resource freed */
2151		return 0;
2152	case H_ADAPTER_PARM: /* invalid adapter handle */
2153	case H_RH_PARM:      /* invalid resource handle */
2154	case H_R_STATE:      /* invalid resource state */
2155	case H_HARDWARE:     /* HCA not operational */
2156		return -EINVAL;
2157	case H_RESOURCE:     /* Resource in use */
2158	case H_BUSY:         /* long busy */
2159		return -EBUSY;
2160	default:
2161		return -EINVAL;
2162	}
2163} /* end ehca_mrmw_map_hrc_free_mr() */
2164
2165/*----------------------------------------------------------------------*/
2166
2167/*
2168 * map HIPZ rc to IB retcodes for freeing MW resource
2169 * Used for hipz_h_free_resource_mw
2170 */
2171int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
2172{
2173	switch (hipz_rc) {
2174	case H_SUCCESS:	     /* resource freed */
2175		return 0;
2176	case H_ADAPTER_PARM: /* invalid adapter handle */
2177	case H_RH_PARM:      /* invalid resource handle */
2178	case H_R_STATE:      /* invalid resource state */
2179	case H_HARDWARE:     /* HCA not operational */
2180		return -EINVAL;
2181	case H_RESOURCE:     /* Resource in use */
2182	case H_BUSY:         /* long busy */
2183		return -EBUSY;
2184	default:
2185		return -EINVAL;
2186	}
2187} /* end ehca_mrmw_map_hrc_free_mw() */
2188
2189/*----------------------------------------------------------------------*/
2190
2191/*
2192 * map HIPZ rc to IB retcodes for SMR registrations
2193 * Used for hipz_h_register_smr.
2194 */
2195int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
2196{
2197	switch (hipz_rc) {
2198	case H_SUCCESS:	             /* successful completion */
2199		return 0;
2200	case H_ADAPTER_PARM:         /* invalid adapter handle */
2201	case H_RH_PARM:              /* invalid resource handle */
2202	case H_MEM_PARM:             /* invalid MR virtual address */
2203	case H_MEM_ACCESS_PARM:      /* invalid access controls */
2204	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
2205		return -EINVAL;
2206	case H_BUSY:                 /* long busy */
2207		return -EBUSY;
2208	default:
2209		return -EINVAL;
2210	}
2211} /* end ehca_mrmw_map_hrc_reg_smr() */
2212
2213/*----------------------------------------------------------------------*/
2214
2215/*
2216 * MR destructor and constructor:
2217 * used by the reregister MR verb; resets all fields of the ehca_mr to 0,
2218 * except the embedded struct ib_mr and the spinlock
2219 */
2220void ehca_mr_deletenew(struct ehca_mr *mr)
2221{
2222	mr->flags         = 0;
2223	mr->num_pages     = 0;
2224	mr->num_4k        = 0;
2225	mr->acl           = 0;
2226	mr->start         = NULL;
2227	mr->fmr_page_size = 0;
2228	mr->fmr_max_pages = 0;
2229	mr->fmr_max_maps  = 0;
2230	mr->fmr_map_cnt   = 0;
2231	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
2232	memset(&mr->galpas, 0, sizeof(mr->galpas));
2233	mr->nr_of_pages   = 0;
2234	mr->pagearray     = NULL;
2235} /* end ehca_mr_deletenew() */
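
#if 0	/* illustrative sketch only -- not compiled */
/*
 * Sketch: after a failed reregister, the MR is reset to its
 * just-constructed state while the embedded ib_mr (and the lock)
 * survive untouched.
 */
static void example_reset_for_rereg(struct ehca_mr *e_mr)
{
	struct ib_mr keep = e_mr->ib.ib_mr;

	ehca_mr_deletenew(e_mr);
	BUG_ON(memcmp(&keep, &e_mr->ib.ib_mr, sizeof(keep)) != 0);
}
#endif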
2236
2237int ehca_init_mrmw_cache(void)
2238{
2239	mr_cache = kmem_cache_create("ehca_cache_mr",
2240				     sizeof(struct ehca_mr), 0,
2241				     SLAB_HWCACHE_ALIGN,
2242				     NULL, NULL);
2243	if (!mr_cache)
2244		return -ENOMEM;
2245	mw_cache = kmem_cache_create("ehca_cache_mw",
2246				     sizeof(struct ehca_mw), 0,
2247				     SLAB_HWCACHE_ALIGN,
2248				     NULL, NULL);
2249	if (!mw_cache) {
2250		kmem_cache_destroy(mr_cache);
2251		mr_cache = NULL;
2252		return -ENOMEM;
2253	}
2254	return 0;
2255}
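
#if 0	/* illustrative sketch only -- not compiled */
/*
 * Sketch of the intended pairing in a hypothetical module init path:
 * create the caches before any MR/MW can be allocated, and tear them
 * down again if a later setup step fails.
 */
static int example_module_init(void)
{
	int ret = ehca_init_mrmw_cache();

	if (ret)
		return ret;
	/* a later failure would call ehca_cleanup_mrmw_cache() */
	return 0;
}
#endif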
2256
2257void ehca_cleanup_mrmw_cache(void)
2258{
2259	if (mr_cache)
2260		kmem_cache_destroy(mr_cache);
2261	if (mw_cache)
2262		kmem_cache_destroy(mw_cache);
2263}
2264