/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c 330897 2018-03-14 03:19:51Z eadler $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/types.h>
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <asm/atomic.h>

#include <common/t4_msg.h>
#include "iw_cxgbe.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96

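/*
 * T4 and T5 adapters cannot support memory regions of 8GB or larger, so
 * such registrations are rejected up front.
 */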
static int
mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{

	return ((is_t4(dev->rdev.adap) ||
		is_t5(dev->rdev.adap)) &&
		length >= 8*1024*1024*1024ULL);
}

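/*
 * Write 'len' bytes from 'data' (or zeroes if 'data' is NULL) into adapter
 * memory at 'addr', which is expressed in 32-byte units.  The transfer is
 * split into ULP_TX_MEM_WRITE work requests carrying at most
 * C4IW_MAX_INLINE_SIZE bytes of immediate data each; only the last request
 * asks for a completion, which is then waited for.
 */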
static int
write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct adapter *sc = rdev->adap;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;
	u32 cmd;

	cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= cpu_to_be32(F_ULP_MEMIO_ORDER);
	else
		cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);

	addr &= 0x7FFFFFF;
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = min(len, C4IW_MAX_INLINE_SIZE);
		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
		if (wr == NULL)
			return (-ENOMEM);	/* don't report the failure as success */
		ulpmc = wrtod(wr);

		memset(ulpmc, 0, wr_len);
		INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
						    F_FW_WR_COMPL);
			ulpmc->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
		} else
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
		ulpmc->wr.wr_mid = cpu_to_be32(
				       V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		ulpmc->cmd = cmd;
		ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
		    DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr),
						      16));
		ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(ulpsc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		t4_wrq_tx(sc, wr);
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	CTR5(KTR_IW_CXGBE,
	    "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
	    __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
			V_FW_RI_TPTE_STAGSTATE(stag_state) |
			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO))|
			V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->adap->vres.stag.start >> 5),
				sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

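/*
 * Write a physical buffer list ('pbl_size' 8-byte entries) into the PBL
 * region of adapter memory.  write_adapter_mem() takes its address in
 * 32-byte units, hence the '>> 5'.
 */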
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	CTR4(KTR_IW_CXGBE, "%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d",
	     __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

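/*
 * Thin wrappers around write_tpt_entry(): dereg_mem() clears an existing
 * TPT entry and releases its stag, allocate_window()/deallocate_window()
 * manage memory-window stags, and allocate_stag() reserves a non-shared MR
 * stag whose translation is supplied later.
 */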
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}

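/*
 * Mark the MR valid, record its stag as both lkey and rkey, and insert it
 * into the device's mmid table so it can be looked up later.
 */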
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

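/*
 * Write the TPT entry for a fully described MR and publish it via
 * finish_mem_reg(); the TPT entry is torn down again if the latter fails.
 * reregister_mem() does the same for an MR that already owns a stag.
 */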
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
		      struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ? mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	return ret;
}

static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);

	return ret;
}

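/* Reserve room in the PBL pool for 'npages' 8-byte translation entries. */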
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						    npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

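/*
 * Convert a caller-supplied list of physical buffers into a densely packed
 * page list, choosing the largest page size (up to what the TPT PS field
 * can encode) consistent with the alignment and length of every buffer.
 */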
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < PAGE_SHIFT + M_FW_RI_TPTE_PS;
	    ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	CTR6(KTR_IW_CXGBE,
	    "%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d", __func__,
	    (unsigned long long)*iova_start, (unsigned long long)mask, *shift,
	    (unsigned long long)*total_size, *npages);

	return 0;
}

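/*
 * Re-register a physical MR, optionally changing its PD, access rights
 * and/or translation.
 */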
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{

	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size = 0;
	int npages = 0;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s ib_mr %p ib_pd %p", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
					 IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
						iova_start,
						&total_size, &npages,
						&shift, &page_list);
		if (ret)
			return ret;
	}
	if (mr_exceeds_hw_limits(rhp, total_size)) {
		kfree(page_list);
		return -EINVAL;
	}
	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

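/* Register an MR described by a list of physical buffers. */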
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
					&total_size, &npages, &shift,
					&page_list);
	if (ret)
		goto err;

	if (mr_exceeds_hw_limits(rhp, total_size)) {
		kfree(page_list);
		ret = -EINVAL;
		goto err;
	}
	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
			     npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}

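/*
 * Register a DMA MR: a single TPT entry with no PBL that covers the whole
 * address space (va_fbo 0, length ~0UL).
 */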
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0UL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

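/*
 * Register a user memory region: pin the pages through ib_umem_get(), copy
 * their DMA addresses into the PBL one page-sized chunk at a time, then
 * write the TPT entry.
 */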
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt, int acc, struct ib_udata *udata, int mr_id)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct scatterlist *sg;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;
	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;
	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
					mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev,
						pages,
						mhp->attr.pbl_addr + (n << 3), i);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				     mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

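/* Allocate a memory window: a stag of type FW_RI_STAG_MW with no PBL. */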
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
	    mhp);
	return 0;
}

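/*
 * Allocate a fast-register MR: reserve a PBL of 'pbl_depth' entries and a
 * stag now; the actual translation is supplied later by a fast-register
 * work request.
 */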
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

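/*
 * Allocate a physically contiguous, 4KB-aligned page list for use with
 * fast-register work requests and record its bus address alongside it.
 */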
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
						     int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	bus_addr_t dma_addr;
	int size = sizeof *c4pl + page_list_len * sizeof(u64);

	c4pl = contigmalloc(size,
	    M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
	if (c4pl)
		dma_addr = vtophys(c4pl);
	else
		return ERR_PTR(-ENOMEM);

	pci_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->size = size;
	c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
	c4pl->ibpl.max_page_list_len = page_list_len;

	return &c4pl->ibpl;
}

void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
	contigfree(c4pl, c4pl->size, M_DEVBUF);
}

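/*
 * Deregister an MR: remove it from the mmid table, clear its TPT entry,
 * release its PBL (if any) and any pinned user memory, then free it.
 */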
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}
#endif