1219820Sjeff/*
2219820Sjeff * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3219820Sjeff * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4219820Sjeff * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5219820Sjeff *
6219820Sjeff * This software is available to you under a choice of one of two
7219820Sjeff * licenses.  You may choose to be licensed under the terms of the GNU
8219820Sjeff * General Public License (GPL) Version 2, available from the file
9219820Sjeff * COPYING in the main directory of this source tree, or the
10219820Sjeff * OpenIB.org BSD license below:
11219820Sjeff *
12219820Sjeff *     Redistribution and use in source and binary forms, with or
13219820Sjeff *     without modification, are permitted provided that the following
14219820Sjeff *     conditions are met:
15219820Sjeff *
16219820Sjeff *      - Redistributions of source code must retain the above
17219820Sjeff *        copyright notice, this list of conditions and the following
18219820Sjeff *        disclaimer.
19219820Sjeff *
20219820Sjeff *      - Redistributions in binary form must reproduce the above
21219820Sjeff *        copyright notice, this list of conditions and the following
22219820Sjeff *        disclaimer in the documentation and/or other materials
23219820Sjeff *        provided with the distribution.
24219820Sjeff *
25219820Sjeff * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26219820Sjeff * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27219820Sjeff * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28219820Sjeff * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29219820Sjeff * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30219820Sjeff * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31219820Sjeff * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32219820Sjeff * SOFTWARE.
33219820Sjeff */
34219820Sjeff
35219820Sjeff#include <linux/init.h>
36219820Sjeff#include <linux/errno.h>
37255932Salfred#include <linux/slab.h>
38255932Salfred#include <linux/kernel.h>
39255932Salfred#include <linux/vmalloc.h>
40219820Sjeff
41219820Sjeff#include <linux/mlx4/cmd.h>
42219820Sjeff
43219820Sjeff#include "mlx4.h"
44219820Sjeff#include "icm.h"
45219820Sjeff
/* Flags OR'd into mpt_entry->flags when building an MPT entry
 * (see mlx4_mr_enable()). */
#define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO	    (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL	    (1 <<  9)
#define MLX4_MPT_FLAG_REGION	    (1 <<  8)

/* Flags OR'd into mpt_entry->pd_flags alongside the PD number. */
#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
#define MLX4_MPT_PD_FLAG_RAE	    (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV	    (3 << 24)

/* Values written to the MPT status byte (*(u8 *)fmr->mpt in
 * mlx4_map_phys_fmr()) to toggle SW vs. HW ownership of the entry. */
#define MLX4_MPT_STATUS_SW		0xF0
#define MLX4_MPT_STATUS_HW		0x00
59219820Sjeff
/*
 * Allocate a block of 2^order segments from the buddy allocator.
 * Scans upward from 'order' for the smallest free block, removes it,
 * then splits it down to the requested order, returning the sibling
 * half ("buddy", index seg ^ 1) to the free list at each level.
 *
 * Returns the first segment index of the block, or -1 (wrapped into
 * u32 — callers compare against -1) if no block is free.
 */
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	/* Find the lowest order >= 'order' that has a free block. */
	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split down to the requested order, freeing the buddy half
	 * at each intermediate level. */
	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	/* Convert block index at this order into a segment index. */
	seg <<= order;

	return seg;
}
96219820Sjeff
/*
 * Return a block of 2^order segments starting at segment 'seg' to the
 * buddy allocator, coalescing with the sibling block (seg ^ 1) at each
 * order for as long as the sibling is also free.
 */
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	/* Convert segment index into a block index at this order. */
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		/* Buddy is free too: merge the pair and retry one
		 * order up. */
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}
115219820Sjeff
116219820Sjeffstatic int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
117219820Sjeff{
118219820Sjeff	int i, s;
119219820Sjeff
120219820Sjeff	buddy->max_order = max_order;
121219820Sjeff	spin_lock_init(&buddy->lock);
122219820Sjeff
123255932Salfred	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
124219820Sjeff			      GFP_KERNEL);
125255932Salfred	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
126219820Sjeff				  GFP_KERNEL);
127219820Sjeff	if (!buddy->bits || !buddy->num_free)
128219820Sjeff		goto err_out;
129219820Sjeff
130219820Sjeff	for (i = 0; i <= buddy->max_order; ++i) {
131219820Sjeff		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
132255932Salfred		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
133255932Salfred		if (!buddy->bits[i]) {
134255932Salfred                        goto err_out_free;
135255932Salfred		}
136219820Sjeff	}
137219820Sjeff
138219820Sjeff	set_bit(0, buddy->bits[buddy->max_order]);
139219820Sjeff	buddy->num_free[buddy->max_order] = 1;
140219820Sjeff
141219820Sjeff	return 0;
142219820Sjeff
143219820Sjefferr_out_free:
144219820Sjeff	for (i = 0; i <= buddy->max_order; ++i)
145255932Salfred		if ( buddy->bits[i] )
146255932Salfred			kfree(buddy->bits[i]);
147219820Sjeff
148219820Sjefferr_out:
149219820Sjeff	kfree(buddy->bits);
150219820Sjeff	kfree(buddy->num_free);
151219820Sjeff
152219820Sjeff	return -ENOMEM;
153219820Sjeff}
154219820Sjeff
155219820Sjeffstatic void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
156219820Sjeff{
157219820Sjeff	int i;
158219820Sjeff
159219820Sjeff	for (i = 0; i <= buddy->max_order; ++i)
160255932Salfred                kfree(buddy->bits[i]);
161219820Sjeff
162219820Sjeff	kfree(buddy->bits);
163219820Sjeff	kfree(buddy->num_free);
164219820Sjeff}
165219820Sjeff
/*
 * Reserve 2^order MTT entries: grab the covering run of segments from
 * the buddy allocator (each segment holds 2^log_mtts_per_seg MTTs) and
 * pin the backing ICM range.  Returns the first MTT index, or -1
 * (wrapped into u32 — callers compare against -1) on failure.
 */
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	/* A request smaller than one segment still costs a whole segment. */
	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	/* Translate segment index into an MTT entry index. */
	offset = seg * (1 << log_mtts_per_seg);

	/* Pin the ICM backing for the range; undo the reservation if
	 * the ICM mapping fails. */
	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}
189219820Sjeff
190255932Salfredstatic u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
191255932Salfred{
192255932Salfred	u64 in_param = 0;
193255932Salfred	u64 out_param;
194255932Salfred	int err;
195255932Salfred
196255932Salfred	if (mlx4_is_mfunc(dev)) {
197255932Salfred		set_param_l(&in_param, order);
198255932Salfred		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
199255932Salfred						       RES_OP_RESERVE_AND_MAP,
200255932Salfred						       MLX4_CMD_ALLOC_RES,
201255932Salfred						       MLX4_CMD_TIME_CLASS_A,
202255932Salfred						       MLX4_CMD_WRAPPED);
203255932Salfred		if (err)
204255932Salfred			return -1;
205255932Salfred		return get_param_l(&out_param);
206255932Salfred	}
207255932Salfred	return __mlx4_alloc_mtt_range(dev, order);
208255932Salfred}
209255932Salfred
210219820Sjeffint mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
211219820Sjeff		  struct mlx4_mtt *mtt)
212219820Sjeff{
213219820Sjeff	int i;
214219820Sjeff
215219820Sjeff	if (!npages) {
216219820Sjeff		mtt->order      = -1;
217219820Sjeff		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
218219820Sjeff		return 0;
219219820Sjeff	} else
220219820Sjeff		mtt->page_shift = page_shift;
221219820Sjeff
222255932Salfred	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
223219820Sjeff		++mtt->order;
224219820Sjeff
225255932Salfred	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
226255932Salfred	if (mtt->offset == -1) {
227255932Salfred		mlx4_err(dev, "Failed to allocate mtts for %d pages(order %d)\n",
228255932Salfred			 npages, mtt->order);
229219820Sjeff		return -ENOMEM;
230255932Salfred	}
231219820Sjeff
232219820Sjeff	return 0;
233219820Sjeff}
234219820SjeffEXPORT_SYMBOL_GPL(mlx4_mtt_init);
235219820Sjeff
236255932Salfredvoid __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
237219820Sjeff{
238255932Salfred	u32 first_seg;
239255932Salfred	int seg_order;
240219820Sjeff	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
241219820Sjeff
242255932Salfred	seg_order = max_t(int, order - log_mtts_per_seg, 0);
243255932Salfred	first_seg = offset / (1 << log_mtts_per_seg);
244255932Salfred
245255932Salfred	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
246255932Salfred	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
247255932Salfred			     offset + (1 << order) - 1);
248255932Salfred}
249255932Salfred
250255932Salfredstatic void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
251255932Salfred{
252255932Salfred	u64 in_param = 0;
253255932Salfred	int err;
254255932Salfred
255255932Salfred	if (mlx4_is_mfunc(dev)) {
256255932Salfred		set_param_l(&in_param, offset);
257255932Salfred		set_param_h(&in_param, order);
258255932Salfred		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
259255932Salfred						       MLX4_CMD_FREE_RES,
260255932Salfred						       MLX4_CMD_TIME_CLASS_A,
261255932Salfred						       MLX4_CMD_WRAPPED);
262255932Salfred		if (err)
263255932Salfred			mlx4_warn(dev, "Failed to free mtt range at:"
264255932Salfred				  "%d order:%d\n", offset, order);
265255932Salfred		return;
266255932Salfred	}
267255932Salfred	 __mlx4_free_mtt_range(dev, offset, order);
268255932Salfred}
269255932Salfred
270255932Salfredvoid mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
271255932Salfred{
272219820Sjeff	if (mtt->order < 0)
273219820Sjeff		return;
274219820Sjeff
275255932Salfred	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
276219820Sjeff}
277219820SjeffEXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
278219820Sjeff
279219820Sjeffu64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
280219820Sjeff{
281255932Salfred	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
282219820Sjeff}
283219820SjeffEXPORT_SYMBOL_GPL(mlx4_mtt_addr);
284219820Sjeff
285219820Sjeffstatic u32 hw_index_to_key(u32 ind)
286219820Sjeff{
287219820Sjeff	return (ind >> 24) | (ind << 8);
288219820Sjeff}
289219820Sjeff
290219820Sjeffstatic u32 key_to_hw_index(u32 key)
291219820Sjeff{
292219820Sjeff	return (key << 24) | (key >> 8);
293219820Sjeff}
294219820Sjeff
295219820Sjeffstatic int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
296219820Sjeff			  int mpt_index)
297219820Sjeff{
298255932Salfred	return mlx4_cmd(dev, mailbox->dma, mpt_index,
299255932Salfred			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
300255932Salfred			MLX4_CMD_WRAPPED);
301219820Sjeff}
302219820Sjeff
303219820Sjeffstatic int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
304219820Sjeff			  int mpt_index)
305219820Sjeff{
306219820Sjeff	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
307255932Salfred			    !mailbox, MLX4_CMD_HW2SW_MPT,
308255932Salfred			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
309219820Sjeff}
310219820Sjeff
311255932Salfredstatic int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
312255932Salfred			   u64 iova, u64 size, u32 access, int npages,
313255932Salfred			   int page_shift, struct mlx4_mr *mr)
314219820Sjeff{
315255932Salfred	mr->iova       = iova;
316255932Salfred	mr->size       = size;
317255932Salfred	mr->pd	       = pd;
318255932Salfred	mr->access     = access;
319255932Salfred	mr->enabled    = MLX4_MR_DISABLED;
320255932Salfred	mr->key	       = hw_index_to_key(mridx);
321255932Salfred
322255932Salfred	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
323255932Salfred}
324255932Salfred
325255932Salfredstatic int mlx4_WRITE_MTT(struct mlx4_dev *dev,
326255932Salfred			  struct mlx4_cmd_mailbox *mailbox,
327255932Salfred			  int num_entries)
328255932Salfred{
329255932Salfred	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
330255932Salfred			MLX4_CMD_TIME_CLASS_A,  MLX4_CMD_WRAPPED);
331255932Salfred}
332255932Salfred
333255932Salfredint __mlx4_mr_reserve(struct mlx4_dev *dev)
334255932Salfred{
335219820Sjeff	struct mlx4_priv *priv = mlx4_priv(dev);
336219820Sjeff
337255932Salfred	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
338255932Salfred}
339219820Sjeff
340255932Salfredstatic int mlx4_mr_reserve(struct mlx4_dev *dev)
341255932Salfred{
342255932Salfred	u64 out_param;
343219820Sjeff
344255932Salfred	if (mlx4_is_mfunc(dev)) {
345255932Salfred		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
346255932Salfred				   MLX4_CMD_ALLOC_RES,
347255932Salfred				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
348255932Salfred			return -1;
349255932Salfred		return get_param_l(&out_param);
350255932Salfred	}
351255932Salfred	return  __mlx4_mr_reserve(dev);
352219820Sjeff}
353219820Sjeff
354255932Salfredvoid __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
355219820Sjeff{
356219820Sjeff	struct mlx4_priv *priv = mlx4_priv(dev);
357255932Salfred
358255932Salfred	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
359219820Sjeff}
360219820Sjeff
361255932Salfredstatic void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
362219820Sjeff{
363255932Salfred	u64 in_param = 0;
364219820Sjeff
365255932Salfred	if (mlx4_is_mfunc(dev)) {
366255932Salfred		set_param_l(&in_param, index);
367255932Salfred		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
368255932Salfred			       MLX4_CMD_FREE_RES,
369255932Salfred			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
370255932Salfred			mlx4_warn(dev, "Failed to release mr index:%d\n",
371255932Salfred				  index);
372255932Salfred		return;
373255932Salfred	}
374255932Salfred	__mlx4_mr_release(dev, index);
375219820Sjeff}
376219820Sjeff
377255932Salfredint __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
378255932Salfred{
379255932Salfred	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
380255932Salfred
381255932Salfred	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
382255932Salfred}
383255932Salfred
384255932Salfredstatic int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
385255932Salfred{
386255932Salfred	u64 param = 0;
387255932Salfred
388255932Salfred	if (mlx4_is_mfunc(dev)) {
389255932Salfred		set_param_l(&param, index);
390255932Salfred		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
391255932Salfred							MLX4_CMD_ALLOC_RES,
392255932Salfred							MLX4_CMD_TIME_CLASS_A,
393255932Salfred							MLX4_CMD_WRAPPED);
394255932Salfred	}
395255932Salfred	return __mlx4_mr_alloc_icm(dev, index);
396255932Salfred}
397255932Salfred
398255932Salfredvoid __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
399255932Salfred{
400255932Salfred	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
401255932Salfred
402255932Salfred	mlx4_table_put(dev, &mr_table->dmpt_table, index);
403255932Salfred}
404255932Salfred
405255932Salfredstatic void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
406255932Salfred{
407255932Salfred	u64 in_param = 0;
408255932Salfred
409255932Salfred	if (mlx4_is_mfunc(dev)) {
410255932Salfred		set_param_l(&in_param, index);
411255932Salfred		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
412255932Salfred			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
413255932Salfred			     MLX4_CMD_WRAPPED))
414255932Salfred			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
415255932Salfred				  index);
416255932Salfred		return;
417255932Salfred	}
418255932Salfred	return __mlx4_mr_free_icm(dev, index);
419255932Salfred}
420255932Salfred
421219820Sjeffint mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
422219820Sjeff		  int npages, int page_shift, struct mlx4_mr *mr)
423219820Sjeff{
424219820Sjeff	u32 index;
425219820Sjeff	int err;
426219820Sjeff
427255932Salfred	index = mlx4_mr_reserve(dev);
428219820Sjeff	if (index == -1)
429219820Sjeff		return -ENOMEM;
430219820Sjeff
431219820Sjeff	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
432219820Sjeff				     access, npages, page_shift, mr);
433219820Sjeff	if (err)
434255932Salfred		mlx4_mr_release(dev, index);
435219820Sjeff
436219820Sjeff	return err;
437219820Sjeff}
438219820SjeffEXPORT_SYMBOL_GPL(mlx4_mr_alloc);
439219820Sjeff
440255932Salfredstatic void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
441219820Sjeff{
442219820Sjeff	int err;
443219820Sjeff
444255932Salfred	if (mr->enabled == MLX4_MR_EN_HW) {
445219820Sjeff		err = mlx4_HW2SW_MPT(dev, NULL,
446219820Sjeff				     key_to_hw_index(mr->key) &
447219820Sjeff				     (dev->caps.num_mpts - 1));
448219820Sjeff		if (err)
449255932Salfred			mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
450255932Salfred
451255932Salfred		mr->enabled = MLX4_MR_EN_SW;
452219820Sjeff	}
453219820Sjeff	mlx4_mtt_cleanup(dev, &mr->mtt);
454219820Sjeff}
455219820Sjeff
456219820Sjeffvoid mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
457219820Sjeff{
458219820Sjeff	mlx4_mr_free_reserved(dev, mr);
459255932Salfred	if (mr->enabled)
460255932Salfred		mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
461255932Salfred	mlx4_mr_release(dev, key_to_hw_index(mr->key));
462219820Sjeff}
463219820SjeffEXPORT_SYMBOL_GPL(mlx4_mr_free);
464219820Sjeff
/*
 * Hand an MR to hardware: map its ICM backing, build an MPT entry in
 * a command mailbox, and issue SW2HW_MPT.  On success the MR is in the
 * MLX4_MR_EN_HW state.  Returns 0 or a negative errno (ICM and mailbox
 * are released on every error path).
 */
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	/* Make sure the dMPT entry's ICM page is mapped first. */
	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	memset(mpt_entry, 0, sizeof *mpt_entry);

	/* All multi-byte fields are written big-endian for the HCA. */
	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO	 |
				       MLX4_MPT_FLAG_REGION	 |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		/* No MTT was allocated — mark the region physical. */
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	/* The firmware indexes MPTs modulo the table size. */
	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MR_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
532219820Sjeff
/*
 * Write 'npages' MTT entries directly into the ICM-backed MTT table,
 * starting at mtt->offset + start_index.  The caller must ensure the
 * chunk does not cross a page boundary (see __mlx4_write_mtt()), since
 * mlx4_table_find() returns a pointer into a single page.
 * Returns 0, or -ENOMEM if the table page is not mapped.
 */
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	/* Take the buffer from the device, fill it, then give it back
	 * so the HCA observes the new entries. */
	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}
558219820Sjeff
/*
 * Direct (native/master) MTT write path.  Splits the work into chunks
 * that never cross a PAGE_SIZE boundary of the MTT table: the first
 * chunk is trimmed to the end of the page containing
 * mtt->offset + start_index, later chunks are a full page of entries.
 * Returns 0 on success or the first chunk's error.
 */
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how may mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;

		/* After the first (possibly short) chunk, every chunk is
		 * page-aligned and at most one full page. */
		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}
586219820Sjeff
/*
 * Public MTT write entry point.  On multi-function devices the entries
 * are marshalled into a command mailbox — inbox[0] holds the absolute
 * start index, inbox[1] is zeroed, entries follow — and sent to the
 * master via WRITE_MTT; otherwise they are written directly through
 * __mlx4_write_mtt().  Returns 0 on success or a negative errno
 * (-EINVAL if the MTT was never allocated).
 */
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			/* Two u64 slots are reserved for the header. */
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
					       MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages      -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
630219820Sjeff
631219820Sjeffint mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
632219820Sjeff		       struct mlx4_buf *buf)
633219820Sjeff{
634219820Sjeff	u64 *page_list;
635219820Sjeff	int err;
636219820Sjeff	int i;
637219820Sjeff
638219820Sjeff	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
639219820Sjeff	if (!page_list)
640219820Sjeff		return -ENOMEM;
641219820Sjeff
642219820Sjeff	for (i = 0; i < buf->npages; ++i)
643255932Salfred		if (buf->nbufs == 1)
644219820Sjeff			page_list[i] = buf->direct.map + (i << buf->page_shift);
645219820Sjeff		else
646219820Sjeff			page_list[i] = buf->page_list[i].map;
647219820Sjeff
648219820Sjeff	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
649219820Sjeff
650219820Sjeff	kfree(page_list);
651219820Sjeff	return err;
652219820Sjeff}
653219820SjeffEXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
654219820Sjeff
/*
 * Initialize the MR table: the MPT index bitmap and the MTT buddy
 * allocator (in segment units), then pre-reserve the firmware-reserved
 * MTT range.  Slaves skip this entirely — their MR handling is
 * forwarded to the master.  Returns 0 or a negative errno, unwinding
 * partial initialization on failure.
 */
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	/* Nothing to do for slaves - all MR handling is forwarded
	* to the master */
	if (mlx4_is_slave(dev))
		return 0;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	/* The buddy allocator works in segments of 2^log_mtts_per_seg
	 * MTT entries. */
	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
			      (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		/* Carve out the reserved MTTs so normal allocations
		 * never hand them out; remembered for cleanup. */
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}
702219820Sjeff
703219820Sjeffvoid mlx4_cleanup_mr_table(struct mlx4_dev *dev)
704219820Sjeff{
705255932Salfred	struct mlx4_priv *priv = mlx4_priv(dev);
706255932Salfred	struct mlx4_mr_table *mr_table = &priv->mr_table;
707219820Sjeff
708255932Salfred	if (mlx4_is_slave(dev))
709255932Salfred		return;
710255932Salfred	if (priv->reserved_mtts >= 0)
711255932Salfred		mlx4_free_mtt_range(dev, priv->reserved_mtts,
712255932Salfred				    fls(dev->caps.reserved_mtts - 1));
713219820Sjeff	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
714219820Sjeff	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
715219820Sjeff}
716219820Sjeff
/*
 * Validate an FMR remap request: page count within the FMR's limit,
 * iova aligned to the FMR page size, and the remap budget (max_maps)
 * not yet exhausted.  Returns 0 if acceptable, -EINVAL otherwise.
 */
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				  int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		/* NOTE(review): deliberately compiled out.  The test also
		 * looks inverted (~page_mask keeps the page-frame bits, so
		 * any nonzero page address would fail) — confirm before
		 * ever enabling this. */
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}
743219820Sjeff
/*
 * Remap an FMR onto a new physical page list.  The MPT entry is first
 * marked software-owned so the HCA stops using it, the MTTs and MPT
 * fields are rewritten, then ownership is handed back to hardware.
 * The key is bumped by num_mpts each map so stale remote keys are
 * invalidated.  Ordering between the status byte, the MTTs and the
 * MPT fields is enforced with write barriers — do not reorder.
 * Returns 0, or a negative errno from mlx4_check_fmr().
 */
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	/* Advance the key's tag portion to invalidate old keys. */
	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	/* First byte of the MPT entry is its status/ownership byte. */
	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start  = cpu_to_be64(iova);

	/* Make MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
790219820Sjeff
/*
 * Allocate a fast memory region: an MR whose MTT entries all fit in a
 * single page so mlx4_map_phys_fmr() can rewrite them cheaply.
 * Validates the limits, allocates the underlying MR, and caches a
 * direct pointer to its MTT entries.  Returns 0 or a negative errno.
 */
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	/* Page size must be supported by the device and shift < 32. */
	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	/* Cache the MTT location so remaps can write entries directly. */
	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
833219820Sjeff
834219820Sjeffint mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
835219820Sjeff{
836219820Sjeff	struct mlx4_priv *priv = mlx4_priv(dev);
837219820Sjeff	int err;
838219820Sjeff
839219820Sjeff	err = mlx4_mr_enable(dev, &fmr->mr);
840219820Sjeff	if (err)
841219820Sjeff		return err;
842219820Sjeff
843219820Sjeff	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
844219820Sjeff				    key_to_hw_index(fmr->mr.key), NULL);
845219820Sjeff	if (!fmr->mpt)
846219820Sjeff		return -ENOMEM;
847219820Sjeff
848219820Sjeff	return 0;
849219820Sjeff}
850219820SjeffEXPORT_SYMBOL_GPL(mlx4_fmr_enable);
851219820Sjeff
852219820Sjeffvoid mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
853219820Sjeff		    u32 *lkey, u32 *rkey)
854219820Sjeff{
855255932Salfred	struct mlx4_cmd_mailbox *mailbox;
856255932Salfred	int err;
857255932Salfred
858219820Sjeff	if (!fmr->maps)
859219820Sjeff		return;
860219820Sjeff
861219820Sjeff	fmr->maps = 0;
862219820Sjeff
863255932Salfred	mailbox = mlx4_alloc_cmd_mailbox(dev);
864255932Salfred	if (IS_ERR(mailbox)) {
865255932Salfred		err = PTR_ERR(mailbox);
866255932Salfred		mlx4_warn(dev, "mlx4_alloc_cmd_mailbox failed (%d)\n", err);
867255932Salfred		return;
868255932Salfred	}
869255932Salfred
870255932Salfred	err = mlx4_HW2SW_MPT(dev, NULL,
871255932Salfred			     key_to_hw_index(fmr->mr.key) &
872255932Salfred			     (dev->caps.num_mpts - 1));
873255932Salfred	mlx4_free_cmd_mailbox(dev, mailbox);
874255932Salfred	if (err) {
875255932Salfred		mlx4_warn(dev, "mlx4_HW2SW_MPT failed (%d)\n", err);
876255932Salfred		return;
877255932Salfred	}
878255932Salfred	fmr->mr.enabled = MLX4_MR_EN_SW;
879219820Sjeff}
880219820SjeffEXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
881219820Sjeff
882219820Sjeffint mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
883219820Sjeff{
884219820Sjeff	if (fmr->maps)
885219820Sjeff		return -EBUSY;
886219820Sjeff
887219820Sjeff	mlx4_mr_free(dev, &fmr->mr);
888255932Salfred	fmr->mr.enabled = MLX4_MR_DISABLED;
889219820Sjeff
890219820Sjeff	return 0;
891219820Sjeff}
892219820SjeffEXPORT_SYMBOL_GPL(mlx4_fmr_free);
893219820Sjeff
894219820Sjeffint mlx4_SYNC_TPT(struct mlx4_dev *dev)
895219820Sjeff{
896255932Salfred	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
897255932Salfred			MLX4_CMD_NATIVE);
898219820Sjeff}
899219820SjeffEXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
900