Lines Matching defs:dm (drivers/infiniband/hw/mlx5/dm.c)

7 #include "dm.h"
12 static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
15 struct mlx5_core_dev *dev = dm->dev;
45 spin_lock(&dm->lock);
46 page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
52 bitmap_set(dm->memic_alloc_pages,
55 spin_unlock(&dm->lock);
65 spin_lock(&dm->lock);
66 bitmap_clear(dm->memic_alloc_pages,
68 spin_unlock(&dm->lock);
87 void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
90 struct mlx5_core_dev *dev = dm->dev;
108 spin_lock(&dm->lock);
109 bitmap_clear(dm->memic_alloc_pages,
111 spin_unlock(&dm->lock);
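
The matches above (source lines 45-68 and 108-111) show the MEMIC page allocator: a bitmap guarded by a spinlock, searched with bitmap_find_next_zero_area(), committed with bitmap_set(), and rolled back with bitmap_clear() when the firmware command fails or the region is freed. A minimal sketch of that pattern, using stand-in names (demo_pool, demo_pool_alloc) rather than the driver's real structures:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct demo_pool {
	spinlock_t lock;
	unsigned long *pages;	/* one bit per allocatable page */
	unsigned int nr_pages;
};

/* Reserve @nr contiguous pages; returns the first page index or -ENOMEM. */
static int demo_pool_alloc(struct demo_pool *pool, unsigned int nr)
{
	unsigned long idx;

	spin_lock(&pool->lock);
	idx = bitmap_find_next_zero_area(pool->pages, pool->nr_pages,
					 0, nr, 0);
	if (idx >= pool->nr_pages) {
		spin_unlock(&pool->lock);
		return -ENOMEM;
	}
	bitmap_set(pool->pages, idx, nr);
	spin_unlock(&pool->lock);
	return idx;
}

/* Release pages previously reserved by demo_pool_alloc(); the driver
 * uses the same bitmap_clear()-under-lock shape both for dealloc and
 * for rolling back a failed firmware command. */
static void demo_pool_free(struct demo_pool *pool, unsigned int idx,
			   unsigned int nr)
{
	spin_lock(&pool->lock);
	bitmap_clear(pool->pages, idx, nr);
	spin_unlock(&pool->lock);
}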
114 void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
118 struct mlx5_core_dev *dev = dm->dev;
128 static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
133 struct mlx5_core_dev *dev = dm->dev;
165 struct mlx5_ib_dm_memic *dm =
167 struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);
169 mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
170 kfree(dm);
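
Source lines 165-170 are the body of a kref release callback: container_of() recovers the outer object from the embedded struct kref, the device memory is returned, and the object itself is freed. A hedged sketch of that shape; demo_dm and demo_dm_free are illustrative names, and the extra fields stand in for the base.dev_addr/base.size pair visible above:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_dm {
	struct kref ref;
	phys_addr_t dev_addr;	/* device memory handle */
	u64 size;		/* rounded-up length */
};

/* Invoked by kref_put() once the last reference is dropped. */
static void demo_dm_free(struct kref *kref)
{
	struct demo_dm *dm = container_of(kref, struct demo_dm, ref);

	/* return device memory here (cf. mlx5_cmd_dealloc_memic above) */
	kfree(dm);
}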
192 static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
197 op_entry = xa_load(&dm->ops, op);
211 struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
226 mutex_lock(&dm->ops_xa_lock);
227 err = map_existing_op(dm, op, attrs);
235 err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
242 op_entry->dm = dm;
245 MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
248 mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
253 kref_get(&dm->ref);
259 err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
262 mutex_unlock(&dm->ops_xa_lock);
269 mutex_unlock(&dm->ops_xa_lock);
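
Source lines 192-269 implement map-or-create for per-opcode entries: under ops_xa_lock, xa_load() checks for an existing mapping, and on a miss a new entry is allocated, the parent is pinned with kref_get(), and the entry is published with xa_insert(). A simplified sketch of that locking and unwind shape, with demo_* stand-ins in place of the driver's op-entry and mmap machinery:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_parent {
	struct xarray ops;
	struct mutex ops_xa_lock;
	struct kref ref;
};

struct demo_op_entry {
	struct demo_parent *parent;
};

static void demo_parent_free(struct kref *kref)
{
	kfree(container_of(kref, struct demo_parent, ref));
}

/* Map @op, reusing an existing entry when one is already published. */
static int demo_map_op(struct demo_parent *parent, unsigned long op)
{
	struct demo_op_entry *entry;
	int err = 0;

	mutex_lock(&parent->ops_xa_lock);
	entry = xa_load(&parent->ops, op);
	if (entry)
		goto unlock;			/* already mapped */

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto unlock;
	}
	entry->parent = parent;

	kref_get(&parent->ref);			/* the entry pins its parent */
	err = xa_insert(&parent->ops, op, entry, GFP_KERNEL);
	if (err) {
		kref_put(&parent->ref, demo_parent_free);
		kfree(entry);
	}
unlock:
	mutex_unlock(&parent->ops_xa_lock);
	return err;
}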
278 struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
279 struct mlx5_ib_dm_memic *dm;
288 dm = kzalloc(sizeof(*dm), GFP_KERNEL);
289 if (!dm)
292 dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
293 dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
294 dm->base.ibdm.device = ctx->device;
296 kref_init(&dm->ref);
297 xa_init(&dm->ops);
298 mutex_init(&dm->ops_xa_lock);
299 dm->req_length = attr->length;
301 err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
302 dm->base.size, attr->alignment);
304 kfree(dm);
308 address = dm->base.dev_addr & PAGE_MASK;
309 err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
310 dm->base.size, address);
312 mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
313 kfree(dm);
317 page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
323 start_offset = dm->base.dev_addr & ~PAGE_MASK;
330 return &dm->base.ibdm;
333 rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
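
Source lines 288-333 build the MEMIC object: the requested length is rounded up with roundup(), the result is split into a page-aligned mmap base (dev_addr & PAGE_MASK) and an in-page offset (dev_addr & ~PAGE_MASK), and every failure frees exactly what the earlier steps acquired. A sketch of that unwind order, reusing the demo_dm type from the kref sketch above; demo_hw_alloc()/demo_publish() are hypothetical placeholders for the firmware command and the rdma mmap-entry registration:

#include <linux/err.h>

/* Hypothetical helpers standing in for the real firmware and mmap steps. */
static int demo_hw_alloc(struct demo_dm *dm) { return 0; }
static void demo_hw_dealloc(struct demo_dm *dm) { }
static int demo_publish(struct demo_dm *dm) { return 0; }

static struct demo_dm *demo_alloc(size_t length)
{
	struct demo_dm *dm;
	int err;

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	kref_init(&dm->ref);			/* caller owns the first ref */
	dm->size = roundup(length, 64);		/* 64: stand-in granularity */

	err = demo_hw_alloc(dm);
	if (err)
		goto err_free;

	/* mmap offset math as on source lines 308 and 323:
	 *   base   = dev_addr & PAGE_MASK;
	 *   offset = dev_addr & ~PAGE_MASK;
	 */
	err = demo_publish(dm);
	if (err)
		goto err_dealloc;

	return dm;

err_dealloc:
	demo_hw_dealloc(dm);			/* undo in reverse order */
err_free:
	kfree(dm);
	return ERR_PTR(err);
}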
359 struct mlx5_ib_dm_icm *dm;
385 dm = kzalloc(sizeof(*dm), GFP_KERNEL);
386 if (!dm)
389 dm->base.type = type;
390 dm->base.ibdm.device = ctx->device;
398 dm->base.size = act_size;
403 &dm->base.dev_addr, &dm->obj_id);
408 &dm->base.dev_addr, sizeof(dm->base.dev_addr));
410 mlx5_dm_sw_icm_dealloc(dev, icm_type, dm->base.size,
412 dm->base.dev_addr, dm->obj_id);
415 return &dm->base.ibdm;
417 kfree(dm);
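
In the ICM branch (source lines 385-417), once the hardware allocation succeeds the device address must reach userspace; if that copy fails, the fresh allocation would be unreachable, so it is deallocated before returning. A sketch under that assumption, with demo_* hypothetical helpers in place of mlx5_dm_sw_icm_alloc() and the uverbs copy step:

#include <linux/types.h>

/* Hypothetical stand-ins for the firmware allocation and the
 * copy-to-userspace step in the listing above. */
static int demo_icm_hw_alloc(u64 *dev_addr, u32 *obj_id) { return 0; }
static void demo_icm_hw_dealloc(u64 dev_addr, u32 obj_id) { }
static int demo_copy_to_user_attr(const void *src, size_t len) { return 0; }

static int demo_icm_alloc(u64 *dev_addr, u32 *obj_id)
{
	int err;

	err = demo_icm_hw_alloc(dev_addr, obj_id);
	if (err)
		return err;

	/* If the handle never reaches userspace, nobody can free the
	 * allocation later, so roll it back immediately. */
	err = demo_copy_to_user_attr(dev_addr, sizeof(*dev_addr));
	if (err)
		demo_icm_hw_dealloc(*dev_addr, *obj_id);

	return err;
}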
451 static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
456 mutex_lock(&dm->ops_xa_lock);
457 xa_for_each(&dm->ops, idx, entry) {
458 xa_erase(&dm->ops, idx);
461 mutex_unlock(&dm->ops_xa_lock);
464 static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
466 dm_memic_remove_ops(dm);
467 rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
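
Teardown at source lines 451-467 walks the xarray under the same mutex used for insertion, erasing each op before releasing it; only then is the object's own mmap entry removed. Reusing the demo_parent/demo_op_entry types and demo_parent_free from the mapping sketch above (in this simplified model the entry's pin is dropped in the loop; the real driver instead removes each op's mmap entry here and drops the reference in its mmap-free callback):

static void demo_remove_all_ops(struct demo_parent *parent)
{
	struct demo_op_entry *entry;
	unsigned long idx;

	mutex_lock(&parent->ops_xa_lock);
	xa_for_each(&parent->ops, idx, entry) {
		xa_erase(&parent->ops, idx);
		kfree(entry);
		kref_put(&parent->ref, demo_parent_free);
	}
	mutex_unlock(&parent->ops_xa_lock);
}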
471 struct mlx5_ib_dm_icm *dm)
473 enum mlx5_sw_icm_type type = get_icm_type(dm->base.type);
474 struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
477 err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
478 dm->base.dev_addr, dm->obj_id);
480 kfree(dm);
489 struct mlx5_ib_dm *dm = to_mdm(ibdm);
491 switch (dm->type) {
510 struct mlx5_ib_dm *dm = to_mdm(ibdm);
516 if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
551 mdm = op_entry->dm;
552 mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
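
The final matches (source lines 489-552) show the two release sites: deallocation dispatches on dm->type, and the mmap-free callback for an op entry undoes the hardware mapping (mlx5_cmd_dealloc_memic_op) before dropping the reference taken at insert time. In the simplified demo_* model above, that last step reduces to:

/* Counterpart to the kref_get() in the mapping sketch: called once an
 * op entry goes away, after its hardware mapping has been undone. */
static void demo_op_release(struct demo_op_entry *entry)
{
	struct demo_parent *parent = entry->parent;

	kfree(entry);
	kref_put(&parent->ref, demo_parent_free);
}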