Cross-reference listing: lines matching references to `page_list` (fragments of mlx4 driver code; original file line numbers shown)
534 int start_index, int npages, u64 *page_list)
551 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
560 int start_index, int npages, u64 *page_list)
575 err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
580 page_list += chunk;
588 int start_index, int npages, u64 *page_list)
611 inbox[i + 2] = cpu_to_be64(page_list[i] |
621 page_list += chunk;
627 return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
634 u64 *page_list;
638 page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
639 if (!page_list)
644 page_list[i] = buf->direct.map + (i << buf->page_shift);
646 page_list[i] = buf->page_list[i].map;
648 err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
650 kfree(page_list);
717 static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
731 /* Trust the user not to pass misaligned data in page_list */
734 if (page_list[i] & ~page_mask)
744 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
750 err = mlx4_check_fmr(fmr, page_list, npages, iova);
769 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);