Searched refs:map (Results 226 - 250 of 2546) sorted by last modified time

/linux-master/fs/f2fs/
f2fs.h
3848 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
data.c
1454 struct f2fs_map_blocks *map, struct dnode_of_data *dn,
1464 if (map->m_may_create &&
1468 if (map->m_next_pgofs)
1469 *map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
1470 if (map->m_next_extent)
1471 *map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
1476 struct f2fs_map_blocks *map, int flag)
1479 unsigned int maxblocks = map->m_len;
1480 pgoff_t pgoff = (pgoff_t)map->m_lblk;
1486 map
1453 f2fs_map_no_dnode(struct inode *inode, struct f2fs_map_blocks *map, struct dnode_of_data *dn, pgoff_t pgoff) argument
1475 f2fs_map_blocks_cached(struct inode *inode, struct f2fs_map_blocks *map, int flag) argument
1515 f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag) argument
1763 struct f2fs_map_blocks map; local
1892 struct f2fs_map_blocks map; local
2045 f2fs_read_single_page(struct inode *inode, struct page *page, unsigned nr_pages, struct f2fs_map_blocks *map, struct bio **bio_ret, sector_t *last_block_in_bio, bool is_readahead) argument
2331 struct f2fs_map_blocks map; local
3805 struct f2fs_map_blocks map; local
3919 struct f2fs_map_blocks map; local
4155 struct f2fs_map_blocks map = {}; local
[all...]
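
The f2fs.h prototype and the data.c hits above outline the f2fs block-mapping entry point. A minimal caller-side sketch, assuming the internal fs/f2fs/f2fs.h header and the conventional F2FS_GET_BLOCK_DEFAULT flag (the helper name and flag are assumptions; the struct fields appear in the hits):

#include "f2fs.h"	/* fs/f2fs internal header; sketch assumes in-tree context */

/* Hedged sketch: resolve a logical range of an f2fs inode to physical
 * blocks via f2fs_map_blocks(). Field names match the hits above;
 * F2FS_GET_BLOCK_DEFAULT is assumed from f2fs convention. */
static int example_f2fs_lookup(struct inode *inode, pgoff_t lblk,
			       unsigned int len)
{
	struct f2fs_map_blocks map = {};
	int err;

	map.m_lblk = lblk;		/* first logical block to resolve */
	map.m_len = len;		/* number of blocks requested */
	map.m_may_create = false;	/* pure lookup, no allocation */

	err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
	if (err)
		return err;

	/* on success, map.m_pblk/map.m_len describe the mapped extent */
	return 0;
}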
/linux-master/drivers/vhost/
vdpa.c
157 * a chance to clean up or reset the map to the desired
900 struct vhost_iotlb_map *map, u32 asid)
905 ops->dma_unmap(vdpa, asid, map->start, map->size);
907 iommu_unmap(v->domain, map->start, map->size);
915 struct vhost_iotlb_map *map; local
919 while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
920 pinned = PFN_DOWN(map->size);
921 for (pfn = PFN_DOWN(map
899 vhost_vdpa_general_unmap(struct vhost_vdpa *v, struct vhost_iotlb_map *map, u32 asid) argument
937 struct vhost_iotlb_map *map; local
[all...]
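
The vhost_vdpa_general_unmap() hits show the recurring vhost IOTLB teardown idiom: repeatedly fetch the first interval-tree node overlapping a range and release it. A minimal sketch using only the helpers visible in these results (the per-entry cleanup is elided):

#include <linux/vhost_iotlb.h>

/* Sketch of the teardown loop from the vdpa.c hits above: drain every
 * mapping overlapping [start, last] from the interval tree. */
static void example_unmap_range(struct vhost_iotlb *iotlb, u64 start, u64 last)
{
	struct vhost_iotlb_map *map;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		/* per-entry cleanup (unpinning, IOMMU unmap, ...) goes here */
		vhost_iotlb_map_free(iotlb, map);
	}
}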
/linux-master/drivers/vdpa/vdpa_user/
vduse_dev.c
18 #include <linux/dma-map-ops.h>
1155 struct vhost_iotlb_map *map; local
1173 map = vhost_iotlb_itree_first(dev->domain->iotlb,
1175 if (map) {
1176 map_file = (struct vdpa_map_file *)map->opaque;
1179 entry.start = map->start;
1180 entry.last = map->last;
1181 entry.perm = map->perm;
1362 struct vhost_iotlb_map *map; local
1382 map
[all...]
iova_domain.c
49 struct vhost_iotlb_map *map; local
51 while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
52 map_file = (struct vdpa_map_file *)map->opaque;
55 vhost_iotlb_map_free(domain->iotlb, map);
63 struct vhost_iotlb_map *map; local
70 for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
71 map = vhost_iotlb_itree_next(map, start, last)) {
72 map_file = (struct vdpa_map_file *)map
92 struct vhost_iotlb_map *map; local
106 struct vduse_bounce_map *map; local
126 struct vduse_bounce_map *map; local
164 struct vduse_bounce_map *map; local
194 struct vhost_iotlb_map *map; local
213 struct vduse_bounce_map *map; local
232 struct vduse_bounce_map *map; local
253 struct vduse_bounce_map *map; local
288 struct vduse_bounce_map *map; local
484 struct vhost_iotlb_map *map; local
577 struct vduse_bounce_map *map; local
[all...]
/linux-master/drivers/vdpa/vdpa_sim/
vdpa_sim.c
16 #include <linux/dma-map-ops.h>
633 struct vhost_iotlb_map *map; local
647 for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
648 map = vhost_iotlb_itree_next(map, start, last)) {
649 ret = vhost_iotlb_add_range(iommu, map->start,
650 map->last, map->addr, map
[all...]
/linux-master/drivers/vdpa/mlx5/net/
mlx5_vnet.c
87 struct msi_map map; member in struct:mlx5_vq_restore_info
129 struct msi_map map; member in struct:mlx5_vdpa_virtqueue
519 void __iomem *uar_page = ndev->mvdev.res.uar->map;
546 void __iomem *uar_page = ndev->mvdev.res.uar->map;
909 if (mvq->map.virq) {
911 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index);
1394 err = request_irq(ent->map.virq, mlx5_vdpa_int_handler, 0,
1400 mvq->map = ent->map;
1413 if (mvq->map
[all...]
/linux-master/drivers/irqchip/
irq-riscv-intc.c
167 .map = riscv_intc_domain_map,
/linux-master/drivers/input/misc/
88pm80x_onkey.c
26 struct regmap *map; member in struct:pm80x_onkey_info
37 ret = regmap_read(info->map, PM800_STATUS_1, &val);
72 info->map = info->pm80x->regmap;
73 if (!info->map) {
110 regmap_update_bits(info->map, PM800_RTC_MISC4, PM800_LONG_ONKEY_EN,
113 regmap_update_bits(info->map, PM800_RTC_MISC3,
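
The 88pm80x_onkey.c hits illustrate the regmap pattern: the driver caches a struct regmap * and goes through regmap_read()/regmap_update_bits() instead of raw bus I/O. A minimal sketch of that pattern; the register and mask values here are hypothetical placeholders (the driver itself uses PM800_STATUS_1, PM800_RTC_MISC4, etc.):

#include <linux/regmap.h>

#define EXAMPLE_STATUS_REG 0x01 /* hypothetical status register */
#define EXAMPLE_CTRL_REG   0xe2 /* hypothetical control register */
#define EXAMPLE_CTRL_MASK  0x01 /* hypothetical enable bit */

/* Sketch: read a status register, then read-modify-write only the bits
 * under the mask in a control register, as the driver above does. */
static int example_regmap_poke(struct regmap *map)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, EXAMPLE_STATUS_REG, &val);
	if (ret < 0)
		return ret;

	return regmap_update_bits(map, EXAMPLE_CTRL_REG, EXAMPLE_CTRL_MASK,
				  val ? EXAMPLE_CTRL_MASK : 0);
}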
/linux-master/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c
105 * initialized and uninitialized map values, we plus 1 to the actual value when
2441 hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
2442 hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
2482 &link_tbl->table.map,
2499 tbl->table.map);
3681 } map[] = { local
3704 for (i = 0; i < ARRAY_SIZE(map); i++)
3705 if (cqe_status == map[i].cqe_status) {
3706 wc->status = map[i].wc_status;
3756 * between initialized and uninitialized map value
5260 static const enum ib_qp_state map[] = { local
[all...]
hns_roce_device.h
362 dma_addr_t map; member in struct:hns_roce_buf_list
1100 return buf->trunk_list[offset >> buf->trunk_shift].map +
/linux-master/drivers/infiniband/hw/hfi1/
tid_rdma.c
1111 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
1112 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
1214 "unexpected odd allocation cnt %u map 0x%x used %u",
1215 cnt, grp->map, grp->used);
1218 node->map = grp->map;
1221 grp->base, grp->map, grp->used, cnt);
1228 * modifying grp->map. This is done as follows, being cognizant of the lists
1315 if (node->map & BIT(i) || cnt >= node->cnt) {
1333 * (b) the group map show
[all...]
/linux-master/samples/vfio-mdev/
mbochs.c
445 char *map; local
482 map = kmap(pg);
484 memcpy(map + poff, buf, count);
486 memcpy(buf, map + poff, count);
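
The mbochs.c hits show the classic temporary-kmap access pattern: map a page, memcpy() at an offset within it, unmap. A minimal sketch (the kunmap() pairing is implied rather than shown in the hits):

#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch of the mbochs.c pattern above: temporarily map a page, copy
 * in or out at an offset within it, then drop the mapping. */
static void example_page_copy(struct page *pg, unsigned int poff,
			      void *buf, size_t count, bool is_write)
{
	char *map = kmap(pg);

	if (is_write)
		memcpy(map + poff, buf, count);
	else
		memcpy(buf, map + poff, count);

	kunmap(pg);
}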
/linux-master/include/linux/
kvm_host.h
307 static inline bool kvm_vcpu_mapped(struct kvm_host_map *map) argument
309 return !!map->hva;
684 struct hlist_head map[] __counted_by(nr_rt_entries);
1305 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
1306 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
1338 * @gpa: guest physical address to map.
1355 * @hva: userspace virtual address to map.
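
The kvm_host.h declarations above pair kvm_vcpu_map()/kvm_vcpu_unmap() around host-side access to guest memory, with kvm_vcpu_mapped() as the validity check. A minimal sketch using exactly those signatures (the page-aligned gpa and the zeroing are illustrative assumptions):

#include <linux/kvm_host.h>

/* Sketch pairing the declarations above: map the guest page at gpa,
 * touch it through map.hva, then unmap and mark the page dirty. */
static int example_touch_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct kvm_host_map map;
	int ret;

	ret = kvm_vcpu_map(vcpu, gpa, &map);
	if (ret)
		return ret;

	if (kvm_vcpu_mapped(&map))
		memset(map.hva, 0, PAGE_SIZE);	/* host-side access */

	kvm_vcpu_unmap(vcpu, &map, true);	/* dirty = true */
	return 0;
}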
acpi.h
210 void __acpi_unmap_table(void __iomem *map, unsigned long size);
1002 static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) argument
/linux-master/fs/fuse/
dev.c
686 * Get another pagefull of userspace buffer, and map it to kernel
2348 struct fuse_backing_map map; local
2356 if (copy_from_user(&map, argp, sizeof(map)))
2359 return fuse_backing_open(fud->fc, &map);
/linux-master/fs/ext4/
xattr.c
988 /* New ea_inode, inode map, block bitmap, group descriptor. */
1005 * Old ea_inode, inode map, block bitmap, group descriptor.
1396 struct ext4_map_blocks map; local
1397 map.m_lblk = block += ret;
1398 map.m_len = max_blocks -= ret;
1400 ret = ext4_map_blocks(handle, ea_inode, &map,
inode.c
246 * status. Erase i_data so that it becomes a valid empty block map.
379 struct ext4_map_blocks *map)
385 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
386 ext4_error_inode(inode, func, line, map->m_pblk,
388 "(length %d)", (unsigned long) map->m_lblk,
389 map->m_pblk, map->m_len);
410 #define check_block_validity(inode, map) \
411 __check_block_validity((inode), __func__, __LINE__, (map))
377 __check_block_validity(struct inode *inode, const char *func, unsigned int line, struct ext4_map_blocks *map) argument
414 ext4_map_blocks_es_recheck(handle_t *handle, struct inode *inode, struct ext4_map_blocks *es_map, struct ext4_map_blocks *map, int flags) argument
479 ext4_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags) argument
756 struct ext4_map_blocks map; local
821 struct ext4_map_blocks map; local
1537 struct ext4_map_blocks map; member in struct:mpage_da_data
1686 ext4_da_map_blocks(struct inode *inode, sector_t iblock, struct ext4_map_blocks *map, struct buffer_head *bh) argument
1803 struct ext4_map_blocks map; local
1902 struct ext4_map_blocks *map = &mpd->map; local
2134 struct ext4_map_blocks *map = &mpd->map; local
2204 struct ext4_map_blocks *map = &mpd->map; local
3216 ext4_set_iomap(struct inode *inode, struct iomap *iomap, struct ext4_map_blocks *map, loff_t offset, loff_t length, unsigned int flags) argument
3274 ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map, unsigned int flags) argument
3340 struct ext4_map_blocks map; local
3435 struct ext4_map_blocks map; local
[all...]
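
The xattr.c hits (m_lblk/m_len advanced by the return value) and the ext4_map_blocks() definition above show the canonical calling pattern: a positive return is the number of blocks mapped, so the caller advances and retries. A minimal sketch, assuming the conventional EXT4_GET_BLOCKS_CREATE flag:

#include "ext4.h"	/* fs/ext4 internal header; sketch assumes in-tree context */

/* Sketch of the loop visible in the xattr.c hits: request up to
 * max_blocks starting at block, advancing by however many blocks
 * each ext4_map_blocks() call actually mapped. */
static int example_alloc_range(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, unsigned int max_blocks)
{
	struct ext4_map_blocks map;
	int ret = 0;

	while (max_blocks > 0) {
		map.m_lblk = block;
		map.m_len = max_blocks;
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_CREATE);
		if (ret <= 0)
			break;
		block += ret;		/* ret = blocks mapped this call */
		max_blocks -= ret;
	}
	return ret < 0 ? ret : 0;
}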
extents.c
3291 * by @map as split_flags indicates
3303 struct ext4_map_blocks *map,
3314 int allocated = map->m_len;
3322 if (map->m_lblk + map->m_len < ee_block + ee_len) {
3331 map->m_lblk + map->m_len, split_flag1, flags1);
3335 allocated = ee_len - (map->m_lblk - ee_block);
3341 path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
3348 (unsigned long) map
3300 ext4_split_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path **ppath, struct ext4_map_blocks *map, int split_flag, int flags) argument
3391 ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path **ppath, int flags) argument
3649 ext4_split_convert_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path **ppath, int flags) argument
3691 ext4_convert_unwritten_extents_endio(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path **ppath) argument
3754 convert_initialized_extent(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path **ppath, unsigned int *allocated) argument
3825 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path **ppath, int flags, unsigned int allocated, ext4_fsblk_t newblock) argument
3980 get_implied_cluster_alloc(struct super_block *sb, struct ext4_map_blocks *map, struct ext4_extent *ex, struct ext4_ext_path *path) argument
4126 ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags) argument
4457 struct ext4_map_blocks map; local
4821 struct ext4_map_blocks map; local
5973 struct ext4_map_blocks map; local
5995 struct ext4_map_blocks map; local
6087 struct ext4_map_blocks map; local
[all...]
/linux-master/drivers/vfio/
vfio_iommu_type1.c
598 * the iommu can only map chunks of consecutive pfns anyway, so get the
1135 * That way the user will be able to map/unmap buffers whose size/
1138 * to map the buffer.
1493 * Check dma map request is within a valid iova range
1546 struct vfio_iommu_type1_dma_map *map)
1548 bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR;
1549 dma_addr_t iova = map->iova;
1550 unsigned long vaddr = map->vaddr;
1551 size_t size = map->size;
1557 if (map
1545 vfio_dma_do_map(struct vfio_iommu *iommu, struct vfio_iommu_type1_dma_map *map) argument
2814 struct vfio_iommu_type1_dma_map map; local
[all...]
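
vfio_dma_do_map() above is the kernel-side handler for the VFIO_IOMMU_MAP_DMA ioctl; the flags/iova/vaddr/size fields it reads belong to the standard struct vfio_iommu_type1_dma_map UAPI. A userspace-side sketch of issuing that request (an illustration, not the kernel path itself):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Userspace sketch of the request serviced by vfio_dma_do_map() above:
 * map size bytes of process memory at vaddr into the device IOVA space. */
static int example_map_dma(int container_fd, void *vaddr,
			   uint64_t iova, uint64_t size)
{
	struct vfio_iommu_type1_dma_map map;

	memset(&map, 0, sizeof(map));
	map.argsz = sizeof(map);
	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	map.vaddr = (uint64_t)(uintptr_t)vaddr;
	map.iova  = iova;
	map.size  = size;

	return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}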
/linux-master/drivers/vfio/pci/
vfio_pci_intrs.c
430 struct msi_map map; local
441 map = pci_msix_alloc_irq_at(pdev, vector, NULL);
444 return map.index < 0 ? map.index : map.virq;
vfio_pci_config.c
1519 u8 *map = vdev->pci_config_map; local
1578 if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
1582 __func__, pos + i, map[pos + i], cap);
1587 memset(map + pos, cap, len);
1612 u8 *map = vdev->pci_config_map; local
1666 if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
1670 __func__, epos + i, map[epos + i], ecap);
1676 * up to 0xFE we'll need to up this to a two-byte, byte map.
1680 memset(map + epos, ecap, len);
1732 u8 *map, *vconfi local
[all...]
/linux-master/drivers/vfio/pci/mlx5/
cmd.c
1147 mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
1750 mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
/linux-master/drivers/staging/media/ipu3/
ipu3-v4l2.c
325 return imgu_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map);
341 imgu_dmamap_unmap(imgu, &buf->map);
370 imgu_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
/linux-master/drivers/staging/media/atomisp/pci/
sh_css.c
1592 map_sp_threads(struct ia_css_stream *stream, bool map) argument
1600 IA_CSS_ENTER_PRIVATE("stream = %p, map = %s",
1601 stream, map ? "true" : "false");
1611 ia_css_pipeline_map(main_pipe->pipe_num, map);
1630 ia_css_pipeline_map(capture_pipe->pipe_num, map);
1634 ia_css_pipeline_map(copy_pipe->pipe_num, map);
1636 /* DH regular multi pipe - not continuous mode: map the next pipes too */
1641 ia_css_pipeline_map(stream->pipes[i]->pipe_num, map);
9048 void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map) argument
9066 /* map require
[all...]

Completed in 804 milliseconds
