Searched refs:map (Results 51 - 75 of 2546) sorted by last modified time

/linux-master/drivers/pinctrl/
core.c
967 const struct pinctrl_map *map)
973 state = find_state(p, map->name);
975 state = create_state(p, map->name);
979 if (map->type == PIN_MAP_TYPE_DUMMY_STATE)
986 setting->type = map->type;
992 get_pinctrl_dev_from_devname(map->ctrl_dev_name);
996 if (!strcmp(map->ctrl_dev_name, map->dev_name))
1002 dev_info(p->dev, "unknown pinctrl device %s in map entry, deferring probe",
1003 map
966 add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev, const struct pinctrl_map *map) argument
1054 const struct pinctrl_map *map; local
1503 pinctrl_unregister_mappings(const struct pinctrl_map *map) argument
1835 const struct pinctrl_map *map; local
[all...]
devicetree.c
20 * @map: the mapping table entries
26 struct pinctrl_map *map; member in struct:pinctrl_dt_map
31 struct pinctrl_map *map, unsigned int num_maps)
36 kfree_const(map[i].dev_name);
37 map[i].dev_name = NULL;
43 ops->dt_free_map(pctldev, map, num_maps);
46 kfree(map);
55 pinctrl_unregister_mappings(dt_map->map);
57 dt_free_map(dt_map->pctldev, dt_map->map,
67 struct pinctrl_map *map, unsigne
30 dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned int num_maps) argument
65 dt_remember_or_free_map(struct pinctrl *p, const char *statename, struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned int num_maps) argument
118 struct pinctrl_map *map; local
184 struct pinctrl_map *map; local
[all...]
/linux-master/scripts/
Makefile.build
258 System.map $(OBJDUMP) > $@
/linux-master/fs/erofs/
internal.h
214 EROFS_NO_KMAP, /* don't map the buffer */
215 EROFS_KMAP, /* use kmap_local_page() to map the buffer */
360 /* Used to map the whole extent if non-negligible data is requested for LZMA */
362 /* Used to map tail extent for tailpacking inline or fragment pcluster */
413 int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map);
464 int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
fscache.c
262 struct erofs_map_blocks map; local
268 map.m_la = pos;
269 ret = erofs_map_blocks(inode, &map);
273 if (map.m_flags & EROFS_MAP_META) {
281 offset = erofs_blkoff(sb, map.m_pa);
282 blknr = erofs_blknr(sb, map.m_pa);
283 size = map.m_llen;
301 if (!(map.m_flags & EROFS_MAP_MAPPED)) {
310 count = min_t(size_t, map.m_llen - (pos - map
[all...]
/linux-master/drivers/irqchip/
irq-gic-v3-its.c
287 struct its_vlpi_map *map = get_vlpi_map(d); local
288 if (map)
289 vpe = map->vpe;
312 struct its_vlpi_map *map = get_vlpi_map(d); local
313 if (map)
314 vpe = map->vpe;
832 * VPT is empty on map. This is why we never advertise PTZ.
917 struct its_vlpi_map *map; local
919 map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
928 return valid_vpe(its, map
935 struct its_vlpi_map *map; local
953 struct its_vlpi_map *map; local
1279 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); local
1293 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); local
1423 struct its_vlpi_map *map = get_vlpi_map(d); local
1481 struct its_vlpi_map *map = get_vlpi_map(d); local
1517 struct its_vlpi_map *map; local
1910 struct its_vlpi_map *map; local
[all...]
/linux-master/drivers/i2c/
i2c-core-base.c
745 /* This is a permissive address validity check, I2C address map constraints
1433 .map = i2c_host_notify_irq_map,
/linux-master/drivers/dma/xilinx/
xdma.c
1278 xdev->dma_dev.filter.map = pdata->device_map;
/linux-master/arch/riscv/mm/
init.c
20 #include <linux/dma-map-ops.h>
211 * map the kernel in the linear mapping as read-only: we do not want
225 * at worst, we map the linear mapping with PMD mappings.
987 * map the allocated physical pages since the linear mapping does not exist yet.
1128 * to map PUD_SIZE aligned on PUD_SIZE: if the relocation offset
1178 * us to reach paging_init(). We map all memory banks later
1196 * of fixmap region. We always map PMD_SIZE. Thus, both FIX_BTMAP_END
/linux-master/net/mac80211/
mlme.c
5931 new_active_links = sdata->u.mgd.ttlm_info.map &
5933 new_dormant_links = ~sdata->u.mgd.ttlm_info.map &
5974 sdata_info(sdata, "Invalid advertised T2L map direction\n");
6002 * not advertise a TID-to-link mapping that does not map all TIDs to the
6011 ttlm_info->map = ieee80211_get_ttlm(map_size, pos);
6012 if (!ttlm_info->map) {
6014 "Invalid advertised T2L map for TID 0\n");
6021 u16 map = ieee80211_get_ttlm(map_size, pos); local
6023 if (map != ttlm_info->map) {
6439 u16 new_active_links, new_dormant_links, new_suspended_links, map = 0; local
6492 __le16 map; local
6690 u16 map; local
[all...]
/linux-master/mm/
hugetlb.c
548 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from, argument
555 nrg = get_file_region_entry_from_cache(map, from, to);
556 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
558 coalesce_file_region(map, nrg);
691 * map. Regions will be taken from the cache to fill in this range.
699 * Return the number of new huge pages added to the map. This number is greater
754 * Examine the existing reserve map and determine how many
758 * map to add the specified range [f, t). region_chg does
760 * map. A number of new file_region structures is added to the cache as a
769 * reservation map fo
1163 set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) argument
[all...]
/linux-master/include/linux/
mm.h
179 * Default maximum number of active map areas, this limits the number of vmas
2541 * Whether we should manually check if we can map individual PTEs writable,
3956 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
/linux-master/fs/ntfs3/
ntfs_fs.h
482 bool are_bits_clear(const void *map, size_t bit, size_t nbits);
483 bool are_bits_set(const void *map, size_t bit, size_t nbits);
484 size_t get_set_bits_ex(const void *map, size_t bit, size_t nbits);
855 void ntfs_bitmap_set_le(void *map, unsigned int start, int len);
856 void ntfs_bitmap_clear_le(void *map, unsigned int start, int len);
/linux-master/drivers/net/ethernet/marvell/octeontx2/af/
rvu_npc.c
81 u32 map; local
85 map = pkind->pfchan_map[i];
86 if (((map >> 16) & 0x3F) == pf)
2060 * packet. And map this action to a counter to count dropped
2373 static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start, argument
2382 index = find_next_zero_bit(map, size, start);
2387 next = find_next_bit(map, end, index);
2404 static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end) argument
2413 index = find_next_zero_bit(map, end, start);
2417 next = find_next_bit(map, en
[all...]
/linux-master/drivers/net/ethernet/broadcom/bnxt/
bnxt.c
1356 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; local
1359 if (test_bit(idx, map->agg_idx_bmap))
1360 idx = find_first_zero_bit(map->agg_idx_bmap,
1362 __set_bit(idx, map->agg_idx_bmap);
1363 map->agg_id_tbl[agg_id] = idx;
1369 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; local
1371 __clear_bit(idx, map->agg_idx_bmap);
1376 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; local
1378 return map->agg_id_tbl[agg_id];
3275 struct bnxt_tpa_idx_map *map; local
[all...]
/linux-master/drivers/net/ethernet/broadcom/
b44.c
629 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
635 struct ring_info *src_map, *map; local
646 map = &bp->rx_buffers[dest_idx];
685 map->skb = skb;
686 map->mapping = mapping;
766 dma_addr_t map = rp->mapping; local
770 dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
804 dma_unmap_single(bp->sdev->dma_dev, map,
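The b44.c hits above only show fragments of the RX-buffer handling (allocate the skb, DMA-map it, sync it for the CPU, unmap it when the packet is handed up the stack). Below is a minimal sketch of that streaming-DMA lifecycle using the generic dma_map_single()/dma_unmap_single() API rather than b44's actual ring code; the device pointer, buffer size and helper names are illustrative assumptions, not taken from the driver.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

#define EXAMPLE_RX_BUF_SIZE 1536	/* assumed buffer size, not from b44.c */

/* Map a freshly allocated RX buffer so the device can DMA into it. */
static int example_rx_map(struct device *dev, struct sk_buff *skb,
			  dma_addr_t *mapping)
{
	*mapping = dma_map_single(dev, skb->data, EXAMPLE_RX_BUF_SIZE,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *mapping))
		return -ENOMEM;
	return 0;
}

/* On receive: drop the mapping before the skb is passed up the stack.
 * (If the driver only peeks at the data and re-posts the same buffer,
 * as b44's small-packet copy path does, dma_sync_single_for_cpu() and
 * dma_sync_single_for_device() are used instead of a full unmap.)
 */
static void example_rx_complete(struct device *dev, dma_addr_t mapping)
{
	dma_unmap_single(dev, mapping, EXAMPLE_RX_BUF_SIZE, DMA_FROM_DEVICE);
}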
/linux-master/drivers/net/dsa/mv88e6xxx/
port.h
391 int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map);
462 int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map);
/linux-master/drivers/md/
dm.c
83 struct dm_table *map; member in struct:clone_info
416 struct dm_table *map; local
421 map = dm_get_live_table(md, srcu_idx);
422 if (!map || !dm_table_get_size(map))
426 if (map->num_targets != 1)
429 ti = dm_table_get_target(map, 0);
682 * function to access the md->map field, and make sure they call
690 return srcu_dereference(md->map, &md->io_barrier);
712 return rcu_dereference(md->map);
1218 struct dm_table *map; variable in typeref:struct:dm_table
1760 init_clone_info(struct clone_info *ci, struct dm_io *io, struct dm_table *map, struct bio *bio, bool is_abnormal) argument
1780 dm_split_and_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) argument
1853 struct dm_table *map; local
2294 struct dm_table *map = rcu_dereference_protected(md->map, 1); local
2487 struct dm_table *map; local
2639 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); local
2711 __dm_suspend(struct mapped_device *md, struct dm_table *map, unsigned int suspend_flags, unsigned int task_state, int dmf_suspended_flag) argument
2821 struct dm_table *map = NULL; local
2860 __dm_resume(struct mapped_device *md, struct dm_table *map) argument
2887 struct dm_table *map = NULL; local
2928 struct dm_table *map = NULL; local
2959 struct dm_table *map; local
[all...]
/linux-master/drivers/gpu/drm/xe/
xe_huc.c
139 static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map, argument
142 xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));
144 huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
145 huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
146 huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
147 huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
149 huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
150 huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);
/linux-master/drivers/gpu/drm/
drm_gem_atomic_helper.c
83 * // access shadow buffer via shadow_plane_state->map
87 * struct &drm_shadow_plane_state.map. The mappings are valid while the state
109 * // access shadow buffer via shadow_plane_state->map
349 * space and stores them in struct &drm_shadow_plane_state.map. The first data
365 return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
387 drm_gem_fb_vunmap(fb, shadow_plane_state->map);
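The drm_gem_atomic_helper.c kernel-doc fragments above describe how the shadow-plane helpers vmap a framebuffer's GEM objects and publish the CPU mappings in struct drm_shadow_plane_state.map. Below is a minimal sketch of that documented access pattern in a hypothetical driver's atomic_update callback; the callback name and the final blit are assumptions, only the map[] access comes from the kernel-doc.

#include <drm/drm_atomic.h>
#include <drm/drm_gem_atomic_helper.h>

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(new_state);

	/* The shadow-plane helpers have already vmap'ed the GEM buffers;
	 * plane 0's CPU mapping is available here as an iosys_map.
	 */
	struct iosys_map *vmap = &shadow_plane_state->map[0];

	/* ... blit from *vmap into device-visible memory ... */
	(void)vmap;
}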
/linux-master/drivers/gpio/
gpio-tegra186.c
980 irq->map = devm_kcalloc(&pdev->dev, gpio->gpio.ngpio,
981 sizeof(*irq->map), GFP_KERNEL);
982 if (!irq->map)
989 irq->map[offset + j] = irq->parents[port->bank];
/linux-master/arch/arc/kernel/
intc-arcv2.c
156 .map = arcv2_irq_map,
/linux-master/fs/btrfs/tests/
extent-map-tests.c
20 while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
21 node = rb_first_cached(&em_tree->map);
43 * Suppose that no extent map has been loaded into memory yet, there is a file
125 test_err("case1 [%llu %llu]: no extent map returned",
218 test_err("case2 [0 1K]: no extent map returned");
285 test_err("case3 [%llu %llu): no extent map returned",
312 * Suppose that no extent map has been loaded into memory yet.
409 test_err("case4 [%llu %llu): no extent map returned",
431 * Suppose that no extent map has been loaded into memory yet.
489 test_err("cannot add extent map [
888 struct btrfs_chunk_map *map; local
[all...]
/linux-master/fs/btrfs/
scrub.c
1292 struct btrfs_chunk_map *map, u64 *offset,
1298 const int data_stripes = nr_data_stripes(map);
1300 last_offset = (physical - map->stripes[num].physical) * data_stripes;
1315 rot = stripe_nr % map->num_stripes;
1318 stripe_index = rot % map->num_stripes;
1926 struct btrfs_chunk_map *map,
1938 const int data_stripes = nr_data_stripes(map);
1963 stripe_index = (i + rot) % map->num_stripes;
1964 physical = map->stripes[stripe_index].physical +
1970 map
1291 get_raid56_logic_offset(u64 physical, int num, struct btrfs_chunk_map *map, u64 *offset, u64 *stripe_start) argument
1923 scrub_raid56_parity_stripe(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, struct btrfs_block_group *bg, struct btrfs_chunk_map *map, u64 full_stripe_start) argument
2093 scrub_simple_mirror(struct scrub_ctx *sctx, struct btrfs_block_group *bg, struct btrfs_chunk_map *map, u64 logical_start, u64 logical_length, struct btrfs_device *device, u64 physical, int mirror_num) argument
2156 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map) argument
2165 simple_stripe_get_logical(struct btrfs_chunk_map *map, struct btrfs_block_group *bg, int stripe_index) argument
2182 simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index) argument
2192 scrub_simple_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg, struct btrfs_chunk_map *map, struct btrfs_device *device, int stripe_index) argument
2225 scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg, struct btrfs_chunk_map *map, struct btrfs_device *scrub_dev, int stripe_index) argument
2400 struct btrfs_chunk_map *map; local
[all...]
inode.c
2914 * The new extent is pinned in the extent map, and we don't want
3222 * we don't mess with the extent map tree in the NOCOW case, but
7268 * than one extent map, if at this point has already
7269 * created an ordered extent for a previous extent map
7271 * concurrent write against that previous extent map's
7353 static int btrfs_get_blocks_direct_write(struct extent_map **map, argument
7361 struct extent_map *em = *map;
7406 /* Our caller expects us to free the input extent map. */
7408 *map = NULL;
7423 *map
10693 struct btrfs_chunk_map *map = NULL; local
[all...]

Completed in 365 milliseconds
