Searched refs:bmap (Results 26 - 50 of 122) sorted by relevance


/linux-master/tools/testing/selftests/kvm/
dirty_log_test.c 564 static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap) argument
577 TEST_ASSERT(test_bit_le(page, bmap),
583 if (__test_and_clear_bit_le(page, bmap)) {
707 unsigned long *bmap; local
754 bmap = bitmap_zalloc(host_num_pages);
801 bmap, host_num_pages,
819 vm_dirty_log_verify(mode, bmap);
841 free(bmap);
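
The dirty_log_test.c matches follow a common pattern: allocate a zeroed bitmap with one bit per guest page, let KVM fill it with the dirty log, then walk it with test-and-clear operations and free it. Below is a minimal userspace sketch of that verify-and-clear loop; it uses plain C bit operations in place of the selftest helpers (bitmap_zalloc(), test_bit_le(), __test_and_clear_bit_le()), and the page count and pre-set "dirty" bits are purely illustrative.

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Zeroed bitmap with room for 'nbits' bits, similar in spirit to the
 * selftests' bitmap_zalloc(). */
static unsigned long *bitmap_alloc_zero(size_t nbits)
{
	return calloc((nbits + BITS_PER_LONG - 1) / BITS_PER_LONG,
		      sizeof(unsigned long));
}

/* Test a bit and clear it in one step, mirroring what
 * __test_and_clear_bit_le() does on a little-endian host. */
static int test_and_clear_bit(size_t bit, unsigned long *bmap)
{
	unsigned long mask = 1UL << (bit % BITS_PER_LONG);
	int was_set = (bmap[bit / BITS_PER_LONG] & mask) != 0;

	bmap[bit / BITS_PER_LONG] &= ~mask;
	return was_set;
}

int main(void)
{
	size_t host_num_pages = 128;   /* illustrative page count */
	unsigned long *bmap = bitmap_alloc_zero(host_num_pages);

	if (!bmap)
		return 1;

	/* Pretend the hypervisor reported pages 3 and 42 as dirty. */
	bmap[3 / BITS_PER_LONG] |= 1UL << (3 % BITS_PER_LONG);
	bmap[42 / BITS_PER_LONG] |= 1UL << (42 % BITS_PER_LONG);

	/* Verify-and-clear loop, analogous to vm_dirty_log_verify(). */
	for (size_t page = 0; page < host_num_pages; page++)
		if (test_and_clear_bit(page, bmap))
			printf("page %zu was dirty\n", page);

	free(bmap);
	return 0;
}

The test-and-clear step matters because the same bitmap is reused across iterations, so every verified bit must be cleared before the next dirty-log collection.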
/linux-master/fs/jfs/
jfs_incore.h 190 struct bmap *bmap; /* incore bmap descriptor */ member in struct:jfs_sb_info
jfs_dmap.h 29 #define BMAPBLKNO 0 /* lblkno of bmap within the map */
121 #define BLKTOAG(b,sbi) ((b) >> ((sbi)->bmap->db_agl2size))
127 ((s64)(a) << (JFS_SBI((ip)->i_sb)->bmap->db_agl2size))
230 struct bmap { struct
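
The jfs_dmap.h hits around BLKTOAG show how JFS converts a disk block number into its allocation group: a right shift by db_agl2size, the log2 of the allocation-group size in blocks, with the opposite shift recovering the AG's starting block. A small sketch of that arithmetic, using an assumed db_agl2size value rather than anything read from a real bmap descriptor:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assume each allocation group covers 2^15 = 32768 blocks. */
	int db_agl2size = 15;
	int64_t block = 100000;

	/* BLKTOAG-style conversion: block number -> allocation group. */
	int64_t ag = block >> db_agl2size;       /* 100000 >> 15 == 3 */

	/* The inverse direction: AG number -> first block of that AG. */
	int64_t ag_start = ag << db_agl2size;    /* 3 << 15 == 98304 */

	printf("block %lld lives in AG %lld (starting at block %lld)\n",
	       (long long)block, (long long)ag, (long long)ag_start);
	return 0;
}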
jfs_extent.c 305 struct bmap *bmp = sbi->bmap;
resize.c 25 /* convert block number to bmap file page number */
58 struct bmap *bmp = sbi->bmap;
284 /* number of data pages of new bmap file:
296 * at end of bmap by mkfs() or previous extendfs();
320 * update bmap control page;
341 /* compute number of data pages of current bmap file */
349 * grow bmap file for the new map pages required:
352 * bmap file only grows sequentially, i.e., both data pages
355 * by washing away bmap fil
[all...]
/linux-master/drivers/crypto/marvell/octeontx/
otx_cptpf_ucode.h 114 unsigned long *bmap; /* attached engines bitmap */ member in struct:otx_cpt_engs_rsvd
/linux-master/drivers/crypto/marvell/octeontx2/
otx2_cptpf_ucode.h 109 unsigned long *bmap; /* attached engines bitmap */ member in struct:otx2_cpt_engs_rsvd
/linux-master/drivers/net/ethernet/marvell/octeontx2/af/
mcs_cnf10kb.c 171 for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
204 for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
rvu_npc_hash.h 180 unsigned long *bmap; member in struct:npc_exact_table::__anon13
188 unsigned long *bmap; member in struct:npc_exact_table::__anon14
rvu_npc_hash.c 477 if (test_bit(hash + i * depth, table->mem_table.bmap))
480 set_bit(hash + i * depth, table->mem_table.bmap);
493 bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
560 idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
564 bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
569 set_bit(idx, table->cam_table.bmap);
919 if (!test_bit(index, table->cam_table.bmap)) {
927 clear_bit(index, table->cam_table.bmap);
933 if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
941 clear_bit(index + ways * depth, table->mem_table.bmap);
[all...]
rvu.c 132 if (!rsrc->bmap)
135 id = find_first_zero_bit(rsrc->bmap, rsrc->max);
139 __set_bit(id, rsrc->bmap);
148 if (!rsrc->bmap)
151 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
155 bitmap_set(rsrc->bmap, start, nrsrc);
161 if (!rsrc->bmap)
166 bitmap_clear(rsrc->bmap, start, nrsrc);
173 if (!rsrc->bmap)
176 start = bitmap_find_next_zero_area(rsrc->bmap, rsr
[all...]
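
The rvu.c matches (and the similar ones in rvu_npc.c, rvu_npa.c and mcs.c further down) show the usual bitmap-backed resource allocator: guard against a missing bitmap, find the first zero bit (or a zero run of nrsrc bits), mark it used, and clear it again on free. A self-contained sketch of the single-ID half of that pattern, with plain bit operations standing in for find_first_zero_bit()/__set_bit()/bitmap_clear() and an assumed pool size:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define POOL_MAX 64                    /* assumed resource count */

struct rsrc_bmap {
	unsigned long bmap[(POOL_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG];
	unsigned int max;
};

/* Allocate one resource ID: first zero bit, or -1 if the pool is full. */
static int rsrc_alloc(struct rsrc_bmap *rsrc)
{
	for (unsigned int id = 0; id < rsrc->max; id++) {
		unsigned long mask = 1UL << (id % BITS_PER_LONG);

		if (!(rsrc->bmap[id / BITS_PER_LONG] & mask)) {
			rsrc->bmap[id / BITS_PER_LONG] |= mask;
			return (int)id;
		}
	}
	return -1;
}

/* Return a previously allocated ID to the pool. */
static void rsrc_free(struct rsrc_bmap *rsrc, unsigned int id)
{
	rsrc->bmap[id / BITS_PER_LONG] &= ~(1UL << (id % BITS_PER_LONG));
}

int main(void)
{
	struct rsrc_bmap rsrc = { .max = POOL_MAX };
	int a = rsrc_alloc(&rsrc);         /* 0 */
	int b = rsrc_alloc(&rsrc);         /* 1 */

	rsrc_free(&rsrc, (unsigned int)a);
	printf("a=%d b=%d next=%d\n", a, b, rsrc_alloc(&rsrc)); /* next is 0 again */
	return 0;
}

The contiguous-range case works the same way, except the search looks for nrsrc consecutive zero bits (as bitmap_find_next_zero_area() does) and sets or clears the whole run.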
rvu_npa.c 214 unsigned long *bmap; local
228 bmap = pfvf->pool_bmap;
235 bmap = pfvf->aura_bmap;
242 if (!test_bit(id, bmap))
cgx.c 347 set_bit(0, lmac->mac_to_index_bmap.bmap);
385 if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
641 if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
900 clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
902 set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
905 clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
907 set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
910 if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
916 if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
1670 set_bit(0, lmac->mac_to_index_bmap.bmap);
[all...]
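
The cgx.c hits use per-LMAC bitmaps as a kind of reference count for flow control: each PF/VF sets or clears its own bit for RX/TX pause, and the link-level setting is only switched off once bitmap_weight() reports that no requester remains. A minimal sketch of that idea for the RX side, with __builtin_popcountl() standing in for bitmap_weight() and the indices chosen arbitrarily:

#include <stdio.h>
#include <stdbool.h>

static unsigned long rx_fc_pfvf_bmap;   /* one bit per PF/VF wanting RX pause */

/* Record or drop this PF/VF's RX-pause request and report whether the
 * link-level pause setting should stay enabled (any requester left?). */
static bool update_rx_pause(unsigned int pfvf_idx, bool rx_pause)
{
	if (rx_pause)
		rx_fc_pfvf_bmap |= 1UL << pfvf_idx;
	else
		rx_fc_pfvf_bmap &= ~(1UL << pfvf_idx);

	/* __builtin_popcountl() stands in for bitmap_weight() here. */
	return __builtin_popcountl(rx_fc_pfvf_bmap) != 0;
}

int main(void)
{
	update_rx_pause(2, true);
	update_rx_pause(5, true);
	update_rx_pause(2, false);
	/* Only once the last requester clears its bit does pause go off. */
	printf("pause still needed: %d\n", update_rx_pause(5, false)); /* 0 */
	return 0;
}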
rvu_npc.c 1835 bitmap_free(mcam->bmap);
1842 kfree(mcam->counters.bmap);
1889 mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
1890 if (!mcam->bmap)
1968 kfree(mcam->counters.bmap);
1974 bitmap_free(mcam->bmap);
2182 kfree(pkind->rsrc.bmap);
2304 __set_bit(entry, mcam->bmap);
2320 __clear_bit(entry, mcam->bmap);
2455 fcnt = npc_mcam_get_free_count(mcam->bmap,
2497 unsigned long *bmap; local
[all...]
mcs.c 501 __set_bit(flow_id, mcs->rx.flow_ids.bmap);
502 __set_bit(flow_id, mcs->tx.flow_ids.bmap);
514 __set_bit(secy_id, mcs->rx.secy.bmap);
515 __set_bit(secy_id, mcs->tx.secy.bmap);
569 if (!rsrc->bmap)
572 rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
576 bitmap_set(rsrc->bmap, rsrc_id, 1);
822 for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
854 for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
/linux-master/include/linux/
hid.h 1046 unsigned long *bmap = NULL; local
1051 bmap = input->absbit;
1055 bmap = input->relbit;
1059 bmap = input->keybit;
1063 bmap = input->ledbit;
1067 bmap = input->mscbit;
1072 if (unlikely(c > limit || !bmap)) {
1082 *bit = bmap;
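
The hid.h matches come from a helper that points bmap at one of the input device's capability bitmaps (absbit, relbit, keybit, ledbit, mscbit) according to the usage type, rejects out-of-range codes, and hands the chosen bitmap back through *bit. A simplified, self-contained sketch of that dispatch follows; the enum, the limits and struct input_caps are stand-ins for the real input subsystem types:

#include <stdio.h>
#include <stddef.h>

/* Assumed event classes standing in for EV_ABS, EV_REL, EV_KEY, ... */
enum ev_type { EV_ABS, EV_REL, EV_KEY };

#define ABS_CNT 64
#define REL_CNT 16
#define KEY_CNT 768
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct input_caps {
	unsigned long absbit[BITS_TO_LONGS(ABS_CNT)];
	unsigned long relbit[BITS_TO_LONGS(REL_CNT)];
	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
};

/* Pick the bitmap that matches the event type and reject out-of-range
 * codes. Returns 0 and fills *bit on success, -1 otherwise. */
static int map_usage(struct input_caps *input, enum ev_type type,
		     unsigned int code, unsigned long **bit)
{
	unsigned long *bmap = NULL;
	unsigned int limit = 0;

	switch (type) {
	case EV_ABS: bmap = input->absbit; limit = ABS_CNT; break;
	case EV_REL: bmap = input->relbit; limit = REL_CNT; break;
	case EV_KEY: bmap = input->keybit; limit = KEY_CNT; break;
	}

	if (code >= limit || !bmap)
		return -1;

	*bit = bmap;
	return 0;
}

int main(void)
{
	struct input_caps caps = { 0 };
	unsigned long *bit;

	printf("EV_REL code 3:  %d\n", map_usage(&caps, EV_REL, 3, &bit));  /* 0 */
	printf("EV_REL code 99: %d\n", map_usage(&caps, EV_REL, 99, &bit)); /* -1 */
	return 0;
}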
/linux-master/fs/xfs/libxfs/
xfs_rmap.c 2721 struct xfs_bmbt_irec *bmap)
2726 XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
2728 XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
2730 bmap->br_startoff,
2731 bmap->br_blockcount,
2732 bmap->br_state);
2739 ri->ri_bmap = *bmap;
2817 struct xfs_bmbt_irec bmap; local
2822 bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
2823 bmap
2716 __xfs_rmap_add( struct xfs_trans *tp, enum xfs_rmap_intent_type type, uint64_t owner, int whichfork, struct xfs_bmbt_irec *bmap) argument
2839 struct xfs_bmbt_irec bmap; local
[all...]
/linux-master/fs/bfs/
file.c 199 .bmap = bfs_bmap,
/linux-master/fs/fat/
cache.c 312 unsigned long *mapped_blocks, sector_t *bmap)
324 *bmap = fat_clus_to_blknr(sbi, cluster) + offset;
310 fat_get_mapped_cluster(struct inode *inode, sector_t sector, sector_t last_block, unsigned long *mapped_blocks, sector_t *bmap) argument
/linux-master/fs/nilfs2/
nilfs.h 20 #include "bmap.h"
73 NILFS_BMAP_I(const struct nilfs_bmap *bmap) argument
75 return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
92 NILFS_I_BMAP, /* has bmap and btnode_cache */
btree.c 21 static void __nilfs_btree_init(struct nilfs_bmap *bmap);
489 * -EINVAL (broken bmap) to notify bmap layer of fatal
649 * @btree: bmap struct of btree
787 ret = -EINVAL; /* Notify bmap layer of metadata corruption */
1861 * @bmap:
2406 static void __nilfs_btree_init(struct nilfs_bmap *bmap) argument
2408 bmap->b_ops = &nilfs_btree_ops;
2409 bmap->b_nchildren_per_block =
2410 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
2413 nilfs_btree_init(struct nilfs_bmap *bmap) argument
2428 nilfs_btree_init_gc(struct nilfs_bmap *bmap) argument
[all...]
ioctl.c 22 #include "bmap.h"
452 struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap; local
458 ret = nilfs_bmap_lookup_at_level(bmap,
748 struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap; local
755 ret = nilfs_bmap_lookup_at_level(bmap,
779 ret = nilfs_bmap_mark(bmap, bdescs[i].bd_offset,
/linux-master/fs/reiserfs/
bitmap.c 64 unsigned int bmap, offset; local
74 get_bit_address(s, block, &bmap, &offset);
99 if (bmap >= bmap_count) {
102 block, bmap);
116 * Searches in journal structures for a given block number (bmap, off).
120 static inline int is_block_in_journal(struct super_block *s, unsigned int bmap, argument
125 if (reiserfs_in_journal(s, bmap, off, 1, &tmp)) {
156 PROC_INFO_INC(s, scan_bitmap.bmap);
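
The reiserfs bitmap.c hits center on get_bit_address(), which translates an on-disk block number into a (bitmap group, offset) pair: with each bitmap block covering s_blocksize * 8 blocks, the group is the quotient and the offset the remainder, and out-of-range groups are rejected as at line 99. A tiny sketch of that conversion under an assumed 4 KiB block size and group count:

#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 4096;                 /* assumed s_blocksize */
	unsigned int bits_per_bmap = blocksize * 8;    /* blocks per bitmap group */
	unsigned int bmap_count = 8;                   /* assumed number of groups */

	unsigned int block = 100000;
	unsigned int bmap = block / bits_per_bmap;     /* which bitmap block */
	unsigned int offset = block % bits_per_bmap;   /* bit within it */

	if (bmap >= bmap_count) {
		fprintf(stderr, "block %u: invalid bitmap %u\n", block, bmap);
		return 1;
	}
	printf("block %u -> bitmap %u, offset %u\n", block, bmap, offset);
	return 0;
}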
/linux-master/tools/testing/selftests/kvm/aarch64/
page_fault_test.c 455 unsigned long *bmap; local
460 bmap = bitmap_zalloc(size / getpagesize());
461 kvm_vm_get_dirty_log(vm, region->region.slot, bmap);
462 first_page_dirty = test_bit(host_pg_nr, bmap);
463 free(bmap);
/linux-master/fs/ecryptfs/
mmap.c 529 int ret = bmap(lower_inode, &block);
555 .bmap = ecryptfs_bmap,
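
The bfs and ecryptfs matches both involve the legacy ->bmap address_space operation, which maps a file-relative block number to an on-disk block number; ecryptfs simply forwards the query to the lower inode via bmap(). Below is a userspace analogue of that logical-to-physical lookup over a toy extent table, purely to illustrate the mapping, not the kernel API:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for a ->bmap-style lookup: map a logical file block
 * to a physical disk block via a tiny extent list (purely illustrative). */
struct extent {
	uint64_t lblk;   /* first logical block of the extent */
	uint64_t pblk;   /* first physical block */
	uint64_t len;    /* length in blocks */
};

static uint64_t demo_bmap(const struct extent *map, size_t n, uint64_t lblk)
{
	for (size_t i = 0; i < n; i++) {
		if (lblk >= map[i].lblk && lblk < map[i].lblk + map[i].len)
			return map[i].pblk + (lblk - map[i].lblk);
	}
	return 0;   /* 0 stands for "hole / not mapped" */
}

int main(void)
{
	struct extent map[] = {
		{ .lblk = 0, .pblk = 5000, .len = 8 },
		{ .lblk = 8, .pblk = 9100, .len = 4 },
	};

	printf("logical 3  -> physical %llu\n",
	       (unsigned long long)demo_bmap(map, 2, 3));   /* 5003 */
	printf("logical 10 -> physical %llu\n",
	       (unsigned long long)demo_bmap(map, 2, 10));  /* 9102 */
	return 0;
}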

