Lines Matching refs:map

246 	 * status. Erase i_data so that it becomes a valid empty block map.
379 struct ext4_map_blocks *map)
385 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
386 ext4_error_inode(inode, func, line, map->m_pblk,
388 "(length %d)", (unsigned long) map->m_lblk,
389 map->m_pblk, map->m_len);
410 #define check_block_validity(inode, map) \
411 __check_block_validity((inode), __func__, __LINE__, (map))
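This wrapper macro captures the caller's __func__ and __LINE__ so that corruption reports point at the call site rather than at the checker itself. A minimal userspace sketch of the same pattern, with illustrative names (blk_map, range_valid, check_range are not ext4's):

#include <stdio.h>

struct blk_map {
	unsigned long long pblk;
	unsigned int len;
};

static int range_valid(const struct blk_map *m)
{
	return m->pblk != 0 && m->len > 0;	/* placeholder validity rule */
}

static int __check_range(const struct blk_map *m, const char *func, int line)
{
	if (!range_valid(m)) {
		fprintf(stderr, "%s:%d: bad block range %llu (length %u)\n",
			func, line, m->pblk, m->len);
		return -1;
	}
	return 0;
}

/* The macro is what injects the call site into the report. */
#define check_range(m) __check_range((m), __func__, __LINE__)

int main(void)
{
	struct blk_map bad = { 0, 4 };

	return check_range(&bad) ? 1 : 0;	/* report names this line */
}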
417 struct ext4_map_blocks *map,
422 map->m_flags = 0;
432 retval = ext4_ext_map_blocks(handle, inode, map, 0);
434 retval = ext4_ind_map_blocks(handle, inode, map, 0);
442 if (es_map->m_lblk != map->m_lblk ||
443 es_map->m_flags != map->m_flags ||
444 es_map->m_pblk != map->m_pblk) {
449 es_map->m_pblk, es_map->m_flags, map->m_lblk,
450 map->m_len, map->m_pblk, map->m_flags,
470 * pre-allocated and unwritten, the resulting @map is marked as unwritten.
471 * If the flags contain EXT4_GET_BLOCKS_CREATE, it will mark @map as mapped.
474 	 * that case, @map is returned as unmapped, but we still fill map->m_len to
475 * indicate the length of a hole starting at map->m_lblk.
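A hedged caller sketch of that contract (kernel context assumed, error handling trimmed; example_lookup is an illustrative name, not an ext4 function): a positive return is the number of blocks mapped, zero means a hole whose length is left in map.m_len, and m_flags distinguishes written from unwritten ranges.

static int example_lookup(struct inode *inode, ext4_lblk_t lblk,
			  unsigned int len)
{
	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
	int ret;

	ret = ext4_map_blocks(NULL, inode, &map, 0);	/* plain lookup, no handle */
	if (ret < 0)
		return ret;				/* I/O or corruption error */
	if (ret == 0)
		pr_debug("hole at %u, %u blocks long\n",
			 map.m_lblk, map.m_len);
	else if (map.m_flags & EXT4_MAP_UNWRITTEN)
		pr_debug("%d pre-allocated (unwritten) blocks at %llu\n",
			 ret, (unsigned long long)map.m_pblk);
	else if (map.m_flags & EXT4_MAP_MAPPED)
		pr_debug("%d written blocks at %llu\n",
			 ret, (unsigned long long)map.m_pblk);
	return ret;
}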
480 struct ext4_map_blocks *map, int flags)
488 memcpy(&orig_map, map, sizeof(*map));
491 map->m_flags = 0;
493 flags, map->m_len, (unsigned long) map->m_lblk);
498 if (unlikely(map->m_len > INT_MAX))
499 map->m_len = INT_MAX;
502 if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
507 ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
509 map->m_pblk = ext4_es_pblock(&es) +
510 map->m_lblk - es.es_lblk;
511 map->m_flags |= ext4_es_is_written(&es) ?
513 retval = es.es_len - (map->m_lblk - es.es_lblk);
514 if (retval > map->m_len)
515 retval = map->m_len;
516 map->m_len = retval;
518 map->m_pblk = 0;
519 map->m_flags |= ext4_es_is_delayed(&es) ?
521 retval = es.es_len - (map->m_lblk - es.es_lblk);
522 if (retval > map->m_len)
523 retval = map->m_len;
524 map->m_len = retval;
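Both branches above apply the same clamp: the usable length is whatever remains of the cached extent from the requested block onward, capped at the requested length. A standalone sketch with a worked case (names are illustrative):

#include <assert.h>

static unsigned int clamp_to_extent(unsigned int es_lblk, unsigned int es_len,
				    unsigned int req_lblk, unsigned int req_len)
{
	/* blocks left in the extent starting from the requested block */
	unsigned int avail = es_len - (req_lblk - es_lblk);

	return avail > req_len ? req_len : avail;
}

int main(void)
{
	/* extent cached at block 100, 50 blocks long; request 100 blocks at 120:
	 * 50 - (120 - 100) = 30 blocks remain, which is below the request */
	assert(clamp_to_extent(100, 50, 120, 100) == 30);
	return 0;
}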
533 ext4_map_blocks_es_recheck(handle, inode, map,
551 retval = ext4_ext_map_blocks(handle, inode, map, 0);
553 retval = ext4_ind_map_blocks(handle, inode, map, 0);
558 if (unlikely(retval != map->m_len)) {
561 "%lu: retval %d != map->m_len %d",
562 inode->i_ino, retval, map->m_len);
566 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
570 ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
571 map->m_lblk + map->m_len - 1))
573 ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
574 map->m_pblk, status);
579 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
580 ret = check_block_validity(inode, map);
595 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
608 map->m_flags &= ~EXT4_MAP_FLAGS;
623 retval = ext4_ext_map_blocks(handle, inode, map, flags);
625 retval = ext4_ind_map_blocks(handle, inode, map, flags);
627 if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
640 if (unlikely(retval != map->m_len)) {
643 "%lu: retval %d != map->m_len %d",
644 inode->i_ino, retval, map->m_len);
656 map->m_flags & EXT4_MAP_MAPPED &&
657 map->m_flags & EXT4_MAP_NEW) {
658 ret = ext4_issue_zeroout(inode, map->m_lblk,
659 map->m_pblk, map->m_len);
671 ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
675 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
679 ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
680 map->m_lblk + map->m_len - 1))
682 ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
683 map->m_pblk, status);
688 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
689 ret = check_block_validity(inode, map);
698 if (map->m_flags & EXT4_MAP_NEW &&
699 !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
704 (loff_t)map->m_lblk << inode->i_blkbits;
705 loff_t length = (loff_t)map->m_len << inode->i_blkbits;
717 if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
718 map->m_flags & EXT4_MAP_MAPPED))
719 ext4_fc_track_range(handle, inode, map->m_lblk,
720 map->m_lblk + map->m_len - 1);
756 struct ext4_map_blocks map;
762 map.m_lblk = iblock;
763 map.m_len = bh->b_size >> inode->i_blkbits;
765 ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
768 map_bh(bh, inode->i_sb, map.m_pblk);
769 ext4_update_bh_state(bh, map.m_flags);
770 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
774 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
812 /* Maximum number of blocks we map for direct IO at once. */
821 struct ext4_map_blocks map;
831 map.m_lblk = block;
832 map.m_len = 1;
833 err = ext4_map_blocks(handle, inode, &map, map_flags);
841 return sb_find_get_block(inode->i_sb, map.m_pblk);
843 bh = sb_getblk(inode->i_sb, map.m_pblk);
846 if (map.m_flags & EXT4_MAP_NEW) {
1526 unsigned int can_map:1; /* Can writepages call map blocks? */
1533 * Extent to map - this can be after first_page because that can be
1537 struct ext4_map_blocks map;
1687 struct ext4_map_blocks *map,
1696 memcpy(&orig_map, map, sizeof(*map));
1702 map->m_flags = 0;
1703 ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1704 (unsigned long) map->m_lblk);
1722 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1724 if (retval > map->m_len)
1725 retval = map->m_len;
1726 map->m_len = retval;
1728 map->m_flags |= EXT4_MAP_MAPPED;
1730 map->m_flags |= EXT4_MAP_UNWRITTEN;
1735 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1748 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1750 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1758 if (unlikely(retval != map->m_len)) {
1761 "%lu: retval %d != map->m_len %d",
1762 inode->i_ino, retval, map->m_len);
1766 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1768 ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1769 map->m_pblk, status);
1777 retval = ext4_insert_delayed_block(inode, map->m_lblk);
1803 struct ext4_map_blocks map;
1809 map.m_lblk = iblock;
1810 map.m_len = 1;
1817 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1821 map_bh(bh, inode->i_sb, map.m_pblk);
1822 ext4_update_bh_state(bh, map.m_flags);
1886 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1894 * extent of buffers to map yet, the function returns 'true' immediately - the
1902 struct ext4_map_blocks *map = &mpd->map;
1907 /* So far no extent to map => we write the buffer right away */
1908 if (map->m_len == 0)
1914 if (map->m_len == 0) {
1915 /* We cannot map unless handle is started... */
1918 map->m_lblk = lblk;
1919 map->m_len = 1;
1920 map->m_flags = bh->b_state & BH_FLAGS;
1925 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1929 if (lblk == map->m_lblk + map->m_len &&
1930 (bh->b_state & BH_FLAGS) == map->m_flags) {
1931 map->m_len++;
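A userspace sketch of that accumulation rule (names are illustrative, and 2048 merely stands in for MAX_WRITEPAGES_EXTENT_LEN): open a new run when none exists, extend it only while blocks are logically contiguous with matching state bits, and stop when the run is full or the pattern breaks so the current run can be mapped first.

#include <stdbool.h>

#define MAX_RUN_LEN 2048	/* stand-in for MAX_WRITEPAGES_EXTENT_LEN */

struct run {
	unsigned int lblk, len, flags;
};

static bool add_block_to_run(struct run *r, unsigned int lblk,
			     unsigned int flags)
{
	if (r->len == 0) {			/* open a new run */
		r->lblk = lblk;
		r->len = 1;
		r->flags = flags;
		return true;
	}
	if (r->len >= MAX_RUN_LEN)		/* run is full, map it first */
		return false;
	if (lblk == r->lblk + r->len && flags == r->flags) {
		r->len++;			/* contiguous and compatible */
		return true;
	}
	return false;				/* discontiguous: map current run */
}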
1947 * accumulated extent of buffers to map or add buffers in the page to the
1948 * extent of buffers to map. The function returns 1 if the caller can continue
1950 * extent to map because we cannot extend it anymore. It can also return value
1970 /* Found extent to map? */
1971 if (mpd->map.m_len)
1981 if (mpd->map.m_len == 0) {
1997 * @mpd: description of extent to map, on return next extent to map
2006 * We map delalloc buffers to their physical location, clear unwritten bits.
2025 if (lblk < mpd->map.m_lblk)
2027 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2030 * Find next buffer in the folio to map.
2032 mpd->map.m_len = 0;
2033 mpd->map.m_flags = 0;
2039 if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2045 io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
2070 * @mpd - description of extent to map, on return next extent to map
2074 * We map delalloc buffers to their physical location, clear unwritten bits,
2077 * mapped, we update @map to the next extent in the last page that needs
2092 start = mpd->map.m_lblk >> bpp_bits;
2093 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2095 pblock = mpd->map.m_pblk;
2123 mpd->map.m_len = 0;
2124 mpd->map.m_flags = 0;
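The start/end computation above converts logical blocks to page (folio) indexes with bpp_bits = PAGE_SHIFT - blkbits, i.e. log2 of the blocks per page. A worked example, assuming 4K pages and 1K blocks:

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12, blkbits = 10;	/* 4K pages, 1K blocks */
	unsigned int bpp_bits = page_shift - blkbits;	/* 4 blocks per page */
	unsigned int m_lblk = 10, m_len = 9;

	/* extent covering blocks 10..18 spans pages 2..4 */
	printf("pages %u..%u\n", m_lblk >> bpp_bits,
	       (m_lblk + m_len - 1) >> bpp_bits);
	return 0;
}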
2134 struct ext4_map_blocks *map = &mpd->map;
2138 trace_ext4_da_write_pages_extent(inode, map);
2160 if (map->m_flags & BIT(BH_Delay))
2163 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2166 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2175 BUG_ON(map->m_len == 0);
2180 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2184 * @mpd - extent to map
2192 * extent. Note that we need not map all the described range since allocation
2194 * cannot map more because we are limited by reserved transaction credits. On
2204 struct ext4_map_blocks *map = &mpd->map;
2214 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2238 (unsigned long long)map->m_lblk,
2239 (unsigned)map->m_len, -err);
2252 * extent to map
2257 } while (map->m_len);
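The do-while above implements the note in the comment: one pass may map only a prefix of the extent (bounded by reserved transaction credits), so the loop submits what was mapped and retries with the remainder. A skeletal sketch of that loop shape, with map_one() and submit() as illustrative stand-ins for mpage_map_one_extent() and the buffer-submission step:

struct extent {
	unsigned int lblk, len;
};

/* map_one() maps some prefix of [lblk, lblk + len) and returns how many
 * blocks it covered (possibly fewer than asked), or < 0 on error. */
static int map_and_submit(struct extent *e,
			  int (*map_one)(unsigned int lblk, unsigned int len),
			  int (*submit)(unsigned int lblk, unsigned int len))
{
	do {
		int n = map_one(e->lblk, e->len);

		if (n <= 0)
			return n < 0 ? n : -1;	/* no progress counts as failure */
		if (submit(e->lblk, (unsigned int)n) < 0)
			return -1;
		e->lblk += (unsigned int)n;	/* retry with the unmapped tail */
		e->len -= (unsigned int)n;
	} while (e->len);
	return 0;
}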
2290 * iteration. This is called from ext4_writepages(). We map an extent of
2292 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2349 * IO immediately. If we cannot map blocks, we submit just already mapped
2350 * buffers in the page for IO and keep page dirty. When we can map blocks and
2382 mpd->map.m_len = 0;
2410 mpd->map.m_len >> (PAGE_SHIFT - blkbits))
2414 if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
2459 if (mpd->map.m_len == 0)
2652 * We have two constraints: We find one extent to map and we
2678 if (!ret && mpd->map.m_len)
3066 * which will map the blocks, and start the I/O, but not
3214 struct ext4_map_blocks *map, loff_t offset,
3229 if (map->m_flags & EXT4_MAP_NEW)
3236 iomap->offset = (u64) map->m_lblk << blkbits;
3237 iomap->length = (u64) map->m_len << blkbits;
3239 if ((map->m_flags & EXT4_MAP_MAPPED) &&
3252 if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3254 iomap->addr = (u64) map->m_pblk << blkbits;
3257 } else if (map->m_flags & EXT4_MAP_MAPPED) {
3259 iomap->addr = (u64) map->m_pblk << blkbits;
3262 } else if (map->m_flags & EXT4_MAP_DELAYED) {
3271 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3279 * Trim the mapping request to the maximum value that we can map at
3282 if (map->m_len > DIO_MAX_BLOCKS)
3283 map->m_len = DIO_MAX_BLOCKS;
3284 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
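The trim above bounds how much journal credit a single direct-IO allocation can demand: clamp the request first, then size the transaction from the clamped length. An illustrative userspace sketch; the constant and the cost model are assumptions, stand-ins for DIO_MAX_BLOCKS and ext4_chunk_trans_blocks():

#define MAX_DIO_BLOCKS 4096	/* stand-in value, not ext4's definition */

static unsigned int trans_credits(unsigned int nblocks)
{
	/* made-up linear cost model standing in for ext4_chunk_trans_blocks() */
	return 3 + nblocks / 256;
}

static unsigned int clamp_and_cost(unsigned int *m_len)
{
	if (*m_len > MAX_DIO_BLOCKS)
		*m_len = MAX_DIO_BLOCKS;	/* bound one allocation's cost */
	return trans_credits(*m_len);
}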
3310 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3315 ret = ext4_map_blocks(handle, inode, map, m_flags);
3337 struct ext4_map_blocks map;
3349 map.m_lblk = offset >> blkbits;
3350 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3351 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
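A worked example of that byte-to-block conversion: with 4K blocks (blkbits = 12), a request for 10000 bytes at offset 5000 touches blocks 1 through 3, so m_lblk = 1 and m_len = 3. The min_t() against EXT4_MAX_LOGICAL_BLOCK only matters near the end of the addressable range and is omitted here.

#include <stdio.h>

int main(void)
{
	unsigned int blkbits = 12;
	unsigned long long offset = 5000, length = 10000;
	unsigned long long m_lblk = offset >> blkbits;
	unsigned long long m_len =
		((offset + length - 1) >> blkbits) - m_lblk + 1;

	printf("m_lblk=%llu m_len=%llu\n", m_lblk, m_len);	/* 1 and 3 */
	return 0;
}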
3361 ret = ext4_map_blocks(NULL, inode, &map, 0);
3362 if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3365 ret = ext4_iomap_alloc(inode, &map, flags);
3367 ret = ext4_map_blocks(NULL, inode, &map, 0);
3378 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3380 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3432 struct ext4_map_blocks map;
3450 map.m_lblk = offset >> blkbits;
3451 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3452 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3464 map.m_flags = 0;
3469 ret = ext4_map_blocks(NULL, inode, &map, 0);
3473 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
5619 	 * How many index blocks do we need to touch to map @lblocks logical blocks
5673 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
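A hedged worked example for the indirect-block (non-extent) case: a 4K block holds 1024 block addresses, so N contiguous data blocks touch at most ceil(N / 1024) single-indirect blocks plus a few double/triple-indirect blocks. The "+ 4" slack mirrors the upper bound used for this estimate in ext4; treat the exact constant as an assumption of this sketch.

#include <stdio.h>

int main(void)
{
	unsigned int addrs_per_block = 4096 / 4;	/* 1024 on 4K blocks */
	unsigned int nrblocks = 5000;
	unsigned int idxblocks =
		(nrblocks + addrs_per_block - 1) / addrs_per_block + 4;

	/* at most 9 index blocks to map 5000 contiguous data blocks */
	printf("at most %u index blocks for %u data blocks\n",
	       idxblocks, nrblocks);
	return 0;
}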