Lines matching refs:mp in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/fs/xfs/

61 #define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
62 #define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0)
63 #define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0)
132 struct xfs_mount *mp)
134 uuid_t *uuid = &mp->m_sb.sb_uuid;
137 if (mp->m_flags & XFS_MOUNT_NOUUID)
143 mp->m_fsname);
172 mp->m_fsname);
178 struct xfs_mount *mp)
180 uuid_t *uuid = &mp->m_sb.sb_uuid;
183 if (mp->m_flags & XFS_MOUNT_NOUUID)
204 xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
209 spin_lock(&mp->m_perag_lock);
210 pag = radix_tree_lookup(&mp->m_perag_tree, agno);
217 spin_unlock(&mp->m_perag_lock);
218 trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
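
The xfs_perag_get() excerpt above (lines 204-218) looks up a per-allocation-group structure in mp->m_perag_tree under mp->m_perag_lock and takes a reference on it before returning. A minimal user-space sketch of that lookup-and-reference pattern, using a flat array and a pthread mutex in place of the kernel's radix tree and spinlock (all names here are illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* simplified stand-in for struct xfs_perag */
    struct perag {
        unsigned int agno;
        int          ref;    /* reference count, protected by the lock below */
    };

    static pthread_mutex_t perag_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct perag *perag_table[16];   /* stand-in for mp->m_perag_tree */

    /* look up AG "agno" and take a reference, as xfs_perag_get() does */
    static struct perag *perag_get(unsigned int agno)
    {
        struct perag *pag;

        pthread_mutex_lock(&perag_lock);
        pag = perag_table[agno];
        if (pag)
            pag->ref++;
        pthread_mutex_unlock(&perag_lock);
        return pag;
    }

    int main(void)
    {
        perag_table[0] = calloc(1, sizeof(struct perag));

        struct perag *pag = perag_get(0);
        printf("got AG %u, ref now %d\n", pag->agno, pag->ref);
        free(perag_table[0]);
        return 0;
    }
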
239 xfs_mount_t *mp)
244 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
245 spin_lock(&mp->m_perag_lock);
246 pag = radix_tree_delete(&mp->m_perag_tree, agno);
249 spin_unlock(&mp->m_perag_lock);
281 xfs_mount_t *mp,
303 sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
311 sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
415 xfs_mount_t *mp,
424 xfs_sb_t *sbp = &mp->m_sb;
433 pag = xfs_perag_get(mp, index);
445 pag->pag_mount = mp;
452 spin_lock(&mp->m_perag_lock);
453 if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
455 spin_unlock(&mp->m_perag_lock);
460 spin_unlock(&mp->m_perag_lock);
468 agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
469 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
471 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
472 mp->m_flags |= XFS_MOUNT_32BITINODES;
474 mp->m_flags &= ~XFS_MOUNT_32BITINODES;
476 if (mp->m_flags & XFS_MOUNT_32BITINODES) {
481 if (mp->m_maxicount) {
494 ino = XFS_AGINO_TO_INO(mp, index, agino);
500 pag = xfs_perag_get(mp, index);
508 pag = xfs_perag_get(mp, index);
521 pag = radix_tree_delete(&mp->m_perag_tree, index);
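
Lines 468-476 above decide whether every possible inode number fits in 32 bits: the largest AG-relative inode number is built from the last block of an allocation group, prefixed with the highest AG number, and compared against XFS_MAXINUMBER_32. A stand-alone sketch of that arithmetic, assuming the conventional macro expansions (AG-relative inode = block-within-AG shifted left by sb_inopblog; global inode = AG number shifted left by sb_inopblog + sb_agblklog, the same sum stored in m_agino_log on line 743):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t sb_agblocks = 1u << 22;   /* blocks per AG */
        uint32_t sb_agblklog = 22;         /* log2(agblocks), rounded up */
        uint32_t sb_agcount  = 128;        /* allocation groups */
        uint32_t sb_inopblog = 4;          /* 16 inodes per block */

        /* XFS_OFFBNO_TO_AGINO(): last block in the AG, inode offset 0 */
        uint64_t agino = (uint64_t)(sb_agblocks - 1) << sb_inopblog;

        /* XFS_AGINO_TO_INO(): prepend the AG number above the AG-relative bits */
        uint64_t ino = ((uint64_t)(sb_agcount - 1) << (sb_inopblog + sb_agblklog))
                        | agino;

        /* the XFS_MOUNT_SMALL_INUMS check against XFS_MAXINUMBER_32 (2^32 - 1) */
        if (ino > 0xffffffffULL)
            printf("max ino 0x%llx: needs XFS_MOUNT_32BITINODES handling\n",
                   (unsigned long long)ino);
        else
            printf("max ino 0x%llx fits in 32 bits\n",
                   (unsigned long long)ino);
        return 0;
    }
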
639 xfs_readsb(xfs_mount_t *mp, int flags)
646 ASSERT(mp->m_sb_bp == NULL);
647 ASSERT(mp->m_ddev_targp != NULL);
654 sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
657 bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
671 xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
673 error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
682 if (sector_size > mp->m_sb.sb_sectsize) {
685 sector_size, mp->m_sb.sb_sectsize);
694 if (sector_size < mp->m_sb.sb_sectsize) {
697 sector_size = mp->m_sb.sb_sectsize;
698 bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR,
710 xfs_icsb_reinit_counters(mp);
712 mp->m_sb_bp = bp;
734 xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
736 mp->m_agfrotor = mp->m_agirotor = 0;
737 spin_lock_init(&mp->m_agirotor_lock);
738 mp->m_maxagi = mp->m_sb.sb_agcount;
739 mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
740 mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
741 mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
742 mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
743 mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
744 mp->m_blockmask = sbp->sb_blocksize - 1;
745 mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
746 mp->m_blockwmask = mp->m_blockwsize - 1;
748 mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
749 mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
750 mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
751 mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
753 mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
754 mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
755 mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
756 mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
758 mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
759 mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
760 mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
761 mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
763 mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
764 mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
766 mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
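
xfs_mount_common() (lines 734-766) derives the mount point's geometry fields from superblock values with shift-and-mask arithmetic. A small stand-alone sketch of a few of those derivations, using sample values and assuming the usual constants (XFS_NBBYLOG = 3 for 8 bits per byte, BBSHIFT = 9 for 512-byte basic blocks, XFS_WORDLOG = 2 for 4-byte words):

    #include <stdio.h>
    #include <stdint.h>

    #define NBBYLOG  3   /* log2(bits per byte), assumed XFS_NBBYLOG */
    #define BBSHIFT  9   /* log2(512-byte basic block), assumed */
    #define WORDLOG  2   /* log2(4-byte word), assumed XFS_WORDLOG */

    /* index of the highest set bit, like xfs_highbit32() */
    static int highbit32(uint32_t v)
    {
        int bit = -1;

        while (v) {
            bit++;
            v >>= 1;
        }
        return bit;
    }

    int main(void)
    {
        uint32_t sb_blocklog  = 12;  /* 4096-byte blocks */
        uint32_t sb_blocksize = 1u << sb_blocklog;
        uint32_t sb_agcount   = 16;  /* allocation groups */

        printf("m_blkbit_log = %u\n", sb_blocklog + NBBYLOG);   /* bits per block */
        printf("m_blkbb_log  = %u\n", sb_blocklog - BBSHIFT);   /* BBs per block */
        printf("m_agno_log   = %d\n", highbit32(sb_agcount - 1) + 1);
        printf("m_blockmask  = 0x%x\n", sb_blocksize - 1);
        printf("m_blockwsize = %u\n", sb_blocksize >> WORDLOG);
        return 0;
    }
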
778 xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
782 xfs_sb_t *sbp = &mp->m_sb;
796 error = xfs_alloc_pagf_init(mp, NULL, index, 0);
800 error = xfs_ialloc_pagi_init(mp, NULL, index);
803 pag = xfs_perag_get(mp, index);
814 spin_lock(&mp->m_sb_lock);
818 spin_unlock(&mp->m_sb_lock);
821 xfs_icsb_reinit_counters(mp);
830 xfs_update_alignment(xfs_mount_t *mp)
832 xfs_sb_t *sbp = &(mp->m_sb);
834 if (mp->m_dalign) {
839 if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
840 (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
841 if (mp->m_flags & XFS_MOUNT_RETERR) {
846 mp->m_dalign = mp->m_swidth = 0;
851 mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
852 if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
853 if (mp->m_flags & XFS_MOUNT_RETERR) {
856 xfs_fs_cmn_err(CE_WARN, mp,
858 mp->m_dalign, mp->m_swidth,
861 mp->m_dalign = 0;
862 mp->m_swidth = 0;
863 } else if (mp->m_dalign) {
864 mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
866 if (mp->m_flags & XFS_MOUNT_RETERR) {
867 xfs_fs_cmn_err(CE_WARN, mp,
869 mp->m_dalign,
870 mp->m_blockmask +1);
873 mp->m_swidth = 0;
882 if (sbp->sb_unit != mp->m_dalign) {
883 sbp->sb_unit = mp->m_dalign;
884 mp->m_update_flags |= XFS_SB_UNIT;
886 if (sbp->sb_width != mp->m_swidth) {
887 sbp->sb_width = mp->m_swidth;
888 mp->m_update_flags |= XFS_SB_WIDTH;
891 } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
892 xfs_sb_version_hasdalign(&mp->m_sb)) {
893 mp->m_dalign = sbp->sb_unit;
894 mp->m_swidth = sbp->sb_width;
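
xfs_update_alignment() (lines 830-894) validates the stripe unit and width passed at mount time: both must be whole multiples of the filesystem block size, and the stripe unit must divide the AG size, otherwise the values are dropped or the mount fails under XFS_MOUNT_RETERR. A user-space sketch of the block-size check and the BB-to-FSB conversion (the constants and sample geometry are assumptions, not taken from the excerpt):

    #include <stdio.h>
    #include <stdint.h>

    #define BBSHIFT 9   /* 512-byte basic blocks, assumed */
    #define BBTOB(bbs) ((uint64_t)(bbs) << BBSHIFT)

    int main(void)
    {
        uint32_t blocklog  = 12;                    /* 4096-byte fs blocks */
        uint32_t blockmask = (1u << blocklog) - 1;
        uint32_t blkbb_log = blocklog - BBSHIFT;

        uint32_t dalign = 256;    /* stripe unit, in 512-byte BBs (128 KiB) */
        uint32_t swidth = 1024;   /* stripe width, in 512-byte BBs (512 KiB) */

        /* reject units that are not a multiple of the fs block size */
        if ((BBTOB(dalign) & blockmask) || (BBTOB(swidth) & blockmask)) {
            fprintf(stderr, "stripe geometry not block aligned\n");
            return 1;
        }

        /* convert to filesystem blocks, as XFS_BB_TO_FSBT() does */
        printf("sunit = %u fsb, swidth = %u fsb\n",
               dalign >> blkbb_log, swidth >> blkbb_log);
        return 0;
    }
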
904 xfs_set_maxicount(xfs_mount_t *mp)
906 xfs_sb_t *sbp = &(mp->m_sb);
916 do_div(icount, mp->m_ialloc_blks);
917 mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
920 mp->m_maxicount = 0;
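
xfs_set_maxicount() (lines 904-920) caps the number of inodes the filesystem may create: the excerpt shows the count being rounded down to whole inode-allocation chunks (line 916) and converted from blocks to inodes (line 917). The sketch below also includes the preceding percentage step (sb_imax_pct of sb_dblocks), which is not part of the excerpt and is an assumption about the surrounding code:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t sb_dblocks  = 1ULL << 20;  /* 1M fs blocks */
        uint8_t  sb_imax_pct = 25;          /* max %% of space usable for inodes */
        uint8_t  sb_inopblog = 4;           /* 16 inodes per block */
        uint32_t ialloc_blks = 4;           /* blocks per 64-inode chunk */

        /* take the percentage, then round down to whole inode chunks */
        uint64_t icount = sb_dblocks * sb_imax_pct / 100;
        icount /= ialloc_blks;              /* the do_div() on line 916 */

        uint64_t maxicount = (icount * ialloc_blks) << sb_inopblog;
        printf("m_maxicount = %llu inodes\n", (unsigned long long)maxicount);
        return 0;
    }
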
931 xfs_set_rw_sizes(xfs_mount_t *mp)
933 xfs_sb_t *sbp = &(mp->m_sb);
936 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
937 if (mp->m_flags & XFS_MOUNT_WSYNC) {
945 readio_log = mp->m_readio_log;
946 writeio_log = mp->m_writeio_log;
950 mp->m_readio_log = sbp->sb_blocklog;
952 mp->m_readio_log = readio_log;
954 mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
956 mp->m_writeio_log = sbp->sb_blocklog;
958 mp->m_writeio_log = writeio_log;
960 mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
967 xfs_set_inoalignment(xfs_mount_t *mp)
969 if (xfs_sb_version_hasalign(&mp->m_sb) &&
970 mp->m_sb.sb_inoalignmt >=
971 XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
972 mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
974 mp->m_inoalign_mask = 0;
979 if (mp->m_dalign && mp->m_inoalign_mask &&
980 !(mp->m_dalign & mp->m_inoalign_mask))
981 mp->m_sinoalign = mp->m_dalign;
983 mp->m_sinoalign = 0;
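
xfs_set_inoalignment() (lines 967-983) records the inode-cluster alignment mask and, when a stripe unit is configured, enables stripe-aligned inode allocation only if the stripe unit is itself a multiple of that alignment. A small sketch of the mask arithmetic with sample values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t sb_inoalignmt = 4;   /* inode cluster alignment, in fs blocks */
        uint32_t m_dalign      = 32;  /* stripe unit, in fs blocks */

        uint32_t inoalign_mask = sb_inoalignmt - 1;

        /* use stripe-aligned inode allocation only if the stripe unit is
           itself a multiple of the inode alignment */
        uint32_t sinoalign = (m_dalign && inoalign_mask &&
                              !(m_dalign & inoalign_mask)) ? m_dalign : 0;

        printf("m_inoalign_mask = %u, m_sinoalign = %u\n",
               inoalign_mask, sinoalign);
        return 0;
    }
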
990 xfs_check_sizes(xfs_mount_t *mp)
996 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
997 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
1001 error = xfs_read_buf(mp, mp->m_ddev_targp,
1002 d - XFS_FSS_TO_BB(mp, 1),
1003 XFS_FSS_TO_BB(mp, 1), 0, &bp);
1013 if (mp->m_logdev_targp != mp->m_ddev_targp) {
1014 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
1015 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
1019 error = xfs_read_buf(mp, mp->m_logdev_targp,
1020 d - XFS_FSB_TO_BB(mp, 1),
1021 XFS_FSB_TO_BB(mp, 1), 0, &bp);
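
xfs_check_sizes() (lines 990-1021) converts sb_dblocks to a disk address, round-trips the conversion to make sure the block count is representable, and then reads the last sector (lines 1001-1003) to confirm the device really extends that far. The sketch below covers only the round-trip arithmetic, and uses a deliberately narrow address type so the overflow is visible with small numbers (the kernel's xfs_daddr_t is wider):

    #include <stdio.h>
    #include <stdint.h>

    /* deliberately narrow "disk address" type for demonstration only */
    typedef uint32_t sketch_daddr_t;

    int main(void)
    {
        uint32_t blkbb_log  = 3;            /* 8 basic blocks per 4096-byte block */
        uint64_t sb_dblocks = 1ULL << 30;   /* 1G fs blocks */

        /* XFS_FSB_TO_BB(): filesystem blocks -> 512-byte basic blocks */
        sketch_daddr_t d = (sketch_daddr_t)(sb_dblocks << blkbb_log);

        /* XFS_BB_TO_FSB() round trip: a mismatch means the block count
           cannot be addressed, so the mount is refused */
        if (((uint64_t)d >> blkbb_log) != sb_dblocks)
            fprintf(stderr, "size check failed: dblocks not addressable\n");
        else
            printf("size check passed; last daddr = %u\n", (unsigned)d);
        return 0;
    }
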
1039 struct xfs_mount *mp)
1044 mp->m_qflags = 0;
1050 if (mp->m_sb.sb_qflags == 0)
1052 spin_lock(&mp->m_sb_lock);
1053 mp->m_sb.sb_qflags = 0;
1054 spin_unlock(&mp->m_sb_lock);
1060 if (mp->m_flags & XFS_MOUNT_RDONLY)
1064 xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
1067 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1068 error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
1072 xfs_fs_cmn_err(CE_ALERT, mp,
1082 xfs_default_resblks(xfs_mount_t *mp)
1093 resblks = mp->m_sb.sb_dblocks;
1111 xfs_mount_t *mp)
1113 xfs_sb_t *sbp = &(mp->m_sb);
1120 xfs_mount_common(mp, sbp);
1143 mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
1149 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1150 !(mp->m_flags & XFS_MOUNT_NOATTR2))
1151 mp->m_flags |= XFS_MOUNT_ATTR2;
1154 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1155 (mp->m_flags & XFS_MOUNT_NOATTR2)) {
1156 xfs_sb_version_removeattr2(&mp->m_sb);
1157 mp->m_update_flags |= XFS_SB_FEATURES2;
1161 mp->m_update_flags |= XFS_SB_VERSIONNUM;
1170 error = xfs_update_alignment(mp);
1174 xfs_alloc_compute_maxlevels(mp);
1175 xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
1176 xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
1177 xfs_ialloc_compute_maxlevels(mp);
1179 xfs_set_maxicount(mp);
1181 mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
1183 error = xfs_uuid_mount(mp);
1190 xfs_set_rw_sizes(mp);
1197 mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
1202 xfs_set_inoalignment(mp);
1207 error = xfs_check_sizes(mp);
1214 error = xfs_rtmount_init(mp);
1224 uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
1226 mp->m_dmevmask = 0; /* not persistent; set after each mount */
1228 xfs_dir_mount(mp);
1233 mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;
1238 xfs_trans_init(mp);
1243 spin_lock_init(&mp->m_perag_lock);
1244 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1245 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
1253 XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
1261 error = xfs_log_mount(mp, mp->m_logdev_targp,
1262 XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
1263 XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
1288 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
1289 !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
1290 !mp->m_sb.sb_inprogress) {
1291 error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
1300 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
1311 XFS_BUFTARG_NAME(mp->m_ddev_targp),
1315 mp);
1319 mp->m_rootip = rip; /* save it */
1326 error = xfs_rtmount_inodes(mp);
1340 if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1341 error = xfs_mount_log_sb(mp, mp->m_update_flags);
1351 if (XFS_IS_QUOTA_RUNNING(mp)) {
1352 error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
1356 ASSERT(!XFS_IS_QUOTA_ON(mp));
1363 if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
1366 mp->m_fsname);
1368 error = xfs_mount_reset_sbqflags(mp);
1379 error = xfs_log_mount_finish(mp);
1389 ASSERT(mp->m_qflags == 0);
1390 mp->m_qflags = quotaflags;
1392 xfs_qm_mount_quotas(mp);
1406 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
1407 resblks = xfs_default_resblks(mp);
1408 error = xfs_reserve_blocks(mp, &resblks, NULL);
1417 xfs_rtunmount_inodes(mp);
1421 xfs_log_unmount(mp);
1423 xfs_free_perag(mp);
1425 xfs_uuid_unmount(mp);
1436 struct xfs_mount *mp)
1441 xfs_qm_unmount_quotas(mp);
1442 xfs_rtunmount_inodes(mp);
1443 IRELE(mp->m_rootip);
1455 xfs_log_force(mp, XFS_LOG_SYNC);
1465 xfs_reclaim_inodes(mp, 0);
1466 XFS_bflush(mp->m_ddev_targp);
1467 xfs_reclaim_inodes(mp, SYNC_WAIT);
1469 xfs_qm_unmount(mp);
1476 xfs_log_force(mp, XFS_LOG_SYNC);
1478 xfs_binval(mp->m_ddev_targp);
1479 if (mp->m_rtdev_targp) {
1480 xfs_binval(mp->m_rtdev_targp);
1498 error = xfs_reserve_blocks(mp, &resblks, NULL);
1503 error = xfs_log_sbcount(mp, 1);
1507 xfs_unmountfs_writesb(mp);
1508 xfs_unmountfs_wait(mp); /* wait for async bufs */
1509 xfs_log_unmount_write(mp);
1510 xfs_log_unmount(mp);
1511 xfs_uuid_unmount(mp);
1514 xfs_errortag_clearall(mp, 0);
1516 xfs_free_perag(mp);
1520 xfs_unmountfs_wait(xfs_mount_t *mp)
1522 if (mp->m_logdev_targp != mp->m_ddev_targp)
1523 xfs_wait_buftarg(mp->m_logdev_targp);
1524 if (mp->m_rtdev_targp)
1525 xfs_wait_buftarg(mp->m_rtdev_targp);
1526 xfs_wait_buftarg(mp->m_ddev_targp);
1530 xfs_fs_writable(xfs_mount_t *mp)
1532 return !(xfs_test_for_freeze(mp) || XFS_FORCED_SHUTDOWN(mp) ||
1533 (mp->m_flags & XFS_MOUNT_RDONLY));
1549 xfs_mount_t *mp,
1555 if (!xfs_fs_writable(mp))
1558 xfs_icsb_sync_counters(mp, 0);
1564 if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1567 tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
1568 error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
1583 xfs_unmountfs_writesb(xfs_mount_t *mp)
1592 if (!((mp->m_flags & XFS_MOUNT_RDONLY) ||
1593 XFS_FORCED_SHUTDOWN(mp))) {
1595 sbp = xfs_getsb(mp, 0);
1602 ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
1603 xfsbdstrat(mp, sbp);
1607 mp, sbp, XFS_BUF_ADDR(sbp));
1626 xfs_mount_t *mp;
1632 mp = tp->t_mountp;
1633 bp = xfs_trans_getsb(tp, mp, 0);
1639 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
1665 xfs_mount_t *mp,
1682 lcounter = (long long)mp->m_sb.sb_icount;
1688 mp->m_sb.sb_icount = lcounter;
1691 lcounter = (long long)mp->m_sb.sb_ifree;
1697 mp->m_sb.sb_ifree = lcounter;
1701 mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1702 res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1706 mp->m_resblks_avail += delta;
1709 mp->m_resblks_avail = mp->m_resblks;
1715 mp->m_sb.sb_fdblocks = lcounter +
1716 XFS_ALLOC_SET_ASIDE(mp);
1727 lcounter = (long long)mp->m_resblks_avail + delta;
1729 mp->m_resblks_avail = lcounter;
1735 mp->m_fsname);
1739 mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
1742 lcounter = (long long)mp->m_sb.sb_frextents;
1747 mp->m_sb.sb_frextents = lcounter;
1750 lcounter = (long long)mp->m_sb.sb_dblocks;
1756 mp->m_sb.sb_dblocks = lcounter;
1759 scounter = mp->m_sb.sb_agcount;
1765 mp->m_sb.sb_agcount = scounter;
1768 scounter = mp->m_sb.sb_imax_pct;
1774 mp->m_sb.sb_imax_pct = scounter;
1777 scounter = mp->m_sb.sb_rextsize;
1783 mp->m_sb.sb_rextsize = scounter;
1786 scounter = mp->m_sb.sb_rbmblocks;
1792 mp->m_sb.sb_rbmblocks = scounter;
1795 lcounter = (long long)mp->m_sb.sb_rblocks;
1801 mp->m_sb.sb_rblocks = lcounter;
1804 lcounter = (long long)mp->m_sb.sb_rextents;
1810 mp->m_sb.sb_rextents = lcounter;
1813 scounter = mp->m_sb.sb_rextslog;
1819 mp->m_sb.sb_rextslog = scounter;
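
Each case in xfs_mod_incore_sb_unlocked() (lines 1665-1819) follows the same pattern: copy the superblock counter into a signed variable, apply the delta, refuse the change if it would go negative, and write the result back. (The free-block case additionally accounts for XFS_ALLOC_SET_ASIDE() and the reserved pool, which this sketch does not model.) A minimal user-space version of the common pattern:

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    /* apply a signed delta to one in-core superblock counter, refusing to
       let it go negative -- the pattern repeated for each field above */
    static int mod_counter(uint64_t *counter, int64_t delta)
    {
        long long lcounter = (long long)*counter;

        lcounter += delta;
        if (lcounter < 0)
            return ENOSPC;   /* or EINVAL, depending on the field */
        *counter = lcounter;
        return 0;
    }

    int main(void)
    {
        uint64_t sb_ifree = 10;

        if (mod_counter(&sb_ifree, -4) == 0)
            printf("sb_ifree = %llu\n", (unsigned long long)sb_ifree);
        if (mod_counter(&sb_ifree, -100) != 0)
            printf("delta rejected: would underflow\n");
        return 0;
    }
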
1835 xfs_mount_t *mp,
1848 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
1849 status = xfs_icsb_modify_counters(mp, field,
1856 spin_lock(&mp->m_sb_lock);
1857 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1858 spin_unlock(&mp->m_sb_lock);
1877 xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
1889 spin_lock(&mp->m_sb_lock);
1902 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
1903 spin_unlock(&mp->m_sb_lock);
1904 status = xfs_icsb_modify_counters(mp,
1907 spin_lock(&mp->m_sb_lock);
1913 status = xfs_mod_incore_sb_unlocked(mp,
1940 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
1941 spin_unlock(&mp->m_sb_lock);
1942 status = xfs_icsb_modify_counters(mp,
1946 spin_lock(&mp->m_sb_lock);
1952 status = xfs_mod_incore_sb_unlocked(mp,
1962 spin_unlock(&mp->m_sb_lock);
1977 xfs_mount_t *mp,
1982 ASSERT(mp->m_sb_bp != NULL);
1983 bp = mp->m_sb_bp;
2001 xfs_mount_t *mp)
2009 bp = xfs_getsb(mp, 0);
2012 mp->m_sb_bp = NULL;
2022 xfs_mount_t *mp,
2032 tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
2033 error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
2050 struct xfs_mount *mp,
2053 if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
2054 xfs_readonly_buftarg(mp->m_logdev_targp) ||
2055 (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
2134 xfs_mount_t *mp;
2136 mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
2138 per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
2148 xfs_icsb_lock(mp);
2149 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
2150 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
2151 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
2152 xfs_icsb_unlock(mp);
2159 xfs_icsb_lock(mp);
2160 spin_lock(&mp->m_sb_lock);
2161 xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
2162 xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
2163 xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
2165 mp->m_sb.sb_icount += cntp->icsb_icount;
2166 mp->m_sb.sb_ifree += cntp->icsb_ifree;
2167 mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
2171 xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
2172 xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
2173 xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
2174 spin_unlock(&mp->m_sb_lock);
2175 xfs_icsb_unlock(mp);
2185 xfs_mount_t *mp)
2190 mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
2191 if (mp->m_sb_cnts == NULL)
2195 mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
2196 mp->m_icsb_notifier.priority = 0;
2197 register_hotcpu_notifier(&mp->m_icsb_notifier);
2201 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2205 mutex_init(&mp->m_icsb_mutex);
2211 mp->m_icsb_counters = -1;
2217 xfs_mount_t *mp)
2219 xfs_icsb_lock(mp);
2224 mp->m_icsb_counters = -1;
2225 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
2226 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
2227 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
2228 xfs_icsb_unlock(mp);
2233 xfs_mount_t *mp)
2235 if (mp->m_sb_cnts) {
2236 unregister_hotcpu_notifier(&mp->m_icsb_notifier);
2237 free_percpu(mp->m_sb_cnts);
2239 mutex_destroy(&mp->m_icsb_mutex);
2261 xfs_mount_t *mp)
2267 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2274 xfs_mount_t *mp)
2280 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2287 xfs_mount_t *mp,
2297 xfs_icsb_lock_all_counters(mp);
2300 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2307 xfs_icsb_unlock_all_counters(mp);
2312 xfs_mount_t *mp,
2316 return test_bit(field, &mp->m_icsb_counters);
2321 xfs_mount_t *mp,
2336 if (xfs_icsb_counter_disabled(mp, field))
2339 xfs_icsb_lock_all_counters(mp);
2340 if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
2343 xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
2346 mp->m_sb.sb_icount = cnt.icsb_icount;
2349 mp->m_sb.sb_ifree = cnt.icsb_ifree;
2352 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2359 xfs_icsb_unlock_all_counters(mp);
2364 xfs_mount_t *mp,
2374 xfs_icsb_lock_all_counters(mp);
2376 cntp = per_cpu_ptr(mp->m_sb_cnts, i);
2393 clear_bit(field, &mp->m_icsb_counters);
2394 xfs_icsb_unlock_all_counters(mp);
2399 xfs_mount_t *mp,
2404 xfs_icsb_count(mp, &cnt, flags);
2406 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
2407 mp->m_sb.sb_icount = cnt.icsb_icount;
2408 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
2409 mp->m_sb.sb_ifree = cnt.icsb_ifree;
2410 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
2411 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2419 xfs_mount_t *mp,
2422 spin_lock(&mp->m_sb_lock);
2423 xfs_icsb_sync_counters_locked(mp, flags);
2424 spin_unlock(&mp->m_sb_lock);
2444 #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
2445 (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
2448 xfs_mount_t *mp,
2457 xfs_icsb_disable_counter(mp, field);
2462 count = mp->m_sb.sb_icount;
2468 count = mp->m_sb.sb_ifree;
2474 count = mp->m_sb.sb_fdblocks;
2476 if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
2485 xfs_icsb_enable_counter(mp, field, count, resid);
2490 xfs_mount_t *mp,
2494 spin_lock(&mp->m_sb_lock);
2495 xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
2496 spin_unlock(&mp->m_sb_lock);
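
xfs_icsb_balance_counter_locked() (lines 2448-2485) folds the per-CPU counters back into the in-core superblock by disabling the counter, checks the total against a re-enable threshold such as XFS_ICSB_FDBLK_CNTR_REENABLE(), and then re-enables the counter with the total spread back across the online CPUs (lines 2374-2394). The redistribution below assumes an even split with the remainder folded into the first CPU, which is how the resid argument appears to be applied; treat it as a sketch, not the kernel algorithm:

    #include <stdio.h>
    #include <stdint.h>

    #define NCPUS 4

    int main(void)
    {
        uint64_t per_cpu[NCPUS];
        uint64_t total = 1003;   /* e.g. mp->m_sb.sb_icount after folding */

        /* even share per CPU; the first CPU absorbs the remainder (resid) */
        uint64_t share = total / NCPUS;
        uint64_t resid = total - share * NCPUS;

        for (int i = 0; i < NCPUS; i++) {
            per_cpu[i] = share + resid;
            resid = 0;
        }

        uint64_t sum = 0;
        for (int i = 0; i < NCPUS; i++) {
            printf("cpu%d: %llu\n", i, (unsigned long long)per_cpu[i]);
            sum += per_cpu[i];
        }
        printf("total preserved: %llu\n", (unsigned long long)sum);
        return 0;
    }
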
2501 xfs_mount_t *mp,
2513 icsbp = this_cpu_ptr(mp->m_sb_cnts);
2518 if (unlikely(xfs_icsb_counter_disabled(mp, field)))
2521 if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
2544 BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
2546 lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
2550 icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
2568 xfs_icsb_lock(mp);
2576 if (!(xfs_icsb_counter_disabled(mp, field))) {
2577 xfs_icsb_unlock(mp);
2592 spin_lock(&mp->m_sb_lock);
2593 ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
2594 spin_unlock(&mp->m_sb_lock);
2603 xfs_icsb_balance_counter(mp, field, 0);
2604 xfs_icsb_unlock(mp);
2617 xfs_icsb_lock(mp);
2627 xfs_icsb_balance_counter(mp, field, delta);
2628 xfs_icsb_unlock(mp);