/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_version.h"
#include "xfs_log_priv.h"
#include "xfs_trans_priv.h"
#include "xfs_filestream.h"
#include "xfs_da_btree.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_sync.h"
#include "xfs_trace.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>

static const struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;

#define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
#define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
#define MNTOPT_LOGDEV	"logdev"	/* log device */
#define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
#define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
#define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
#define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
#define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
#define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
#define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
#define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
#define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
#define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
#define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
#define MNTOPT_BSDGROUPS	"bsdgroups"	/* group-ID from parent directory */
#define MNTOPT_SYSVGROUPS	"sysvgroups"	/* group-ID from current process */
#define MNTOPT_ALLOCSIZE	"allocsize"	/* preferred allocation size */
#define MNTOPT_NORECOVERY	"norecovery"	/* don't run XFS recovery */
#define MNTOPT_BARRIER	"barrier"	/* use write barriers for log write and
					 * unwritten extent conversion */
#define MNTOPT_NOBARRIER	"nobarrier"	/* .. disable */
#define MNTOPT_64BITINODE	"inode64"	/* inodes can be allocated anywhere */
#define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
#define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
#define MNTOPT_LARGEIO	"largeio"	/* report large I/O sizes in stat() */
#define MNTOPT_NOLARGEIO	"nolargeio"	/* do not report large I/O sizes
						 * in stat(). */
#define MNTOPT_ATTR2	"attr2"		/* do use attr2 attribute format */
#define MNTOPT_NOATTR2	"noattr2"	/* do not use attr2 attribute format */
#define MNTOPT_FILESTREAM	"filestreams"	/* use filestreams allocator */
#define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
#define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
#define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
#define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
#define MNTOPT_PRJQUOTA	"prjquota"	/* project quota enabled */
#define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
#define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
#define MNTOPT_PQUOTA	"pquota"	/* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF	"uqnoenforce"	/* user quota, no limit enforcement */
#define MNTOPT_GQUOTANOENF	"gqnoenforce"	/* group quota, no limit enforcement */
#define MNTOPT_PQUOTANOENF	"pqnoenforce"	/* project quota, no limit enforcement */
#define MNTOPT_QUOTANOENF	"qnoenforce"	/* same as uqnoenforce */
#define MNTOPT_DELAYLOG	"delaylog"	/* Delayed logging enabled */
#define MNTOPT_NODELAYLOG	"nodelaylog"	/* Delayed logging disabled */

/*
 * Table driven mount option parser.
 *
 * Currently only used for remount, but it will be used for mount
 * in the future, too.
 */
enum {
	Opt_barrier, Opt_nobarrier, Opt_err
};

static const match_table_t tokens = {
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_err, NULL}
};
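
/*
 * Parse a numeric mount option argument with an optional K/M/G unit
 * suffix, returning the value scaled to bytes.  A recognised suffix
 * character is stripped from the string in place before the number
 * is converted.
 */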
STATIC unsigned long
suffix_strtoul(char *s, char **endp, unsigned int base)
{
	int	last, shift_left_factor = 0;
	char	*value = s;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 */
STATIC int
xfs_parseargs(
	struct xfs_mount	*mp,
	char			*options)
{
	struct super_block	*sb = mp->m_super;
	char			*this_char, *value, *eov;
	int			dsunit = 0;
	int			dswidth = 0;
	int			iosize = 0;
	__uint8_t		iosizelog = 0;

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (sb->s_flags & MS_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (sb->s_flags & MS_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	/*
	 * Set some default flags that could be cleared by the mount option
	 * parsing.
	 */
	mp->m_flags |= XFS_MOUNT_BARRIER;
	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
	mp->m_flags |= XFS_MOUNT_SMALL_INUMS;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;

	if (!options)
		goto done;

	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char, '=')) != NULL)
			*value++ = 0;

		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbufs = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbsize = suffix_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_logname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
			cmn_err(CE_WARN,
				"XFS: %s option not allowed on this system",
				this_char);
			return EINVAL;
		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_rtname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = simple_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = suffix_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {
			mp->m_flags |= XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
			   !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
			mp->m_flags &= ~XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
			mp->m_flags |= XFS_MOUNT_WSYNC;
		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
			mp->m_flags |= XFS_MOUNT_NORECOVERY;
		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
			mp->m_flags |= XFS_MOUNT_NOALIGN;
		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
			mp->m_flags |= XFS_MOUNT_SWALLOC;
		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			dsunit = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			dswidth = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
#if !XFS_BIG_INUMS
			cmn_err(CE_WARN,
				"XFS: %s option not allowed on this system",
				this_char);
			return EINVAL;
#endif
		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
			mp->m_flags |= XFS_MOUNT_NOUUID;
		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
			mp->m_flags |= XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
			mp->m_flags |= XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
			mp->m_flags &= ~XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
			mp->m_flags |= XFS_MOUNT_ATTR2;
		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
			mp->m_flags &= ~XFS_MOUNT_ATTR2;
			mp->m_flags |= XFS_MOUNT_NOATTR2;
		} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
			mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					  XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					  XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					  XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
			   !strcmp(this_char, MNTOPT_UQUOTA) ||
			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
			   !strcmp(this_char, MNTOPT_PRJQUOTA)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
			mp->m_flags |= XFS_MOUNT_DELAYLOG;
			cmn_err(CE_WARN,
				"Enabling EXPERIMENTAL delayed logging feature "
				"- use at your own risk.\n");
		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
		} else if (!strcmp(this_char, "ihashsize")) {
			cmn_err(CE_WARN,
	"XFS: ihashsize no longer used, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisdsync")) {
			cmn_err(CE_WARN,
	"XFS: osyncisdsync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisosync")) {
			cmn_err(CE_WARN,
	"XFS: osyncisosync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "irixsgid")) {
			cmn_err(CE_WARN,
	"XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
		} else {
			cmn_err(CE_WARN,
				"XFS: unknown mount option [%s].", this_char);
			return EINVAL;
		}
	}

	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		cmn_err(CE_WARN, "XFS: no-recovery mounts must be read-only.");
		return EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
		cmn_err(CE_WARN,
	"XFS: sunit and swidth options incompatible with the noalign option");
		return EINVAL;
	}

#ifndef CONFIG_XFS_QUOTA
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		cmn_err(CE_WARN,
			"XFS: quota support not available in this kernel.");
		return EINVAL;
	}
#endif

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
		cmn_err(CE_WARN,
			"XFS: cannot mount with both project and group quota");
		return EINVAL;
	}

	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
		cmn_err(CE_WARN,
			"XFS: sunit and swidth must be specified together");
		return EINVAL;
	}

	if (dsunit && (dswidth % dsunit != 0)) {
		cmn_err(CE_WARN,
	"XFS: stripe width (%d) must be a multiple of the stripe unit (%d)",
			dswidth, dsunit);
		return EINVAL;
	}

done:
	if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
		/*
		 * At this point the superblock has not been read
		 * in, therefore we do not know the block size.
		 * Before the mount call ends we will convert
		 * these to FSBs.
		 */
		if (dsunit) {
			mp->m_dalign = dsunit;
			mp->m_flags |= XFS_MOUNT_RETERR;
		}

		if (dswidth)
			mp->m_swidth = dswidth;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		cmn_err(CE_WARN,
			"XFS: invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return XFS_ERROR(EINVAL);
	}
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		cmn_err(CE_WARN,
			"XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return XFS_ERROR(EINVAL);
	}

	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
	if (!mp->m_fsname)
		return ENOMEM;
	mp->m_fsname_len = strlen(mp->m_fsname) + 1;

	if (iosizelog) {
		if (iosizelog > XFS_MAX_IO_LOG ||
		    iosizelog < XFS_MIN_IO_LOG) {
			cmn_err(CE_WARN,
				"XFS: invalid log iosize: %d [not %d-%d]",
				iosizelog, XFS_MIN_IO_LOG,
				XFS_MAX_IO_LOG);
			return XFS_ERROR(EINVAL);
		}

		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
		mp->m_readio_log = iosizelog;
		mp->m_writeio_log = iosizelog;
	}

	return 0;
}

struct proc_xfs_info {
	int	flag;
	char	*str;
};
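
/*
 * Build the mount option string shown in /proc/mounts from the current
 * state of the mount structure.
 */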
STATIC int
xfs_showargs(
	struct xfs_mount	*mp,
	struct seq_file		*m)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		"," MNTOPT_IKEEP },
		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
		{ XFS_MOUNT_DELAYLOG,		"," MNTOPT_DELAYLOG },
		{ 0, NULL }
	};
	static struct proc_xfs_info xfs_info_unset[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_COMPAT_IOSIZE,	"," MNTOPT_LARGEIO },
		{ XFS_MOUNT_BARRIER,		"," MNTOPT_NOBARRIER },
		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_64BITINODE },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}
	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
		if (!(mp->m_flags & xfs_infop->flag))
			seq_puts(m, xfs_infop->str);
	}

	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
				(int)(1 << mp->m_writeio_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
	if (mp->m_rtname)
		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, "," MNTOPT_SUNIT "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, "," MNTOPT_USRQUOTA);
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, "," MNTOPT_UQUOTANOENF);

	/* Either project or group quotas can be active, not both */

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_PRJQUOTA);
		else
			seq_puts(m, "," MNTOPT_PQUOTANOENF);
	} else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_GRPQUOTA);
		else
			seq_puts(m, "," MNTOPT_GQUOTANOENF);
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, "," MNTOPT_NOQUOTA);

	return 0;
}
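
/*
 * Return the largest file offset the page cache can address on this
 * platform; blockshift is the log2 of the filesystem block size.
 */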
__uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out the maximum file size; on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_prepare_write does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = open_bdev_exclusive(name, FMODE_READ|FMODE_WRITE, mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		printk("XFS: Invalid device [%s], error=%d\n", name, error);
	}

	return -error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
}

/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*sbp = xfs_getsb(mp, 0);
	int		error;

	XFS_BUF_UNDONE(sbp);
	XFS_BUF_UNREAD(sbp);
	XFS_BUF_UNDELAYWRITE(sbp);
	XFS_BUF_WRITE(sbp);
	XFS_BUF_UNASYNC(sbp);
	XFS_BUF_ORDERED(sbp);

	xfsbdstrat(mp, sbp);
	error = xfs_iowait(sbp);

	/*
	 * Clear all the flags we set and possible error state in the
	 * buffer.  We only did the write to try out whether barriers
	 * worked and shouldn't leave any traces in the superblock
	 * buffer.
	 */
	XFS_BUF_DONE(sbp);
	XFS_BUF_ERROR(sbp, 0);
	XFS_BUF_UNORDERED(sbp);

	xfs_buf_relse(sbp);
	return error;
}

STATIC void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
	int error;

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported with external log device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, underlying device is readonly");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	error = xfs_barrier_test(mp);
	if (error) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, trial barrier write failed");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL,
			BLKDEV_IFL_WAIT);
}
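
/*
 * Tear down the buffer targets and close the log and realtime block
 * devices.  The data device itself was opened by get_sb_bdev() and is
 * closed by the generic superblock code.
 */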
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp, mp->m_ddev_targp);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			cmn_err(CE_WARN,
	"XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1, mp->m_fsname);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1, mp->m_fsname);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp, mp->m_ddev_targp);
 out_close_rtdev:
	if (rtdev)
		xfs_blkdev_put(rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
 out:
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
				    mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    mp->m_sb.sb_blocksize,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_blocksize,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

/*
 * XFS AIL push thread support
 */
void
xfsaild_wakeup(
	struct xfs_ail		*ailp,
	xfs_lsn_t		threshold_lsn)
{
	ailp->xa_target = threshold_lsn;
	wake_up_process(ailp->xa_task);
}
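
/*
 * Main loop of the AIL push daemon: sleep for the timeout requested by
 * the previous push (or indefinitely if there was nothing to do), then
 * push the AIL towards the current target LSN.
 */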
STATIC int
xfsaild(
	void	*data)
{
	struct xfs_ail	*ailp = data;
	xfs_lsn_t	last_pushed_lsn = 0;
	long		tout = 0; /* milliseconds */

	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(tout ?
				msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);

		/* swsusp */
		try_to_freeze();

		ASSERT(ailp->xa_mount->m_log);
		if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
			continue;

		tout = xfsaild_push(ailp, &last_pushed_lsn);
	}

	return 0;
}	/* xfsaild */

int
xfsaild_start(
	struct xfs_ail	*ailp)
{
	ailp->xa_target = 0;
	ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
				    ailp->xa_mount->m_fsname);
	if (IS_ERR(ailp->xa_task))
		return -PTR_ERR(ailp->xa_task);
	return 0;
}

void
xfsaild_stop(
	struct xfs_ail	*ailp)
{
	kthread_stop(ailp->xa_task);
}


/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	XFS_STATS_INC(vn_reclaim);

	/* bad inode, get out here ASAP */
	if (is_bad_inode(inode))
		goto out_reclaim;

	xfs_ioend_wait(ip);

	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock.  The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
out_reclaim:
	xfs_inode_set_reclaim_tag(ip);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab.  This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_iocount, 0);
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);
	init_waitqueue_head(&ip->i_ipin_wait);
	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&ip->i_flush);
	complete(&ip->i_flush);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		    "xfsino", ip->i_ino);
}

/*
 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
 * we catch unlogged VFS level updates to the inode.  Care must be taken
 * here - the transaction code calls mark_inode_dirty_sync() to mark the
 * VFS inode dirty in a transaction and clears the i_update_core field;
 * it must clear the field after calling mark_inode_dirty_sync() to
 * correctly indicate that the dirty state has been propagated into the
 * inode log item.
 *
 * We need the barrier() to maintain correct ordering between unlogged
 * updates and the transaction commit code that clears the i_update_core
 * field.  This requires all updates to be completed before marking the
 * inode dirty.
 */
STATIC void
xfs_fs_dirty_inode(
	struct inode	*inode)
{
	barrier();
	XFS_I(inode)->i_update_core = 1;
}
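
/*
 * Log the inode core.  Called with the inode locked shared; the lock
 * is dropped while the transaction is reserved, re-taken exclusive for
 * the logging, and demoted back to shared on return.
 */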
STATIC int
xfs_log_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);

	if (error) {
		xfs_trans_cancel(tp, 0);
		/* we need to return with the lock held shared */
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	/*
	 * Note - it's possible that we might have pushed ourselves out of the
	 * way during trans_reserve which would flush the inode.  But there's
	 * no guarantee that the inode buffer has actually gone out yet (it's
	 * delwri).  Plus the buffer could be pinned anyway if it's part of
	 * an inode in another recent transaction.  So we play it safe and
	 * fire off the transaction anyway.
	 */
	xfs_trans_ijoin(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);
	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);

	return error;
}
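
/*
 * Write a dirty inode back.  For synchronous writeback it is enough to
 * log the inode core; the log force in ->sync_fs pushes it to stable
 * storage.  For background writeback the inode is flushed without
 * blocking, and EAGAIN is returned if it cannot be locked immediately.
 */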
STATIC int
xfs_fs_write_inode(
	struct inode		*inode,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = EAGAIN;

	trace_xfs_write_inode(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		/*
		 * Make sure the inode has made it into the log.  Instead
		 * of forcing it all the way to stable storage using a
		 * synchronous transaction we let the log force inside the
		 * ->sync_fs call do that for us, which reduces the number
		 * of synchronous log forces dramatically.
		 */
		xfs_ioend_wait(ip);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		if (ip->i_update_core) {
			error = xfs_log_inode(ip);
			if (error)
				goto out_unlock;
		}
	} else {
		/*
		 * We make this non-blocking if the inode is contended, and
		 * return EAGAIN to indicate to the caller that we did not
		 * succeed.  This prevents the flush path from blocking on
		 * inodes inside another operation right now, they get caught
		 * later by xfs_sync.
		 */
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
			goto out;

		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
			goto out_unlock;

		/*
		 * Now we have the flush lock and the inode is not pinned, we
		 * can check if the inode is really clean as we know that
		 * there are no pending transaction completions, it is not
		 * waiting on the delayed write queue and there is no IO in
		 * progress.
		 */
		if (xfs_inode_clean(ip)) {
			xfs_ifunlock(ip);
			error = 0;
			goto out_unlock;
		}
		error = xfs_iflush(ip, 0);
	}

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 out:
	/*
	 * if we failed to write out the inode then mark
	 * it dirty again so we'll try again later.
	 */
	if (error)
		xfs_mark_inode_dirty_sync(ip);
	return -error;
}

STATIC void
xfs_fs_evict_inode(
	struct inode		*inode)
{
	xfs_inode_t		*ip = XFS_I(inode);

	trace_xfs_evict_inode(ip);

	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_DEC(vn_active);

	/*
	 * The iolock is used by the file system to coordinate reads,
	 * writes, and block truncates.  Up to this point the lock
	 * protected concurrent accesses by users of the inode.  But
	 * from here forward we're doing some final processing of the
	 * inode because we're done with it, and although we reuse the
	 * iolock for protection it is really a distinct lock class
	 * (in the lockdep sense) from before.  To keep lockdep happy
	 * (and basically indicate what we are doing), we explicitly
	 * re-init the iolock here.
	 */
	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	xfs_inactive(ip);
}

STATIC void
xfs_free_fsname(
	struct xfs_mount	*mp)
{
	kfree(mp->m_fsname);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
}

STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Unregister the memory shrinker before we tear down the mount
	 * structure so we don't have memory reclaim racing with us here.
	 */
	xfs_inode_shrinker_unregister(mp);
	xfs_syncd_stop(mp);

	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive
	 * here.
	 */
	xfs_filestream_unmount(mp);

	XFS_bflush(mp->m_ddev_targp);

	xfs_unmountfs(mp);
	xfs_freesb(mp);
	xfs_icsb_destroy_counters(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);
	kfree(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	/*
	 * Not much we can do for the first async pass.  Writing out the
	 * superblock would be counter-productive as we are going to redirty
	 * when writing out other data and metadata (and writing out a single
	 * block is quite fast anyway).
	 *
	 * Try to asynchronously kick off quota syncing at least.
	 */
	if (!wait) {
		xfs_qm_sync(mp, SYNC_TRYLOCK);
		return 0;
	}

	error = xfs_quiesce_data(mp);
	if (error)
		return -error;

	if (laptop_mode) {
		int	prev_sync_seq = mp->m_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(mp->m_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(mp->m_wait_single_sync_task,
				mp->m_sync_seq != prev_sync_seq);
	}

	return 0;
}
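
/*
 * Fill in the VFS statfs structure.  The lazy per-cpu counters are
 * synced back into the in-core superblock first, and the superblock
 * is then sampled under m_sb_lock.
 */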
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
	__uint64_t		fakeinos, id;
	xfs_extlen_t		lsize;
	__int64_t		ffree;

	statp->f_type = XFS_SB_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	statp->f_bfree = statp->f_bavail =
				sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	fakeinos = statp->f_bfree << sbp->sb_inopblog;
	statp->f_files =
	    MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
	if (mp->m_maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					mp->m_maxicount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
	statp->f_ffree = max_t(__int64_t, ffree, 0);

	spin_unlock(&mp->m_sb_lock);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);
	return 0;
}
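
/*
 * Stash the current reserved block pool size and drain the pool before
 * a freeze or read-only remount; refill it afterwards from the saved
 * value, or from the default if nothing was saved.
 */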
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	__uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	__uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	substring_t		args[MAX_OPT_ARGS];
	char			*p;
	int			error;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			mp->m_flags |= XFS_MOUNT_BARRIER;

			/*
			 * Test if barriers are actually working if we can,
			 * else delay this check until the filesystem is
			 * marked writeable.
			 */
			if (!(mp->m_flags & XFS_MOUNT_RDONLY))
				xfs_mountfs_check_barriers(mp);
			break;
		case Opt_nobarrier:
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		default:
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they changed mount options
			 * that in fact cannot be changed by remount.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
			break;
		}
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
		mp->m_flags &= ~XFS_MOUNT_RDONLY;
		if (mp->m_flags & XFS_MOUNT_BARRIER)
			xfs_mountfs_check_barriers(mp);

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_flags) {
			error = xfs_mount_log_sb(mp, mp->m_update_flags);
			if (error) {
				cmn_err(CE_WARN,
					"XFS: failed to write sb changes");
				return error;
			}
			mp->m_update_flags = 0;
		}

		/*
		 * Fill out the reserve pool if it is empty.  Use the stashed
		 * value if it is non-zero, otherwise go with the default.
		 */
		xfs_restore_resvblks(mp);
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
		/*
		 * After we have synced the data but before we sync the
		 * metadata, we need to free up the reserve block pool so that
		 * the used block count in the superblock on disk is correct at
		 * the end of the remount.  Stash the current reserve pool size
		 * so that if we get remounted rw, we can return it to the same
		 * size.
		 */

		xfs_quiesce_data(mp);
		xfs_save_resvblks(mp);
		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
	}

	return 0;
}

/*
 * Second stage of a freeze.  The data is already frozen so we only
 * need to take care of the metadata.  Once that's done write a dummy
 * record to dirty the log in case of a crash while frozen.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return -xfs_fs_log_dummy(mp, SYNC_WAIT);
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	return 0;
}

STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			cmn_err(CE_WARN,
	"XFS: logbuf size must be greater than or equal to log stripe size");
			return XFS_ERROR(EINVAL);
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			cmn_err(CE_WARN,
	"XFS: logbuf size for version 1 logs must be 16K or 32K");
			return XFS_ERROR(EINVAL);
		}
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		cmn_err(CE_WARN,
	"XFS: cannot mount a read-only filesystem as read-write");
		return XFS_ERROR(EROFS);
	}

	return 0;
}
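
/*
 * Set up the VFS superblock and mount the filesystem: allocate the
 * xfs_mount structure, parse the mount options, open the underlying
 * devices, read the on-disk superblock and wire up the root inode.
 */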
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = ENOMEM;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		goto out;

	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_LIST_HEAD(&mp->m_sync_list);
	spin_lock_init(&mp->m_sync_lock);
	init_waitqueue_head(&mp->m_wait_single_sync_task);

	mp->m_super = sb;
	sb->s_fs_info = mp;

	error = xfs_parseargs(mp, (char *)data);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
#endif
	sb->s_op = &xfs_super_operations;

	if (silent)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_fsname;

	if (xfs_icsb_init_counters(mp))
		mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_destroy_counters;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_mountfs_check_barriers(mp);

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = ENOENT;
		goto fail_unmount;
	}
	if (is_bad_inode(root)) {
		error = EINVAL;
		goto fail_vnrele;
	}
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_vnrele;
	}

	error = xfs_syncd_init(mp);
	if (error)
		goto fail_vnrele;

	xfs_inode_shrinker_register(mp);

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_destroy_counters:
	xfs_icsb_destroy_counters(mp);
	xfs_close_devices(mp);
 out_free_fsname:
	xfs_free_fsname(mp);
	kfree(mp);
 out:
	return -error;

 fail_vnrele:
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		iput(root);
	}

 fail_unmount:
	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive
	 * here.
	 */
	xfs_filestream_unmount(mp);

	XFS_bflush(mp->m_ddev_targp);

	xfs_unmountfs(mp);
	goto out_free_sb;
}

STATIC int
xfs_fs_get_sb(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data,
	struct vfsmount		*mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
			   mnt);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.write_inode		= xfs_fs_write_inode,
	.evict_inode		= xfs_fs_evict_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
};

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.get_sb			= xfs_fs_get_sb,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
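
/*
 * Create the slab caches and the ioend mempool used throughout XFS.
 * On failure, everything set up so far is torn down again in reverse
 * order.
 */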
STATIC int __init
xfs_init_zones(void)
{

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out;

	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_destroy_ioend_zone;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
						"xfs_log_ticket");
	if (!xfs_log_ticket_zone)
		goto out_destroy_ioend_pool;

	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
						"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
						"xfs_btree_cur");
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
						"xfs_da_state");
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
	if (!xfs_dabuf_zone)
		goto out_destroy_da_state_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_dabuf_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	xfs_log_item_desc_zone =
		kmem_zone_init(sizeof(struct xfs_log_item_desc),
			       "xfs_log_item_desc");
	if (!xfs_log_item_desc_zone)
		goto out_destroy_trans_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
				(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
				  NBWORD) * sizeof(int))), "xfs_buf_item");
	if (!xfs_buf_item_zone)
		goto out_destroy_log_item_desc_zone;

	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
				 sizeof(xfs_extent_t))), "xfs_efd_item");
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
				sizeof(xfs_extent_t))), "xfs_efi_item");
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
			xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	return 0;

 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_log_item_desc_zone:
	kmem_zone_destroy(xfs_log_item_desc_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_dabuf_zone:
	kmem_zone_destroy(xfs_dabuf_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_destroy_ioend_pool:
	mempool_destroy(xfs_ioend_pool);
 out_destroy_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_log_item_desc_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_dabuf_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_ioend_zone);
}

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_ioend_init();
	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_zones;

	error = xfs_filestream_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_buf_init();
	if (error)
		goto out_filestream_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	vfs_initquota();

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_sysctl_unregister;
	return 0;

 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_filestream_uninit:
	xfs_filestream_uninit();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	vfs_exitquota();
	unregister_filesystem(&xfs_fs_type);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_filestream_uninit();
	xfs_mru_cache_uninit();
	xfs_destroy_zones();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");