// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/stat.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include "internal.h"
#include "mount.h"

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @idmap: idmap of the mount the inode was found from
 * @request_mask: statx request_mask
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then
 * take care to map the inode according to @idmap before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode simply pass @nop_mnt_idmap.
 */
void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
		      struct inode *inode, struct kstat *stat)
{
	/* Map the raw i_uid/i_gid through the mount's idmapping first. */
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode_get_atime(inode);
	stat->mtime = inode_get_mtime(inode);
	stat->ctime = inode_get_ctime(inode);
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	/*
	 * Only report a change cookie when it was explicitly requested and
	 * the filesystem actually maintains i_version.
	 */
	if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
		stat->result_mask |= STATX_CHANGE_COOKIE;
		stat->change_cookie = inode_query_iversion(inode);
	}

}
EXPORT_SYMBOL(generic_fillattr);

/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	/* Both flags above are VFS-enforced, so always claim them as known. */
	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct mnt_idmap *idmap;
	struct inode *inode = d_backing_inode(path->dentry);

	/* Start from a clean slate; the basic stats are always provided. */
	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	/* Only the sync-type bits are forwarded to the filesystem. */
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	idmap = mnt_idmap(path->mnt);
	/*
	 * Let the filesystem's own ->getattr() take over if provided;
	 * AT_GETATTR_NOSEC tells it this is the unsecured variant.
	 */
	if (inode->i_op->getattr)
		return inode->i_op->getattr(idmap, path, stat,
					    request_mask,
					    query_flags | AT_GETATTR_NOSEC);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/*
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must set values in
 * request_mask and query_flags to indicate what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieving.  Any such attribute not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	/* AT_GETATTR_NOSEC is kernel-internal; callers must never pass it. */
	if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC))
		return -EPERM;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);

/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr(). The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	struct fd f;
	int error;

	/* fdget_raw() so O_PATH descriptors can be stat'ed too. */
	f = fdget_raw(fd);
	if (!f.file)
		return -EBADF;
	error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
	fdput(f);
	return error;
}

/* Translate statx()-style AT_* flags into LOOKUP_* pathwalk flags. */
int getname_statx_lookup_flags(int flags)
{
	int lookup_flags = 0;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	return lookup_flags;
}

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr(). The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
	      struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

retry:
	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);

	/* Report the mount ID: the unique 64-bit one only if asked for. */
	if (request_mask & STATX_MNT_ID_UNIQUE) {
		stat->mnt_id = real_mount(path.mnt)->mnt_id_unique;
		stat->result_mask |= STATX_MNT_ID_UNIQUE;
	} else {
		stat->mnt_id = real_mount(path.mnt)->mnt_id;
		stat->result_mask |= STATX_MNT_ID;
	}

	if (path.mnt->mnt_root == path.dentry)
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;

	/* Handle STATX_DIOALIGN for block devices. */
	if (request_mask & STATX_DIOALIGN) {
		struct inode *inode = d_backing_inode(path.dentry);

		if (S_ISBLK(inode->i_mode))
			bdev_statx_dioalign(inode, stat);
	}

	path_put(&path);
	/* Retry once with revalidation if the dentry went stale (NFS etc.). */
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}

/* fstatat()-style entry point: resolve @filename relative to @dfd. */
int vfs_fstatat(int dfd, const char __user *filename,
		struct kstat *stat, int flags)
{
	int ret;
	int statx_flags = flags | AT_NO_AUTOMOUNT;
	struct filename *name;

	/*
	 * Work around glibc turning fstat() into fstatat(AT_EMPTY_PATH)
	 *
	 * If AT_EMPTY_PATH is set, we expect the common case to be that
	 * empty path, and avoid doing all the extra pathname work.
	 */
	if (dfd >= 0 && flags == AT_EMPTY_PATH) {
		char c;

		/*
		 * Peek at the first byte of the pathname: a NUL means the
		 * caller really wants a plain fstat() on @dfd.
		 */
		ret = get_user(c, filename);
		if (unlikely(ret))
			return ret;

		if (likely(!c))
			return vfs_fstat(dfd, stat);
	}

	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
	putname(name);

	return ret;
}

#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	/* Rate-limit the deprecation warning to the first few users. */
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Assign-then-compare detects truncation of a wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

/* Copy a kstat into the userspace "new" struct stat, checking for overflow. */
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Assign-then-compare detects truncation of a wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

/* Shared implementation of readlink(2) and readlinkat(2). */
static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		/* Default error if the target turns out not to be a link. */
		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

/* Copy a kstat into the userspace struct stat64. */
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

/* Copy a kstat into the userspace struct statx, masking kernel-only bits. */
static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	/* STATX_CHANGE_COOKIE is kernel-only for now */
	tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
	tmp.stx_blksize = stat->blksize;
	/* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
	tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;
	tmp.stx_dio_mem_align = stat->dio_mem_align;
	tmp.stx_dio_offset_align = stat->dio_offset_align;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

/* Validate statx arguments, perform the query and copy the result out. */
int do_statx(int dfd, struct filename *filename, unsigned int flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	/* FORCE_SYNC and DONT_SYNC together are contradictory. */
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/* STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	int ret;
	struct filename *name;

	name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
	ret = do_statx(dfd, name, flags, mask, buffer);
	putname(name);

	return ret;
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
/* Copy a kstat into the 32-bit compat struct stat, checking for overflow. */
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Assign-then-compare detects truncation of a wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* Caller is here responsible for sufficient locking (ie.
inode->i_lock) */ 802void __inode_add_bytes(struct inode *inode, loff_t bytes) 803{ 804 inode->i_blocks += bytes >> 9; 805 bytes &= 511; 806 inode->i_bytes += bytes; 807 if (inode->i_bytes >= 512) { 808 inode->i_blocks++; 809 inode->i_bytes -= 512; 810 } 811} 812EXPORT_SYMBOL(__inode_add_bytes); 813 814void inode_add_bytes(struct inode *inode, loff_t bytes) 815{ 816 spin_lock(&inode->i_lock); 817 __inode_add_bytes(inode, bytes); 818 spin_unlock(&inode->i_lock); 819} 820 821EXPORT_SYMBOL(inode_add_bytes); 822 823void __inode_sub_bytes(struct inode *inode, loff_t bytes) 824{ 825 inode->i_blocks -= bytes >> 9; 826 bytes &= 511; 827 if (inode->i_bytes < bytes) { 828 inode->i_blocks--; 829 inode->i_bytes += 512; 830 } 831 inode->i_bytes -= bytes; 832} 833 834EXPORT_SYMBOL(__inode_sub_bytes); 835 836void inode_sub_bytes(struct inode *inode, loff_t bytes) 837{ 838 spin_lock(&inode->i_lock); 839 __inode_sub_bytes(inode, bytes); 840 spin_unlock(&inode->i_lock); 841} 842 843EXPORT_SYMBOL(inode_sub_bytes); 844 845loff_t inode_get_bytes(struct inode *inode) 846{ 847 loff_t ret; 848 849 spin_lock(&inode->i_lock); 850 ret = __inode_get_bytes(inode); 851 spin_unlock(&inode->i_lock); 852 return ret; 853} 854 855EXPORT_SYMBOL(inode_get_bytes); 856 857void inode_set_bytes(struct inode *inode, loff_t bytes) 858{ 859 /* Caller is here responsible for sufficient locking 860 * (ie. inode->i_lock) */ 861 inode->i_blocks = bytes >> 9; 862 inode->i_bytes = bytes & 511; 863} 864 865EXPORT_SYMBOL(inode_set_bytes); 866