/* fs/stat.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * linux/fs/stat.c
  4. *
  5. * Copyright (C) 1991, 1992 Linus Torvalds
  6. */
  7. #include <linux/blkdev.h>
  8. #include <linux/export.h>
  9. #include <linux/mm.h>
  10. #include <linux/errno.h>
  11. #include <linux/file.h>
  12. #include <linux/highuid.h>
  13. #include <linux/fs.h>
  14. #include <linux/namei.h>
  15. #include <linux/security.h>
  16. #include <linux/cred.h>
  17. #include <linux/syscalls.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/compat.h>
  20. #include <linux/iversion.h>
  21. #include <linux/uaccess.h>
  22. #include <asm/unistd.h>
  23. #include "internal.h"
  24. #include "mount.h"
/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @idmap: idmap of the mount the inode was found from
 * @request_mask: statx request_mask
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure. This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then
 * take care to map the inode according to @idmap before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode simply pass @nop_mnt_idmap.
 */
void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
		      struct inode *inode, struct kstat *stat)
{
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode_get_atime(inode);
	stat->mtime = inode_get_mtime(inode);
	stat->ctime = inode_get_ctime(inode);
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	/* Only expose the change cookie if the fs maintains i_version. */
	if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
		stat->result_mask |= STATX_CHANGE_COOKIE;
		stat->change_cookie = inode_query_iversion(inode);
	}
}
EXPORT_SYMBOL(generic_fillattr);
  66. /**
  67. * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
  68. * @inode: Inode to use as the source
  69. * @stat: Where to fill in the attribute flags
  70. *
  71. * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
  72. * inode that are published on i_flags and enforced by the VFS.
  73. */
  74. void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
  75. {
  76. if (inode->i_flags & S_IMMUTABLE)
  77. stat->attributes |= STATX_ATTR_IMMUTABLE;
  78. if (inode->i_flags & S_APPEND)
  79. stat->attributes |= STATX_ATTR_APPEND;
  80. stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
  81. }
  82. EXPORT_SYMBOL(generic_fill_statx_attr);
  83. /**
  84. * generic_fill_statx_atomic_writes - Fill in atomic writes statx attributes
  85. * @stat: Where to fill in the attribute flags
  86. * @unit_min: Minimum supported atomic write length in bytes
  87. * @unit_max: Maximum supported atomic write length in bytes
  88. *
  89. * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from
  90. * atomic write unit_min and unit_max values.
  91. */
  92. void generic_fill_statx_atomic_writes(struct kstat *stat,
  93. unsigned int unit_min,
  94. unsigned int unit_max)
  95. {
  96. /* Confirm that the request type is known */
  97. stat->result_mask |= STATX_WRITE_ATOMIC;
  98. /* Confirm that the file attribute type is known */
  99. stat->attributes_mask |= STATX_ATTR_WRITE_ATOMIC;
  100. if (unit_min) {
  101. stat->atomic_write_unit_min = unit_min;
  102. stat->atomic_write_unit_max = unit_max;
  103. /* Initially only allow 1x segment */
  104. stat->atomic_write_segments_max = 1;
  105. /* Confirm atomic writes are actually supported */
  106. stat->attributes |= STATX_ATTR_WRITE_ATOMIC;
  107. }
  108. }
  109. EXPORT_SYMBOL_GPL(generic_fill_statx_atomic_writes);
/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user. Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct mnt_idmap *idmap;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	/* Only the sync-type bits are forwarded to the filesystem. */
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	idmap = mnt_idmap(path->mnt);
	/* A filesystem getattr takes over entirely; mark it as nosec. */
	if (inode->i_op->getattr)
		return inode->i_op->getattr(idmap, path, stat,
					    request_mask,
					    query_flags | AT_GETATTR_NOSEC);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);
  154. /*
  155. * vfs_getattr - Get the enhanced basic attributes of a file
  156. * @path: The file of interest
  157. * @stat: Where to return the statistics
  158. * @request_mask: STATX_xxx flags indicating what the caller wants
  159. * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
  160. *
  161. * Ask the filesystem for a file's attributes. The caller must indicate in
  162. * request_mask and query_flags to indicate what they want.
  163. *
  164. * If the file is remote, the filesystem can be forced to update the attributes
  165. * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
  166. * suppress the update by passing AT_STATX_DONT_SYNC.
  167. *
  168. * Bits must have been set in request_mask to indicate which attributes the
  169. * caller wants retrieving. Any such attribute not requested may be returned
  170. * anyway, but the value may be approximate, and, if remote, may not have been
  171. * synchronised with the server.
  172. *
  173. * 0 will be returned on success, and a -ve error code if unsuccessful.
  174. */
  175. int vfs_getattr(const struct path *path, struct kstat *stat,
  176. u32 request_mask, unsigned int query_flags)
  177. {
  178. int retval;
  179. if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC))
  180. return -EPERM;
  181. retval = security_inode_getattr(path);
  182. if (retval)
  183. return retval;
  184. return vfs_getattr_nosec(path, stat, request_mask, query_flags);
  185. }
  186. EXPORT_SYMBOL(vfs_getattr);
  187. /**
  188. * vfs_fstat - Get the basic attributes by file descriptor
  189. * @fd: The file descriptor referring to the file of interest
  190. * @stat: The result structure to fill in.
  191. *
  192. * This function is a wrapper around vfs_getattr(). The main difference is
  193. * that it uses a file descriptor to determine the file location.
  194. *
  195. * 0 will be returned on success, and a -ve error code if unsuccessful.
  196. */
  197. int vfs_fstat(int fd, struct kstat *stat)
  198. {
  199. struct fd f;
  200. int error;
  201. f = fdget_raw(fd);
  202. if (!fd_file(f))
  203. return -EBADF;
  204. error = vfs_getattr(&fd_file(f)->f_path, stat, STATX_BASIC_STATS, 0);
  205. fdput(f);
  206. return error;
  207. }
  208. int getname_statx_lookup_flags(int flags)
  209. {
  210. int lookup_flags = 0;
  211. if (!(flags & AT_SYMLINK_NOFOLLOW))
  212. lookup_flags |= LOOKUP_FOLLOW;
  213. if (!(flags & AT_NO_AUTOMOUNT))
  214. lookup_flags |= LOOKUP_AUTOMOUNT;
  215. if (flags & AT_EMPTY_PATH)
  216. lookup_flags |= LOOKUP_EMPTY;
  217. return lookup_flags;
  218. }
/*
 * Get attributes for @path and fill in the mount ID, mount-root flag and
 * (for block devices) the bdev-specific fields.  Note that the mount-ID
 * fields are filled in even when vfs_getattr() failed; the error is still
 * returned to the caller.
 */
static int vfs_statx_path(struct path *path, int flags, struct kstat *stat,
			  u32 request_mask)
{
	int error = vfs_getattr(path, stat, request_mask, flags);

	/* The 64-bit unique mount ID is only handed out on explicit request. */
	if (request_mask & STATX_MNT_ID_UNIQUE) {
		stat->mnt_id = real_mount(path->mnt)->mnt_id_unique;
		stat->result_mask |= STATX_MNT_ID_UNIQUE;
	} else {
		stat->mnt_id = real_mount(path->mnt)->mnt_id;
		stat->result_mask |= STATX_MNT_ID;
	}

	if (path_mounted(path))
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;

	/*
	 * If this is a block device inode, override the filesystem
	 * attributes with the block device specific parameters that need to be
	 * obtained from the bdev backing inode.
	 */
	if (S_ISBLK(stat->mode))
		bdev_statx(path, stat, request_mask);

	return error;
}
  242. static int vfs_statx_fd(int fd, int flags, struct kstat *stat,
  243. u32 request_mask)
  244. {
  245. CLASS(fd_raw, f)(fd);
  246. if (!fd_file(f))
  247. return -EBADF;
  248. return vfs_statx_path(&fd_file(f)->f_path, flags, stat, request_mask);
  249. }
/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr(). The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
		     struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
	int error;

	/* Reject any flag this interface does not understand. */
	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

retry:
	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
	if (error)
		return error;

	error = vfs_statx_path(&path, flags, stat, request_mask);
	path_put(&path);
	/* Redo the walk with revalidation if the cached lookup went stale. */
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}
/*
 * fstatat(2) core: stat @filename relative to @dfd, with automount
 * traversal suppressed (AT_NO_AUTOMOUNT) as this interface always has.
 */
int vfs_fstatat(int dfd, const char __user *filename,
		struct kstat *stat, int flags)
{
	int ret;
	int statx_flags = flags | AT_NO_AUTOMOUNT;
	struct filename *name;

	/*
	 * Work around glibc turning fstat() into fstatat(AT_EMPTY_PATH)
	 *
	 * If AT_EMPTY_PATH is set, we expect the common case to be that
	 * empty path, and avoid doing all the extra pathname work.
	 */
	if (flags == AT_EMPTY_PATH && vfs_empty_path(dfd, filename))
		return vfs_fstat(dfd, stat);

	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags));
	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
	putname(name);

	return ret;
}
  305. #ifdef __ARCH_WANT_OLD_STAT
/*
 * For backward compatibility? Maybe this should be moved
 * into arch/i386 instead?
 *
 * Copy a kstat into the legacy __old_kernel_stat userspace layout,
 * returning -EOVERFLOW whenever a value cannot be represented there.
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	/* Nag the first few users of the obsolete syscall, then go quiet. */
	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Refuse to hand out a silently truncated inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	/* Same for a link count that does not fit the old field. */
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
  344. SYSCALL_DEFINE2(stat, const char __user *, filename,
  345. struct __old_kernel_stat __user *, statbuf)
  346. {
  347. struct kstat stat;
  348. int error;
  349. error = vfs_stat(filename, &stat);
  350. if (error)
  351. return error;
  352. return cp_old_stat(&stat, statbuf);
  353. }
  354. SYSCALL_DEFINE2(lstat, const char __user *, filename,
  355. struct __old_kernel_stat __user *, statbuf)
  356. {
  357. struct kstat stat;
  358. int error;
  359. error = vfs_lstat(filename, &stat);
  360. if (error)
  361. return error;
  362. return cp_old_stat(&stat, statbuf);
  363. }
  364. SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
  365. {
  366. struct kstat stat;
  367. int error = vfs_fstat(fd, &stat);
  368. if (!error)
  369. error = cp_old_stat(&stat, statbuf);
  370. return error;
  371. }
  372. #endif /* __ARCH_WANT_OLD_STAT */
  373. #ifdef __ARCH_WANT_NEW_STAT
  374. #ifndef INIT_STRUCT_STAT_PADDING
  375. # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
  376. #endif
/*
 * Copy a kstat into the native "new" struct stat userspace layout,
 * returning -EOVERFLOW for any value the layout cannot represent.
 */
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	/* Old-style 16-bit dev_t fields cannot encode large device numbers. */
	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	/* Zero padding so no kernel stack data leaks to userspace. */
	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Refuse silently truncated inode numbers. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
  413. SYSCALL_DEFINE2(newstat, const char __user *, filename,
  414. struct stat __user *, statbuf)
  415. {
  416. struct kstat stat;
  417. int error = vfs_stat(filename, &stat);
  418. if (error)
  419. return error;
  420. return cp_new_stat(&stat, statbuf);
  421. }
  422. SYSCALL_DEFINE2(newlstat, const char __user *, filename,
  423. struct stat __user *, statbuf)
  424. {
  425. struct kstat stat;
  426. int error;
  427. error = vfs_lstat(filename, &stat);
  428. if (error)
  429. return error;
  430. return cp_new_stat(&stat, statbuf);
  431. }
  432. #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
  433. SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
  434. struct stat __user *, statbuf, int, flag)
  435. {
  436. struct kstat stat;
  437. int error;
  438. error = vfs_fstatat(dfd, filename, &stat, flag);
  439. if (error)
  440. return error;
  441. return cp_new_stat(&stat, statbuf);
  442. }
  443. #endif
  444. SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
  445. {
  446. struct kstat stat;
  447. int error = vfs_fstat(fd, &stat);
  448. if (!error)
  449. error = cp_new_stat(&stat, statbuf);
  450. return error;
  451. }
  452. #endif
/*
 * Resolve @pathname relative to @dfd and copy the link target into the
 * user buffer @buf (up to @bufsiz bytes, no NUL termination added).
 */
static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	struct filename *name;
	int error;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	name = getname_flags(pathname, lookup_flags);
	error = filename_lookup(dfd, name, lookup_flags, &path, NULL);
	if (unlikely(error)) {
		putname(name);
		return error;
	}

	/*
	 * AFS mountpoints allow readlink(2) but are not symlinks
	 */
	if (d_is_symlink(path.dentry) ||
	    d_backing_inode(path.dentry)->i_op->readlink) {
		/* Security check first, then atime update, then the copy. */
		error = security_inode_readlink(path.dentry);
		if (!error) {
			touch_atime(&path);
			error = vfs_readlink(path.dentry, buf, bufsiz);
		}
	} else {
		/* An empty path resolved to a non-link -> ENOENT. */
		error = (name->name[0] == '\0') ? -ENOENT : -EINVAL;
	}
	path_put(&path);
	putname(name);
	/* Redo the walk with revalidation if the cached lookup went stale. */
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}
/* readlinkat(2): thin wrapper around do_readlinkat(). */
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}
/* readlink(2): readlinkat(2) relative to the current working directory. */
SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
  500. /* ---------- LFS-64 ----------- */
  501. #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
  502. #ifndef INIT_STRUCT_STAT64_PADDING
  503. # define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
  504. #endif
/*
 * Copy a kstat into the LFS struct stat64 userspace layout,
 * returning -EOVERFLOW if the inode number cannot be represented.
 */
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	/* Zero padding so no kernel stack data leaks to userspace. */
	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	/* Refuse silently truncated inode numbers. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
  538. SYSCALL_DEFINE2(stat64, const char __user *, filename,
  539. struct stat64 __user *, statbuf)
  540. {
  541. struct kstat stat;
  542. int error = vfs_stat(filename, &stat);
  543. if (!error)
  544. error = cp_new_stat64(&stat, statbuf);
  545. return error;
  546. }
  547. SYSCALL_DEFINE2(lstat64, const char __user *, filename,
  548. struct stat64 __user *, statbuf)
  549. {
  550. struct kstat stat;
  551. int error = vfs_lstat(filename, &stat);
  552. if (!error)
  553. error = cp_new_stat64(&stat, statbuf);
  554. return error;
  555. }
  556. SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
  557. {
  558. struct kstat stat;
  559. int error = vfs_fstat(fd, &stat);
  560. if (!error)
  561. error = cp_new_stat64(&stat, statbuf);
  562. return error;
  563. }
  564. SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
  565. struct stat64 __user *, statbuf, int, flag)
  566. {
  567. struct kstat stat;
  568. int error;
  569. error = vfs_fstatat(dfd, filename, &stat, flag);
  570. if (error)
  571. return error;
  572. return cp_new_stat64(&stat, statbuf);
  573. }
  574. #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
/*
 * Translate a kstat into the userspace struct statx and copy it out.
 * Unfilled fields stay zero from the memset; kernel-internal bits are
 * stripped from stx_mask/stx_attributes before they reach userspace.
 */
static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	/* STATX_CHANGE_COOKIE is kernel-only for now */
	tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
	tmp.stx_blksize = stat->blksize;
	/* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
	tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;
	tmp.stx_dio_mem_align = stat->dio_mem_align;
	tmp.stx_dio_offset_align = stat->dio_offset_align;
	tmp.stx_subvol = stat->subvol;
	tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min;
	tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max;
	tmp.stx_atomic_write_segments_max = stat->atomic_write_segments_max;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
/*
 * statx() on an already-resolved struct filename; the caller owns
 * @filename and remains responsible for putname().
 */
int do_statx(int dfd, struct filename *filename, unsigned int flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	/* Reject mask bits reserved for future kernel use. */
	if (mask & STATX__RESERVED)
		return -EINVAL;
	/* All sync-type bits set at once (force + don't sync) is invalid. */
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/*
	 * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}
/*
 * statx() directly on a file descriptor (the AT_EMPTY_PATH fast path).
 * Validation mirrors do_statx() and must be kept in sync with it.
 */
int do_statx_fd(int fd, unsigned int flags, unsigned int mask,
		struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	/* Reject mask bits reserved for future kernel use. */
	if (mask & STATX__RESERVED)
		return -EINVAL;
	/* All sync-type bits set at once (force + don't sync) is invalid. */
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/*
	 * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx_fd(fd, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}
/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or either NULL or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" (or preferably NULL) as the filename and setting AT_EMPTY_PATH
 * in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	int ret;
	unsigned lflags;
	struct filename *name;

	/*
	 * Short-circuit handling of NULL and "" paths.
	 *
	 * For a NULL path we require and accept only the AT_EMPTY_PATH flag
	 * (possibly |'d with AT_STATX flags).
	 *
	 * However, glibc on 32-bit architectures implements fstatat as statx
	 * with the "" pathname and AT_NO_AUTOMOUNT | AT_EMPTY_PATH flags.
	 * Supporting this results in the uglification below.
	 */
	lflags = flags & ~(AT_NO_AUTOMOUNT | AT_STATX_SYNC_TYPE);
	if (lflags == AT_EMPTY_PATH && vfs_empty_path(dfd, filename))
		return do_statx_fd(dfd, flags & ~AT_NO_AUTOMOUNT, mask, buffer);

	/* Slow path: full pathname resolution. */
	name = getname_flags(filename, getname_statx_lookup_flags(flags));
	ret = do_statx(dfd, name, flags, mask, buffer);
	putname(name);

	return ret;
}
  690. #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
/*
 * Copy a kstat into the compat (32-bit ABI) struct stat layout,
 * returning -EOVERFLOW for any value the layout cannot represent.
 */
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	/* Old-style narrow dev_t fields cannot encode large device numbers. */
	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	/* Zero everything (including padding) before filling fields in. */
	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Refuse silently truncated inode numbers. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	/* Compat callers get no large-file sizes. */
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
  723. COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
  724. struct compat_stat __user *, statbuf)
  725. {
  726. struct kstat stat;
  727. int error;
  728. error = vfs_stat(filename, &stat);
  729. if (error)
  730. return error;
  731. return cp_compat_stat(&stat, statbuf);
  732. }
  733. COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
  734. struct compat_stat __user *, statbuf)
  735. {
  736. struct kstat stat;
  737. int error;
  738. error = vfs_lstat(filename, &stat);
  739. if (error)
  740. return error;
  741. return cp_compat_stat(&stat, statbuf);
  742. }
  743. #ifndef __ARCH_WANT_STAT64
  744. COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
  745. const char __user *, filename,
  746. struct compat_stat __user *, statbuf, int, flag)
  747. {
  748. struct kstat stat;
  749. int error;
  750. error = vfs_fstatat(dfd, filename, &stat, flag);
  751. if (error)
  752. return error;
  753. return cp_compat_stat(&stat, statbuf);
  754. }
  755. #endif
  756. COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
  757. struct compat_stat __user *, statbuf)
  758. {
  759. struct kstat stat;
  760. int error = vfs_fstat(fd, &stat);
  761. if (!error)
  762. error = cp_compat_stat(&stat, statbuf);
  763. return error;
  764. }
  765. #endif
/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	/* Whole 512-byte sectors go straight into i_blocks ... */
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	/* ... and the sub-sector remainder accumulates in i_bytes, */
	inode->i_bytes += bytes;
	/* carrying one sector into i_blocks on overflow. */
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);
/* Locked variant of __inode_add_bytes(). */
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_bytes);
/* Caller is responsible for locking (ie. inode->i_lock); see inode_sub_bytes(). */
void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	/* Whole 512-byte sectors come straight off i_blocks ... */
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	/* ... borrowing one sector if the sub-sector remainder underflows. */
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}
EXPORT_SYMBOL(__inode_sub_bytes);
/* Locked variant of __inode_sub_bytes(). */
void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_bytes);
/* Read the byte count under i_lock so i_blocks/i_bytes are seen consistently. */
loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}
EXPORT_SYMBOL(inode_get_bytes);
/* Split @bytes into whole 512-byte sectors (i_blocks) and remainder (i_bytes). */
void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}
EXPORT_SYMBOL(inode_set_bytes);