read_write.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * linux/fs/read_write.c
  4. *
  5. * Copyright (C) 1991, 1992 Linus Torvalds
  6. */
  7. #include <linux/slab.h>
  8. #include <linux/stat.h>
  9. #include <linux/sched/xacct.h>
  10. #include <linux/fcntl.h>
  11. #include <linux/file.h>
  12. #include <linux/uio.h>
  13. #include <linux/fsnotify.h>
  14. #include <linux/security.h>
  15. #include <linux/export.h>
  16. #include <linux/syscalls.h>
  17. #include <linux/pagemap.h>
  18. #include <linux/splice.h>
  19. #include <linux/compat.h>
  20. #include <linux/mount.h>
  21. #include <linux/fs.h>
  22. #include "internal.h"
  23. #include <linux/uaccess.h>
  24. #include <asm/unistd.h>
/*
 * Default file_operations for read-only regular files backed by the page
 * cache: generic positional seek, iterator-based reads, read-only mmap and
 * splice-read support. No write paths are wired up.
 */
const struct file_operations generic_ro_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= filemap_splice_read,
};
EXPORT_SYMBOL(generic_ro_fops);
  32. static inline bool unsigned_offsets(struct file *file)
  33. {
  34. return file->f_op->fop_flags & FOP_UNSIGNED_OFFSET;
  35. }
  36. /**
  37. * vfs_setpos_cookie - update the file offset for lseek and reset cookie
  38. * @file: file structure in question
  39. * @offset: file offset to seek to
  40. * @maxsize: maximum file size
  41. * @cookie: cookie to reset
  42. *
  43. * Update the file offset to the value specified by @offset if the given
  44. * offset is valid and it is not equal to the current file offset and
  45. * reset the specified cookie to indicate that a seek happened.
  46. *
  47. * Return the specified offset on success and -EINVAL on invalid offset.
  48. */
  49. static loff_t vfs_setpos_cookie(struct file *file, loff_t offset,
  50. loff_t maxsize, u64 *cookie)
  51. {
  52. if (offset < 0 && !unsigned_offsets(file))
  53. return -EINVAL;
  54. if (offset > maxsize)
  55. return -EINVAL;
  56. if (offset != file->f_pos) {
  57. file->f_pos = offset;
  58. if (cookie)
  59. *cookie = 0;
  60. }
  61. return offset;
  62. }
/**
 * vfs_setpos - update the file offset for lseek
 * @file: file structure in question
 * @offset: file offset to seek to
 * @maxsize: maximum file size
 *
 * This is a low-level filesystem helper for updating the file offset to
 * the value specified by @offset if the given offset is valid and it is
 * not equal to the current file offset.
 *
 * Return the specified offset on success and -EINVAL on invalid offset.
 */
loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
	/* Same as vfs_setpos_cookie() but with no seek cookie to reset. */
	return vfs_setpos_cookie(file, offset, maxsize, NULL);
}
EXPORT_SYMBOL(vfs_setpos);
/**
 * must_set_pos - check whether f_pos has to be updated
 * @file: file to seek on
 * @offset: offset to use
 * @whence: type of seek operation
 * @eof: end of file
 *
 * Check whether f_pos needs to be updated and update @offset according
 * to @whence.
 *
 * Return: 0 if f_pos doesn't need to be updated, 1 if f_pos has to be
 * updated, and negative error code on failure.
 */
static int must_set_pos(struct file *file, loff_t *offset, int whence, loff_t eof)
{
	switch (whence) {
	case SEEK_END:
		/* Make @offset relative to the end of file. */
		*offset += eof;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation. Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (*offset == 0) {
			*offset = file->f_pos;
			return 0;
		}
		break;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as long as
		 * offset isn't at the end of the file then the offset is data.
		 */
		if ((unsigned long long)*offset >= eof)
			return -ENXIO;
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so as long as
		 * offset isn't i_size or larger, return i_size.
		 */
		if ((unsigned long long)*offset >= eof)
			return -ENXIO;
		*offset = eof;
		break;
	}
	/*
	 * SEEK_SET (and any unrecognized whence) falls through here with
	 * @offset unmodified: the caller must install it as the new f_pos.
	 */
	return 1;
}
/**
 * generic_file_llseek_size - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @maxsize: max size of this file in file system
 * @eof: offset used for SEEK_END position
 *
 * This is a variant of generic_file_llseek that allows passing in a custom
 * maximum file size and a custom EOF position, for e.g. hashed directories
 *
 * Synchronization:
 * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
 * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
 * read/writes behave like SEEK_SET against seeks.
 */
loff_t
generic_file_llseek_size(struct file *file, loff_t offset, int whence,
		loff_t maxsize, loff_t eof)
{
	int ret;

	/* Adjust @offset per @whence; may answer SEEK_CUR(0) directly. */
	ret = must_set_pos(file, &offset, whence, eof);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return offset;

	if (whence == SEEK_CUR) {
		/*
		 * f_lock protects against read/modify/write race with
		 * other SEEK_CURs. Note that parallel writes and reads
		 * behave like SEEK_SET.
		 */
		guard(spinlock)(&file->f_lock);
		return vfs_setpos(file, file->f_pos + offset, maxsize);
	}

	return vfs_setpos(file, offset, maxsize);
}
EXPORT_SYMBOL(generic_file_llseek_size);
/**
 * generic_llseek_cookie - versioned llseek implementation
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @cookie: cookie to update
 *
 * See generic_file_llseek for a general description and locking assumptions.
 *
 * In contrast to generic_file_llseek, this function also resets a
 * specified cookie to indicate a seek took place.
 */
loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
			     u64 *cookie)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxsize = inode->i_sb->s_maxbytes;
	loff_t eof = i_size_read(inode);
	int ret;

	/* A cookie is the whole point of this variant; NULL is a caller bug. */
	if (WARN_ON_ONCE(!cookie))
		return -EINVAL;

	/*
	 * Require that this is only used for directories that guarantee
	 * synchronization between readdir and seek so that an update to
	 * @cookie is correctly synchronized with concurrent readdir.
	 */
	if (WARN_ON_ONCE(!(file->f_mode & FMODE_ATOMIC_POS)))
		return -EINVAL;

	ret = must_set_pos(file, &offset, whence, eof);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return offset;

	/* No need to hold f_lock because we know that f_pos_lock is held. */
	if (whence == SEEK_CUR)
		return vfs_setpos_cookie(file, file->f_pos + offset, maxsize, cookie);

	return vfs_setpos_cookie(file, offset, maxsize, cookie);
}
EXPORT_SYMBOL(generic_llseek_cookie);
/**
 * generic_file_llseek - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 * This is a generic implementation of ->llseek usable for all normal local
 * filesystems. It just updates the file offset to the value specified by
 * @offset and @whence.
 */
loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	/* Bound offsets by the filesystem's max size; SEEK_END uses i_size. */
	return generic_file_llseek_size(file, offset, whence,
					inode->i_sb->s_maxbytes,
					i_size_read(inode));
}
EXPORT_SYMBOL(generic_file_llseek);
  226. /**
  227. * fixed_size_llseek - llseek implementation for fixed-sized devices
  228. * @file: file structure to seek on
  229. * @offset: file offset to seek to
  230. * @whence: type of seek
  231. * @size: size of the file
  232. *
  233. */
  234. loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size)
  235. {
  236. switch (whence) {
  237. case SEEK_SET: case SEEK_CUR: case SEEK_END:
  238. return generic_file_llseek_size(file, offset, whence,
  239. size, size);
  240. default:
  241. return -EINVAL;
  242. }
  243. }
  244. EXPORT_SYMBOL(fixed_size_llseek);
  245. /**
  246. * no_seek_end_llseek - llseek implementation for fixed-sized devices
  247. * @file: file structure to seek on
  248. * @offset: file offset to seek to
  249. * @whence: type of seek
  250. *
  251. */
  252. loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence)
  253. {
  254. switch (whence) {
  255. case SEEK_SET: case SEEK_CUR:
  256. return generic_file_llseek_size(file, offset, whence,
  257. OFFSET_MAX, 0);
  258. default:
  259. return -EINVAL;
  260. }
  261. }
  262. EXPORT_SYMBOL(no_seek_end_llseek);
  263. /**
  264. * no_seek_end_llseek_size - llseek implementation for fixed-sized devices
  265. * @file: file structure to seek on
  266. * @offset: file offset to seek to
  267. * @whence: type of seek
  268. * @size: maximal offset allowed
  269. *
  270. */
  271. loff_t no_seek_end_llseek_size(struct file *file, loff_t offset, int whence, loff_t size)
  272. {
  273. switch (whence) {
  274. case SEEK_SET: case SEEK_CUR:
  275. return generic_file_llseek_size(file, offset, whence,
  276. size, 0);
  277. default:
  278. return -EINVAL;
  279. }
  280. }
  281. EXPORT_SYMBOL(no_seek_end_llseek_size);
/**
 * noop_llseek - No Operation Performed llseek implementation
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 * This is an implementation of ->llseek useable for the rare special case when
 * userspace expects the seek to succeed but the (device) file is actually not
 * able to perform the seek. In this case you use noop_llseek() instead of
 * falling back to the default implementation of ->llseek.
 */
loff_t noop_llseek(struct file *file, loff_t offset, int whence)
{
	/* Position never moves; report the current one as "success". */
	return file->f_pos;
}
EXPORT_SYMBOL(noop_llseek);
/*
 * Fallback ->llseek used when a filesystem provides none. Unlike
 * generic_file_llseek() it serializes everything under inode_lock and
 * reads the size via i_size directly.
 */
loff_t default_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);
	loff_t retval;

	inode_lock(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		/* lseek(fd, 0, SEEK_CUR) just queries the position. */
		if (offset == 0) {
			retval = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as
		 * long as offset isn't at the end of the file then the
		 * offset is data.
		 */
		if (offset >= inode->i_size) {
			retval = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so
		 * as long as offset isn't i_size or larger, return
		 * i_size.
		 */
		if (offset >= inode->i_size) {
			retval = -ENXIO;
			goto out;
		}
		offset = inode->i_size;
		break;
	}
	retval = -EINVAL;
	/* Negative offsets are only valid for unsigned-offset files. */
	if (offset >= 0 || unsigned_offsets(file)) {
		if (offset != file->f_pos)
			file->f_pos = offset;
		retval = offset;
	}
out:
	inode_unlock(inode);
	return retval;
}
EXPORT_SYMBOL(default_llseek);
/*
 * vfs_llseek - seek entry point used by the lseek()/llseek() syscalls.
 * Fails with -ESPIPE for files opened without seek support (pipes etc.),
 * otherwise dispatches to the file's ->llseek method.
 */
loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
{
	if (!(file->f_mode & FMODE_LSEEK))
		return -ESPIPE;
	return file->f_op->llseek(file, offset, whence);
}
EXPORT_SYMBOL(vfs_llseek);
/*
 * Common implementation of the lseek() syscall (native and compat).
 * Takes the per-fd position lock via fdget_pos() and narrows the loff_t
 * result to off_t, reporting -EOVERFLOW if it does not fit.
 */
static off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence)
{
	off_t retval;
	struct fd f = fdget_pos(fd);

	if (!fd_file(f))
		return -EBADF;

	retval = -EINVAL;
	if (whence <= SEEK_MAX) {
		loff_t res = vfs_llseek(fd_file(f), offset, whence);
		retval = res;
		/* Narrowing check: off_t may be 32-bit while loff_t is 64. */
		if (res != (loff_t)retval)
			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
	}
	fdput_pos(f);
	return retval;
}

SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
{
	return ksys_lseek(fd, offset, whence);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
{
	return ksys_lseek(fd, offset, whence);
}
#endif
#if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT) || \
	defined(__ARCH_WANT_SYS_LLSEEK)
/*
 * _llseek(): 32-bit syscall taking the 64-bit offset split into two
 * registers; the resulting position is written to the @result user
 * pointer rather than returned.
 */
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
		unsigned long, offset_low, loff_t __user *, result,
		unsigned int, whence)
{
	int retval;
	struct fd f = fdget_pos(fd);
	loff_t offset;

	if (!fd_file(f))
		return -EBADF;

	retval = -EINVAL;
	if (whence > SEEK_MAX)
		goto out_putf;

	/* Reassemble the 64-bit offset from its two 32-bit halves. */
	offset = vfs_llseek(fd_file(f), ((loff_t) offset_high << 32) | offset_low,
			whence);

	retval = (int)offset;
	if (offset >= 0) {
		retval = -EFAULT;
		if (!copy_to_user(result, &offset, sizeof(offset)))
			retval = 0;
	}
out_putf:
	fdput_pos(f);
	return retval;
}
#endif
/*
 * rw_verify_area - validate a read/write request before it is issued.
 * Checks @count and the position range for overflow, then runs the LSM
 * file-permission hook and fsnotify permission events. @ppos may be NULL
 * for stream-like files with no position.
 */
int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
{
	int mask = read_write == READ ? MAY_READ : MAY_WRITE;
	int ret;

	/* Rule out counts so large they'd be negative as ssize_t. */
	if (unlikely((ssize_t) count < 0))
		return -EINVAL;

	if (ppos) {
		loff_t pos = *ppos;

		if (unlikely(pos < 0)) {
			/* Negative positions only valid on unsigned-offset files. */
			if (!unsigned_offsets(file))
				return -EINVAL;
			/* The access must not wrap back past offset 0. */
			if (count >= -pos) /* both values are in 0..LLONG_MAX */
				return -EOVERFLOW;
		} else if (unlikely((loff_t) (pos + count) < 0)) {
			/* pos + count overflowed loff_t. */
			if (!unsigned_offsets(file))
				return -EINVAL;
		}
	}

	ret = security_file_permission(file, mask);
	if (ret)
		return ret;

	return fsnotify_file_area_perm(file, mask, ppos, count);
}
EXPORT_SYMBOL(rw_verify_area);
/*
 * Emulate a plain ->read() with the file's ->read_iter() by wrapping the
 * user buffer in a single-segment iterator and a synchronous kiocb.
 * The position is written back through @ppos (when non-NULL) regardless
 * of the return value.
 */
static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = (ppos ? *ppos : 0);
	iov_iter_ubuf(&iter, ITER_DEST, buf, len);

	ret = filp->f_op->read_iter(&kiocb, &iter);
	/* A sync kiocb must complete synchronously. */
	BUG_ON(ret == -EIOCBQUEUED);
	if (ppos)
		*ppos = kiocb.ki_pos;
	return ret;
}
/*
 * Ratelimited complaint for in-kernel reads/writes on files whose
 * file_operations don't support them; always returns -EINVAL so callers
 * can "return warn_unsupported(...)" directly.
 */
static int warn_unsupported(struct file *file, const char *op)
{
	pr_warn_ratelimited(
		"kernel %s not supported for file %pD4 (pid: %d comm: %.20s)\n",
		op, file, current->pid, current->comm);
	return -EINVAL;
}
/*
 * __kernel_read - read into a kernel buffer, bypassing rw_verify_area().
 * Requires the file to support ->read_iter (and not ->read); the transfer
 * is capped at MAX_RW_COUNT. @pos may be NULL for position-less reads.
 */
ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
{
	struct kvec iov = {
		.iov_base	= buf,
		.iov_len	= min_t(size_t, count, MAX_RW_COUNT),
	};
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ)))
		return -EINVAL;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;
	/*
	 * Also fail if ->read_iter and ->read are both wired up as that
	 * implies very convoluted semantics.
	 */
	if (unlikely(!file->f_op->read_iter || file->f_op->read))
		return warn_unsupported(file, "read");

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos ? *pos : 0;
	iov_iter_kvec(&iter, ITER_DEST, &iov, 1, iov.iov_len);
	ret = file->f_op->read_iter(&kiocb, &iter);
	if (ret > 0) {
		if (pos)
			*pos = kiocb.ki_pos;
		fsnotify_access(file);
		add_rchar(current, ret);
	}
	inc_syscr(current);
	return ret;
}
  486. ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
  487. {
  488. ssize_t ret;
  489. ret = rw_verify_area(READ, file, pos, count);
  490. if (ret)
  491. return ret;
  492. return __kernel_read(file, buf, count, pos);
  493. }
  494. EXPORT_SYMBOL(kernel_read);
/*
 * vfs_read - core read path for userspace buffers. Validates mode bits,
 * buffer accessibility and the (pos, count) range, caps the transfer at
 * MAX_RW_COUNT, then dispatches to ->read or the ->read_iter shim.
 */
ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;
	if (unlikely(!access_ok(buf, count)))
		return -EFAULT;

	ret = rw_verify_area(READ, file, pos, count);
	if (ret)
		return ret;
	if (count > MAX_RW_COUNT)
		count =  MAX_RW_COUNT;

	/* Prefer ->read; fall back to ->read_iter via new_sync_read(). */
	if (file->f_op->read)
		ret = file->f_op->read(file, buf, count, pos);
	else if (file->f_op->read_iter)
		ret = new_sync_read(file, buf, count, pos);
	else
		ret = -EINVAL;
	if (ret > 0) {
		fsnotify_access(file);
		add_rchar(current, ret);
	}
	inc_syscr(current);
	return ret;
}
/*
 * Emulate a plain ->write() with the file's ->write_iter(), mirroring
 * new_sync_read(). Note the asymmetry: the position is only written back
 * through @ppos when some data was written (ret > 0).
 */
static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = (ppos ? *ppos : 0);
	iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)buf, len);

	ret = filp->f_op->write_iter(&kiocb, &iter);
	/* A sync kiocb must complete synchronously. */
	BUG_ON(ret == -EIOCBQUEUED);
	if (ret > 0 && ppos)
		*ppos = kiocb.ki_pos;
	return ret;
}
/* caller is responsible for file_start_write/file_end_write */
ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *pos)
{
	struct kiocb kiocb;
	ssize_t ret;

	if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	/*
	 * Also fail if ->write_iter and ->write are both wired up as that
	 * implies very convoluted semantics.
	 */
	if (unlikely(!file->f_op->write_iter || file->f_op->write))
		return warn_unsupported(file, "write");

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos ? *pos : 0;
	ret = file->f_op->write_iter(&kiocb, from);
	if (ret > 0) {
		if (pos)
			*pos = kiocb.ki_pos;
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}
/* caller is responsible for file_start_write/file_end_write */
ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
{
	/* Wrap the kernel buffer in a one-segment kvec, capped at MAX_RW_COUNT. */
	struct kvec iov = {
		.iov_base	= (void *)buf,
		.iov_len	= min_t(size_t, count, MAX_RW_COUNT),
	};
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, iov.iov_len);
	return __kernel_write_iter(file, &iter, pos);
}
/*
 * This "EXPORT_SYMBOL_GPL()" is more of a "EXPORT_SYMBOL_DONTUSE()",
 * but autofs is one of the few internal kernel users that actually
 * wants this _and_ can be built as a module. So we need to export
 * this symbol for autofs, even though it really isn't appropriate
 * for any other kernel modules.
 */
EXPORT_SYMBOL_GPL(__kernel_write);
  582. ssize_t kernel_write(struct file *file, const void *buf, size_t count,
  583. loff_t *pos)
  584. {
  585. ssize_t ret;
  586. ret = rw_verify_area(WRITE, file, pos, count);
  587. if (ret)
  588. return ret;
  589. file_start_write(file);
  590. ret = __kernel_write(file, buf, count, pos);
  591. file_end_write(file);
  592. return ret;
  593. }
  594. EXPORT_SYMBOL(kernel_write);
/*
 * vfs_write - core write path for userspace buffers. Mirrors vfs_read():
 * mode/access/range validation, MAX_RW_COUNT cap, then ->write or the
 * ->write_iter shim, all inside file_start_write()/file_end_write().
 */
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (unlikely(!access_ok(buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret)
		return ret;
	if (count > MAX_RW_COUNT)
		count =  MAX_RW_COUNT;
	file_start_write(file);
	if (file->f_op->write)
		ret = file->f_op->write(file, buf, count, pos);
	else if (file->f_op->write_iter)
		ret = new_sync_write(file, buf, count, pos);
	else
		ret = -EINVAL;
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	file_end_write(file);
	return ret;
}
  624. /* file_ppos returns &file->f_pos or NULL if file is stream */
  625. static inline loff_t *file_ppos(struct file *file)
  626. {
  627. return file->f_mode & FMODE_STREAM ? NULL : &file->f_pos;
  628. }
/*
 * ksys_read - implementation of the read() syscall. The file position is
 * read into a local shadow copy, passed to vfs_read(), and only written
 * back to f_pos on success; stream files (NULL file_ppos()) carry no
 * position at all. fdget_pos() serializes f_pos against other users.
 */
ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (fd_file(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_read(fd_file(f), buf, count, ppos);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
		fdput_pos(f);
	}
	return ret;
}

SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
	return ksys_read(fd, buf, count);
}
/*
 * ksys_write - implementation of the write() syscall; same shadow-copy
 * f_pos handling as ksys_read() but dispatching to vfs_write().
 */
ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (fd_file(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_write(fd_file(f), buf, count, ppos);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
		fdput_pos(f);
	}

	return ret;
}

SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
		size_t, count)
{
	return ksys_write(fd, buf, count);
}
/*
 * ksys_pread64 - implementation of pread64(): read at an explicit
 * position without touching f_pos. Uses plain fdget() (no f_pos lock
 * needed) and requires the file to allow positional reads (FMODE_PREAD).
 */
ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count,
		     loff_t pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (fd_file(f)) {
		ret = -ESPIPE;
		if (fd_file(f)->f_mode & FMODE_PREAD)
			ret = vfs_read(fd_file(f), buf, count, &pos);
		fdput(f);
	}

	return ret;
}

SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
			size_t, count, loff_t, pos)
{
	return ksys_pread64(fd, buf, count, pos);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PREAD64)
COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, buf,
		       size_t, count, compat_arg_u64_dual(pos))
{
	return ksys_pread64(fd, buf, count, compat_arg_u64_glue(pos));
}
#endif
/*
 * ksys_pwrite64 - implementation of pwrite64(): positional write that
 * leaves f_pos untouched; requires FMODE_PWRITE.
 */
ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
		      size_t count, loff_t pos)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (fd_file(f)) {
		ret = -ESPIPE;
		if (fd_file(f)->f_mode & FMODE_PWRITE)
			ret = vfs_write(fd_file(f), buf, count, &pos);
		fdput(f);
	}

	return ret;
}

SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
			 size_t, count, loff_t, pos)
{
	return ksys_pwrite64(fd, buf, count, pos);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PWRITE64)
COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, const char __user *, buf,
		       size_t, count, compat_arg_u64_dual(pos))
{
	return ksys_pwrite64(fd, buf, count, compat_arg_u64_glue(pos));
}
#endif
/*
 * Issue a vectored read or write through ->read_iter/->write_iter with a
 * synchronous kiocb. @type selects READ or WRITE; @flags are the RWF_*
 * per-call flags, translated onto the kiocb by kiocb_set_rw_flags().
 */
static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
		loff_t *ppos, int type, rwf_t flags)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = kiocb_set_rw_flags(&kiocb, flags, type);
	if (ret)
		return ret;
	kiocb.ki_pos = (ppos ? *ppos : 0);

	if (type == READ)
		ret = filp->f_op->read_iter(&kiocb, iter);
	else
		ret = filp->f_op->write_iter(&kiocb, iter);
	/* A sync kiocb must complete synchronously. */
	BUG_ON(ret == -EIOCBQUEUED);
	if (ppos)
		*ppos = kiocb.ki_pos;
	return ret;
}
/* Do it by hand, with file-ops */
static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
		loff_t *ppos, int type, rwf_t flags)
{
	ssize_t ret = 0;

	/* Per-segment ->read/->write can't honor RWF_* flags beyond HIPRI. */
	if (flags & ~RWF_HIPRI)
		return -EOPNOTSUPP;

	while (iov_iter_count(iter)) {
		ssize_t nr;

		if (type == READ) {
			nr = filp->f_op->read(filp, iter_iov_addr(iter),
						iter_iov_len(iter), ppos);
		} else {
			nr = filp->f_op->write(filp, iter_iov_addr(iter),
						iter_iov_len(iter), ppos);
		}

		if (nr < 0) {
			/* Report the error only if nothing was transferred yet. */
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		/* A short transfer ends the whole vectored operation. */
		if (nr != iter_iov_len(iter))
			break;
		iov_iter_advance(iter, nr);
	}

	return ret;
}
/*
 * vfs_iocb_iter_read - iterator read with a caller-supplied kiocb (may be
 * async). Validates modes and the (ki_pos, len) range, then calls
 * ->read_iter directly; an empty iterator succeeds with 0.
 */
ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
			   struct iov_iter *iter)
{
	size_t tot_len;
	ssize_t ret = 0;

	if (!file->f_op->read_iter)
		return -EINVAL;
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	tot_len = iov_iter_count(iter);
	if (!tot_len)
		goto out;
	ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len);
	if (ret < 0)
		return ret;

	ret = file->f_op->read_iter(iocb, iter);
out:
	if (ret >= 0)
		fsnotify_access(file);
	return ret;
}
EXPORT_SYMBOL(vfs_iocb_iter_read);
/*
 * Read into @iter at *@ppos via the file's ->read_iter() method.
 * Like vfs_iocb_iter_read() but builds the synchronous kiocb internally
 * (via do_iter_readv_writev()) from @ppos and @flags.
 */
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
		      rwf_t flags)
{
	size_t tot_len;
	ssize_t ret = 0;

	if (!file->f_op->read_iter)
		return -EINVAL;
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	tot_len = iov_iter_count(iter);
	if (!tot_len)
		goto out;
	ret = rw_verify_area(READ, file, ppos, tot_len);
	if (ret < 0)
		return ret;

	ret = do_iter_readv_writev(file, iter, ppos, READ, flags);
out:
	/* Zero-length reads (ret == 0) still generate an access event. */
	if (ret >= 0)
		fsnotify_access(file);
	return ret;
}
EXPORT_SYMBOL(vfs_iter_read);
/*
 * Caller is responsible for calling kiocb_end_write() on completion
 * if async iocb was queued.
 *
 * Write from @iter using a caller-supplied kiocb.  Requires ->write_iter().
 * kiocb_start_write() is taken here; it is released before return unless
 * the iocb was queued asynchronously (-EIOCBQUEUED), in which case the
 * completion path owns kiocb_end_write() per the comment above.
 */
ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
			    struct iov_iter *iter)
{
	size_t tot_len;
	ssize_t ret = 0;

	if (!file->f_op->write_iter)
		return -EINVAL;
	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	tot_len = iov_iter_count(iter);
	if (!tot_len)
		return 0;
	ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len);
	if (ret < 0)
		return ret;

	kiocb_start_write(iocb);
	ret = file->f_op->write_iter(iocb, iter);
	if (ret != -EIOCBQUEUED)
		kiocb_end_write(iocb);
	if (ret > 0)
		fsnotify_modify(file);

	return ret;
}
EXPORT_SYMBOL(vfs_iocb_iter_write);
/*
 * Write from @iter at *@ppos via the file's ->write_iter() method,
 * bracketed by file_start_write()/file_end_write() for freeze protection.
 * fsnotify_modify() fires only when bytes were actually written.
 */
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
		       rwf_t flags)
{
	size_t tot_len;
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (!file->f_op->write_iter)
		return -EINVAL;

	tot_len = iov_iter_count(iter);
	if (!tot_len)
		return 0;
	ret = rw_verify_area(WRITE, file, ppos, tot_len);
	if (ret < 0)
		return ret;

	file_start_write(file);
	ret = do_iter_readv_writev(file, iter, ppos, WRITE, flags);
	if (ret > 0)
		fsnotify_modify(file);
	file_end_write(file);

	return ret;
}
EXPORT_SYMBOL(vfs_iter_write);
/*
 * Vectored read from userspace iovecs.  Imports the user iovec array
 * (small arrays live on the stack via iovstack, larger ones are
 * kmalloc'ed by import_iovec and must be kfree'd on every exit path),
 * then dispatches to ->read_iter() or the per-segment fallback loop.
 */
static ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
			 unsigned long vlen, loff_t *pos, rwf_t flags)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	size_t tot_len;
	ssize_t ret = 0;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov,
			   &iter);
	if (ret < 0)
		return ret;

	tot_len = iov_iter_count(&iter);
	if (!tot_len)
		goto out;
	ret = rw_verify_area(READ, file, pos, tot_len);
	if (ret < 0)
		goto out;

	if (file->f_op->read_iter)
		ret = do_iter_readv_writev(file, &iter, pos, READ, flags);
	else
		ret = do_loop_readv_writev(file, &iter, pos, READ, flags);
out:
	if (ret >= 0)
		fsnotify_access(file);
	/* import_iovec() may have heap-allocated iov; safe no-op for stack case
	 * because it returns NULL in iov when the stack array was used?  No:
	 * iov still points at iovstack then — import_iovec only reassigns iov
	 * on heap allocation, and kfree(iovstack-pointer) is avoided because
	 * import_iovec sets *iov = NULL on failure and to the heap buffer or
	 * NULL otherwise.  NOTE(review): relies on import_iovec's contract that
	 * *iov is always kfree-safe after a successful return — confirm. */
	kfree(iov);
	return ret;
}
/*
 * Vectored write from userspace iovecs; mirror of vfs_readv() for the
 * write side, additionally holding freeze protection around the actual
 * transfer (file_start_write()/file_end_write()).
 */
static ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
			  unsigned long vlen, loff_t *pos, rwf_t flags)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	size_t tot_len;
	ssize_t ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	ret = import_iovec(ITER_SOURCE, vec, vlen, ARRAY_SIZE(iovstack), &iov,
			   &iter);
	if (ret < 0)
		return ret;

	tot_len = iov_iter_count(&iter);
	if (!tot_len)
		goto out;
	ret = rw_verify_area(WRITE, file, pos, tot_len);
	if (ret < 0)
		goto out;

	file_start_write(file);
	if (file->f_op->write_iter)
		ret = do_iter_readv_writev(file, &iter, pos, WRITE, flags);
	else
		ret = do_loop_readv_writev(file, &iter, pos, WRITE, flags);
	if (ret > 0)
		fsnotify_modify(file);
	file_end_write(file);
out:
	/* Free the iovec array possibly heap-allocated by import_iovec(). */
	kfree(iov);
	return ret;
}
/*
 * readv(2) worker: resolve the fd with f_pos serialization (fdget_pos),
 * read at the current file position, and update f_pos only on success.
 *
 * file_ppos() returns NULL for FMODE_STREAM files; those get a NULL ppos
 * and no position bookkeeping.  Otherwise the position is copied to a
 * local so a failed read leaves f_pos untouched.
 */
static ssize_t do_readv(unsigned long fd, const struct iovec __user *vec,
			unsigned long vlen, rwf_t flags)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (fd_file(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_readv(fd_file(f), vec, vlen, ppos, flags);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
		fdput_pos(f);
	}

	/* Task accounting: bytes read and read-syscall count. */
	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}
/*
 * writev(2) worker: mirror of do_readv() for the write side.  f_pos is
 * snapshotted to a local and written back only when vfs_writev() did not
 * fail; stream files (NULL file_ppos()) skip position handling entirely.
 */
static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec,
			 unsigned long vlen, rwf_t flags)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (fd_file(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_writev(fd_file(f), vec, vlen, ppos, flags);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
		fdput_pos(f);
	}

	/* Task accounting: bytes written and write-syscall count. */
	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
/*
 * Combine the split position arguments of p{read,write}v into a loff_t.
 *
 * The shift is done in two half-word steps: a single shift by
 * BITS_PER_LONG would be undefined behavior on 64-bit (shift count equal
 * to the width of loff_t), while two shifts by HALF_LONG_BITS are always
 * well-defined and correctly discard @high on 64-bit, where @low already
 * carries the full offset.
 */
static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
{
#define HALF_LONG_BITS (BITS_PER_LONG / 2)
	return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
}
/*
 * preadv(2) worker: positional vectored read.  Rejects negative offsets,
 * requires FMODE_PREAD (non-seekable files get -ESPIPE), and never
 * touches f_pos — the position lives entirely in the local @pos.
 */
static ssize_t do_preadv(unsigned long fd, const struct iovec __user *vec,
			 unsigned long vlen, loff_t pos, rwf_t flags)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (fd_file(f)) {
		ret = -ESPIPE;
		if (fd_file(f)->f_mode & FMODE_PREAD)
			ret = vfs_readv(fd_file(f), vec, vlen, &pos, flags);
		fdput(f);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}
/*
 * pwritev(2) worker: positional vectored write; mirror of do_preadv().
 * Requires FMODE_PWRITE and leaves f_pos untouched.
 */
static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec,
			  unsigned long vlen, loff_t pos, rwf_t flags)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (fd_file(f)) {
		ret = -ESPIPE;
		if (fd_file(f)->f_mode & FMODE_PWRITE)
			ret = vfs_writev(fd_file(f), vec, vlen, &pos, flags);
		fdput(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
/* readv(2): vectored read at the current file position, no RWF flags. */
SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	return do_readv(fd, vec, vlen, 0);
}
/* writev(2): vectored write at the current file position, no RWF flags. */
SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	return do_writev(fd, vec, vlen, 0);
}
/* preadv(2): positional vectored read; offset split into low/high words. */
SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	return do_preadv(fd, vec, vlen, pos, 0);
}
/*
 * preadv2(2): like preadv but with RWF_* flags.  A position of -1 means
 * "use and update the current file position", i.e. behave like readv.
 */
SYSCALL_DEFINE6(preadv2, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
		rwf_t, flags)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	if (pos == -1)
		return do_readv(fd, vec, vlen, flags);

	return do_preadv(fd, vec, vlen, pos, flags);
}
/* pwritev(2): positional vectored write; offset split into low/high words. */
SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	return do_pwritev(fd, vec, vlen, pos, 0);
}
/*
 * pwritev2(2): like pwritev but with RWF_* flags.  A position of -1 means
 * "use and update the current file position", i.e. behave like writev.
 */
SYSCALL_DEFINE6(pwritev2, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
		rwf_t, flags)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	if (pos == -1)
		return do_writev(fd, vec, vlen, flags);

	return do_pwritev(fd, vec, vlen, pos, flags);
}
/*
 * Various compat syscalls. Note that they all pretend to take a native
 * iovec - import_iovec will properly treat those as compat_iovecs based on
 * in_compat_syscall().
 */
#ifdef CONFIG_COMPAT
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
/* preadv64: compat variant taking the full 64-bit position directly. */
COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos)
{
	return do_preadv(fd, vec, vlen, pos, 0);
}
#endif

/* preadv: 64-bit position reassembled from two 32-bit halves. */
COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
		const struct iovec __user *, vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return do_preadv(fd, vec, vlen, pos, 0);
}

#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos, rwf_t, flags)
{
	/* pos == -1 means: use and update the current file position. */
	if (pos == -1)
		return do_readv(fd, vec, vlen, flags);
	return do_preadv(fd, vec, vlen, pos, flags);
}
#endif

COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd,
		const struct iovec __user *, vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high,
		rwf_t, flags)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	if (pos == -1)
		return do_readv(fd, vec, vlen, flags);
	return do_preadv(fd, vec, vlen, pos, flags);
}

#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
/* pwritev64: compat variant taking the full 64-bit position directly. */
COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos)
{
	return do_pwritev(fd, vec, vlen, pos, 0);
}
#endif

COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
		const struct iovec __user *,vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return do_pwritev(fd, vec, vlen, pos, 0);
}

#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos, rwf_t, flags)
{
	if (pos == -1)
		return do_writev(fd, vec, vlen, flags);
	return do_pwritev(fd, vec, vlen, pos, flags);
}
#endif

COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd,
		const struct iovec __user *,vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high, rwf_t, flags)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	if (pos == -1)
		return do_writev(fd, vec, vlen, flags);
	return do_pwritev(fd, vec, vlen, pos, flags);
}
#endif /* CONFIG_COMPAT */
/*
 * sendfile(2) worker: copy up to @count bytes from @in_fd to @out_fd.
 *
 * @ppos:  explicit input position, or NULL to use (and update) the input
 *         file's f_pos.
 * @max:   upper bound for the transfer range; 0 means "derive from the
 *         smaller of the two filesystems' s_maxbytes".
 *
 * The copy itself is done with do_splice_direct() when the output is a
 * regular file, or splice_file_to_pipe() when the output is a pipe.
 */
static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
			   size_t count, loff_t max)
{
	struct fd in, out;
	struct inode *in_inode, *out_inode;
	struct pipe_inode_info *opipe;
	loff_t pos;
	loff_t out_pos;
	ssize_t retval;
	int fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in = fdget(in_fd);
	if (!fd_file(in))
		goto out;
	if (!(fd_file(in)->f_mode & FMODE_READ))
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos) {
		pos = fd_file(in)->f_pos;
	} else {
		pos = *ppos;
		/* An explicit offset requires pread capability. */
		if (!(fd_file(in)->f_mode & FMODE_PREAD))
			goto fput_in;
	}
	retval = rw_verify_area(READ, fd_file(in), &pos, count);
	if (retval < 0)
		goto fput_in;
	if (count > MAX_RW_COUNT)
		count =  MAX_RW_COUNT;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out = fdget(out_fd);
	if (!fd_file(out))
		goto fput_in;
	if (!(fd_file(out)->f_mode & FMODE_WRITE))
		goto fput_out;
	in_inode = file_inode(fd_file(in));
	out_inode = file_inode(fd_file(out));
	out_pos = fd_file(out)->f_pos;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	/* Clamp the transfer to @max; starting at/after it is an error. */
	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (fd_file(in)->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	opipe = get_pipe_info(fd_file(out), true);
	if (!opipe) {
		retval = rw_verify_area(WRITE, fd_file(out), &out_pos, count);
		if (retval < 0)
			goto fput_out;
		retval = do_splice_direct(fd_file(in), &pos, fd_file(out), &out_pos,
					  count, fl);
	} else {
		if (fd_file(out)->f_flags & O_NONBLOCK)
			fl |= SPLICE_F_NONBLOCK;

		retval = splice_file_to_pipe(fd_file(in), opipe, &pos, count, fl);
	}

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
		fsnotify_access(fd_file(in));
		fsnotify_modify(fd_file(out));
		fd_file(out)->f_pos = out_pos;
		/* Report the new input position, or advance f_pos directly. */
		if (ppos)
			*ppos = pos;
		else
			fd_file(in)->f_pos = pos;
	}

	inc_syscr(current);
	inc_syscw(current);
	if (pos > max)
		retval = -EOVERFLOW;

fput_out:
	fdput(out);
fput_in:
	fdput(in);
out:
	return retval;
}
/*
 * sendfile(2): 32-bit off_t variant.  The transfer is capped at
 * MAX_NON_LFS so the updated offset always fits back into off_t.
 */
SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		/* Write the advanced offset back to userspace. */
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
/*
 * sendfile64(2): full 64-bit offset variant; no MAX_NON_LFS cap (the
 * limit is derived from the filesystems' s_maxbytes inside do_sendfile).
 */
SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		/* Write the advanced offset back to userspace. */
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
#ifdef CONFIG_COMPAT
/* Compat sendfile: compat_off_t offset, capped at MAX_NON_LFS. */
COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd,
		compat_off_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

/* Compat sendfile64: full 64-bit offset, no extra cap. */
COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
		compat_loff_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
#endif
/*
 * Performs necessary checks before doing a file copy
 *
 * Can adjust amount of bytes to copy via @req_count argument.
 * Returns appropriate error code that caller should return or
 * zero in case the copy should be allowed.
 */
static int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
				    struct file *file_out, loff_t pos_out,
				    size_t *req_count, unsigned int flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	uint64_t count = *req_count;
	loff_t size_in;
	int ret;

	/* Basic sanity: regular files, readable in, writable non-append out. */
	ret = generic_file_rw_checks(file_in, file_out);
	if (ret)
		return ret;

	/*
	 * We allow some filesystems to handle cross sb copy, but passing
	 * a file of the wrong filesystem type to filesystem driver can result
	 * in an attempt to dereference the wrong type of ->private_data, so
	 * avoid doing that until we really have a good reason.
	 *
	 * nfs and cifs define several different file_system_type structures
	 * and several different sets of file_operations, but they all end up
	 * using the same ->copy_file_range() function pointer.
	 */
	if (flags & COPY_FILE_SPLICE) {
		/* cross sb splice is allowed */
	} else if (file_out->f_op->copy_file_range) {
		if (file_in->f_op->copy_file_range !=
		    file_out->f_op->copy_file_range)
			return -EXDEV;
	} else if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) {
		return -EXDEV;
	}

	/* Don't touch certain kinds of inodes */
	if (IS_IMMUTABLE(inode_out))
		return -EPERM;

	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
		return -ETXTBSY;

	/* Ensure offsets don't wrap. */
	if (pos_in + count < pos_in || pos_out + count < pos_out)
		return -EOVERFLOW;

	/* Shorten the copy to EOF */
	size_in = i_size_read(inode_in);
	if (pos_in >= size_in)
		count = 0;
	else
		count = min(count, size_in - (uint64_t)pos_in);

	/* Apply RLIMIT_FSIZE and s_maxbytes limits to the output side. */
	ret = generic_write_check_limits(file_out, pos_out, &count);
	if (ret)
		return ret;

	/* Don't allow overlapped copying within the same file. */
	if (inode_in == inode_out &&
	    pos_out + count > pos_in &&
	    pos_out < pos_in + count)
		return -EINVAL;

	*req_count = count;
	return 0;
}
/*
 * copy_file_range() differs from regular file read and write in that it
 * specifically allows return partial success. When it does so is up to
 * the copy_file_range method.
 *
 * Strategy, in order of preference:
 *   1. ->copy_file_range() when present (unless COPY_FILE_SPLICE forces
 *      a kernel splice copy);
 *   2. ->remap_file_range() (clone) for same-sb copies, falling back to
 *      splice when the clone is rejected or clones nothing;
 *   3. do_splice_direct() kernel copy.
 */
ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    size_t len, unsigned int flags)
{
	ssize_t ret;
	bool splice = flags & COPY_FILE_SPLICE;
	bool samesb = file_inode(file_in)->i_sb == file_inode(file_out)->i_sb;

	if (flags & ~COPY_FILE_SPLICE)
		return -EINVAL;

	ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out, &len,
				       flags);
	if (unlikely(ret))
		return ret;

	ret = rw_verify_area(READ, file_in, &pos_in, len);
	if (unlikely(ret))
		return ret;

	ret = rw_verify_area(WRITE, file_out, &pos_out, len);
	if (unlikely(ret))
		return ret;

	if (len == 0)
		return 0;

	/*
	 * Make sure return value doesn't overflow in 32bit compat mode. Also
	 * limit the size for all cases except when calling ->copy_file_range().
	 */
	if (splice || !file_out->f_op->copy_file_range || in_compat_syscall())
		len = min_t(size_t, MAX_RW_COUNT, len);

	file_start_write(file_out);

	/*
	 * Cloning is supported by more file systems, so we implement copy on
	 * same sb using clone, but for filesystems where both clone and copy
	 * are supported (e.g. nfs,cifs), we only call the copy method.
	 */
	if (!splice && file_out->f_op->copy_file_range) {
		ret = file_out->f_op->copy_file_range(file_in, pos_in,
						      file_out, pos_out,
						      len, flags);
	} else if (!splice && file_in->f_op->remap_file_range && samesb) {
		ret = file_in->f_op->remap_file_range(file_in, pos_in,
				file_out, pos_out, len, REMAP_FILE_CAN_SHORTEN);
		/* fallback to splice */
		if (ret <= 0)
			splice = true;
	} else if (samesb) {
		/* Fallback to splice for same sb copy for backward compat */
		splice = true;
	}

	file_end_write(file_out);

	if (!splice)
		goto done;

	/*
	 * We can get here for same sb copy of filesystems that do not implement
	 * ->copy_file_range() in case filesystem does not support clone or in
	 * case filesystem supports clone but rejected the clone request (e.g.
	 * because it was not block aligned).
	 *
	 * In both cases, fall back to kernel copy so we are able to maintain a
	 * consistent story about which filesystems support copy_file_range()
	 * and which filesystems do not, that will allow userspace tools to
	 * make consistent desicions w.r.t using copy_file_range().
	 *
	 * We also get here if caller (e.g. nfsd) requested COPY_FILE_SPLICE
	 * for server-side-copy between any two sb.
	 *
	 * In any case, we call do_splice_direct() and not splice_file_range(),
	 * without file_start_write() held, to avoid possible deadlocks related
	 * to splicing from input file, while file_start_write() is held on
	 * the output file on a different sb.
	 */
	ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out, len, 0);
done:
	if (ret > 0) {
		fsnotify_access(file_in);
		add_rchar(current, ret);
		fsnotify_modify(file_out);
		add_wchar(current, ret);
	}

	inc_syscr(current);
	inc_syscw(current);

	return ret;
}
EXPORT_SYMBOL(vfs_copy_file_range);
/*
 * copy_file_range(2): copy @len bytes between two fds.  For each side, a
 * NULL userspace offset pointer means "use and update that file's f_pos";
 * a non-NULL pointer supplies the position and receives the advanced
 * value on success.  @flags must be 0 from userspace (COPY_FILE_SPLICE is
 * kernel-internal).
 */
SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
		int, fd_out, loff_t __user *, off_out,
		size_t, len, unsigned int, flags)
{
	loff_t pos_in;
	loff_t pos_out;
	struct fd f_in;
	struct fd f_out;
	ssize_t ret = -EBADF;

	f_in = fdget(fd_in);
	if (!fd_file(f_in))
		goto out2;

	f_out = fdget(fd_out);
	if (!fd_file(f_out))
		goto out1;

	ret = -EFAULT;
	if (off_in) {
		if (copy_from_user(&pos_in, off_in, sizeof(loff_t)))
			goto out;
	} else {
		pos_in = fd_file(f_in)->f_pos;
	}

	if (off_out) {
		if (copy_from_user(&pos_out, off_out, sizeof(loff_t)))
			goto out;
	} else {
		pos_out = fd_file(f_out)->f_pos;
	}

	ret = -EINVAL;
	if (flags != 0)
		goto out;

	ret = vfs_copy_file_range(fd_file(f_in), pos_in, fd_file(f_out), pos_out, len,
				  flags);
	if (ret > 0) {
		pos_in += ret;
		pos_out += ret;

		/* Report the advanced offsets, or update the files' f_pos. */
		if (off_in) {
			if (copy_to_user(off_in, &pos_in, sizeof(loff_t)))
				ret = -EFAULT;
		} else {
			fd_file(f_in)->f_pos = pos_in;
		}

		if (off_out) {
			if (copy_to_user(off_out, &pos_out, sizeof(loff_t)))
				ret = -EFAULT;
		} else {
			fd_file(f_out)->f_pos = pos_out;
		}
	}

out:
	fdput(f_out);
out1:
	fdput(f_in);
out2:
	return ret;
}
/*
 * Don't operate on ranges the page cache doesn't support, and don't exceed the
 * LFS limits.  If pos is under the limit it becomes a short access. If it
 * exceeds the limit we return -EFBIG.
 *
 * @count is clamped in place against both RLIMIT_FSIZE and the
 * filesystem / O_LARGEFILE maximum file size.  Writing at or past the
 * RLIMIT_FSIZE limit additionally raises SIGXFSZ, per POSIX.
 */
int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count)
{
	struct inode *inode = file->f_mapping->host;
	loff_t max_size = inode->i_sb->s_maxbytes;
	loff_t limit = rlimit(RLIMIT_FSIZE);

	if (limit != RLIM_INFINITY) {
		if (pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		*count = min(*count, limit - pos);
	}

	/* Without O_LARGEFILE the file may not grow past 2GB-1. */
	if (!(file->f_flags & O_LARGEFILE))
		max_size = MAX_NON_LFS;

	if (unlikely(pos >= max_size))
		return -EFBIG;

	*count = min(*count, max_size - pos);

	return 0;
}
EXPORT_SYMBOL_GPL(generic_write_check_limits);
/* Like generic_write_checks(), but takes size of write instead of iter. */
int generic_write_checks_count(struct kiocb *iocb, loff_t *count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (IS_SWAPFILE(inode))
		return -ETXTBSY;

	if (!*count)
		return 0;

	/* O_APPEND: reposition the write to the current end of file. */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	/*
	 * IOCB_NOWAIT is only honoured for direct I/O, or for buffered
	 * writes when the fs opted in via FOP_BUFFER_WASYNC.
	 */
	if ((iocb->ki_flags & IOCB_NOWAIT) &&
	    !((iocb->ki_flags & IOCB_DIRECT) ||
	      (file->f_op->fop_flags & FOP_BUFFER_WASYNC)))
		return -EINVAL;

	return generic_write_check_limits(iocb->ki_filp, iocb->ki_pos, count);
}
EXPORT_SYMBOL(generic_write_checks_count);
  1555. /*
  1556. * Performs necessary checks before doing a write
  1557. *
  1558. * Can adjust writing position or amount of bytes to write.
  1559. * Returns appropriate error code that caller should return or
  1560. * zero in case that write should be allowed.
  1561. */
  1562. ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
  1563. {
  1564. loff_t count = iov_iter_count(from);
  1565. int ret;
  1566. ret = generic_write_checks_count(iocb, &count);
  1567. if (ret)
  1568. return ret;
  1569. iov_iter_truncate(from, count);
  1570. return iov_iter_count(from);
  1571. }
  1572. EXPORT_SYMBOL(generic_write_checks);
  1573. /*
  1574. * Performs common checks before doing a file copy/clone
  1575. * from @file_in to @file_out.
  1576. */
  1577. int generic_file_rw_checks(struct file *file_in, struct file *file_out)
  1578. {
  1579. struct inode *inode_in = file_inode(file_in);
  1580. struct inode *inode_out = file_inode(file_out);
  1581. /* Don't copy dirs, pipes, sockets... */
  1582. if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
  1583. return -EISDIR;
  1584. if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
  1585. return -EINVAL;
  1586. if (!(file_in->f_mode & FMODE_READ) ||
  1587. !(file_out->f_mode & FMODE_WRITE) ||
  1588. (file_out->f_flags & O_APPEND))
  1589. return -EBADF;
  1590. return 0;
  1591. }
  1592. int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter)
  1593. {
  1594. size_t len = iov_iter_count(iter);
  1595. if (!iter_is_ubuf(iter))
  1596. return -EINVAL;
  1597. if (!is_power_of_2(len))
  1598. return -EINVAL;
  1599. if (!IS_ALIGNED(iocb->ki_pos, len))
  1600. return -EINVAL;
  1601. if (!(iocb->ki_flags & IOCB_DIRECT))
  1602. return -EOPNOTSUPP;
  1603. return 0;
  1604. }