ioctl.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/capability.h>
  3. #include <linux/compat.h>
  4. #include <linux/blkdev.h>
  5. #include <linux/export.h>
  6. #include <linux/gfp.h>
  7. #include <linux/blkpg.h>
  8. #include <linux/hdreg.h>
  9. #include <linux/backing-dev.h>
  10. #include <linux/fs.h>
  11. #include <linux/blktrace_api.h>
  12. #include <linux/pr.h>
  13. #include <linux/uaccess.h>
  14. #include <linux/pagemap.h>
  15. #include <linux/io_uring/cmd.h>
  16. #include <uapi/linux/blkdev.h>
  17. #include "blk.h"
/*
 * Handle the BLKPG partition sub-commands (add/resize/delete).
 *
 * @bdev:  the whole-disk device the operation applies to
 * @upart: userspace description of the target partition (byte offsets)
 * @op:    BLKPG_ADD_PARTITION, BLKPG_RESIZE_PARTITION or BLKPG_DEL_PARTITION
 *
 * Returns 0 on success or a negative errno.
 */
static int blkpg_do_ioctl(struct block_device *bdev,
		struct blkpg_partition __user *upart, int op)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkpg_partition p;
	sector_t start, length, capacity, end;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
		return -EFAULT;
	/* Partitions can only be managed through the whole-disk node. */
	if (bdev_is_partition(bdev))
		return -EINVAL;

	if (p.pno <= 0)
		return -EINVAL;

	/* Deletion needs no start/length validation. */
	if (op == BLKPG_DEL_PARTITION)
		return bdev_del_partition(disk, p.pno);

	/* start/length are signed byte values from userspace. */
	if (p.start < 0 || p.length <= 0 || LLONG_MAX - p.length < p.start)
		return -EINVAL;
	/* Check that the partition is aligned to the block size */
	if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
		return -EINVAL;

	start = p.start >> SECTOR_SHIFT;
	length = p.length >> SECTOR_SHIFT;
	capacity = get_capacity(disk);

	if (check_add_overflow(start, length, &end))
		return -EINVAL;

	/* The partition must lie entirely within the disk. */
	if (start >= capacity || end > capacity)
		return -EINVAL;

	switch (op) {
	case BLKPG_ADD_PARTITION:
		return bdev_add_partition(disk, p.pno, start, length);
	case BLKPG_RESIZE_PARTITION:
		return bdev_resize_partition(disk, p.pno, start, length);
	default:
		return -EINVAL;
	}
}
  55. static int blkpg_ioctl(struct block_device *bdev,
  56. struct blkpg_ioctl_arg __user *arg)
  57. {
  58. struct blkpg_partition __user *udata;
  59. int op;
  60. if (get_user(op, &arg->op) || get_user(udata, &arg->data))
  61. return -EFAULT;
  62. return blkpg_do_ioctl(bdev, udata, op);
  63. }
#ifdef CONFIG_COMPAT
/* 32-bit layout of struct blkpg_ioctl_arg (the data pointer is 32 bits). */
struct compat_blkpg_ioctl_arg {
	compat_int_t op;
	compat_int_t flags;
	compat_int_t datalen;
	compat_caddr_t data;
};

/*
 * Compat BLKPG entry point: same as blkpg_ioctl() except the 32-bit data
 * pointer is widened with compat_ptr() before dispatching.
 */
static int compat_blkpg_ioctl(struct block_device *bdev,
		struct compat_blkpg_ioctl_arg __user *arg)
{
	compat_caddr_t udata;
	int op;

	if (get_user(op, &arg->op) || get_user(udata, &arg->data))
		return -EFAULT;

	return blkpg_do_ioctl(bdev, compat_ptr(udata), op);
}
#endif
  81. /*
  82. * Check that [start, start + len) is a valid range from the block device's
  83. * perspective, including verifying that it can be correctly translated into
  84. * logical block addresses.
  85. */
  86. static int blk_validate_byte_range(struct block_device *bdev,
  87. uint64_t start, uint64_t len)
  88. {
  89. unsigned int bs_mask = bdev_logical_block_size(bdev) - 1;
  90. uint64_t end;
  91. if ((start | len) & bs_mask)
  92. return -EINVAL;
  93. if (!len)
  94. return -EINVAL;
  95. if (check_add_overflow(start, len, &end) || end > bdev_nr_bytes(bdev))
  96. return -EINVAL;
  97. return 0;
  98. }
/*
 * BLKDISCARD: discard a byte range on the device.
 *
 * The page cache over the range is invalidated first, then the range is
 * carved into individually allocated discard bios that are chained and
 * submitted under a plug.  A fatal signal aborts the loop: bios already
 * in flight are waited for before returning -EINTR.
 */
static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
		unsigned long arg)
{
	uint64_t range[2], start, len;
	struct bio *prev = NULL, *bio;
	sector_t sector, nr_sects;
	struct blk_plug plug;
	int err;

	if (copy_from_user(range, (void __user *)arg, sizeof(range)))
		return -EFAULT;
	start = range[0];
	len = range[1];

	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;
	if (bdev_read_only(bdev))
		return -EPERM;
	err = blk_validate_byte_range(bdev, start, len);
	if (err)
		return err;

	filemap_invalidate_lock(bdev->bd_mapping);
	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
	if (err)
		goto fail;

	sector = start >> SECTOR_SHIFT;
	nr_sects = len >> SECTOR_SHIFT;

	blk_start_plug(&plug);
	while (1) {
		if (fatal_signal_pending(current)) {
			if (prev)
				bio_await_chain(prev);
			err = -EINTR;
			goto out_unplug;
		}
		/* Returns NULL once nr_sects has been fully consumed. */
		bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
				GFP_KERNEL);
		if (!bio)
			break;
		prev = bio_chain_and_submit(prev, bio);
	}
	if (prev) {
		err = submit_bio_wait(prev);
		/* A device-side -EOPNOTSUPP is treated as success. */
		if (err == -EOPNOTSUPP)
			err = 0;
		bio_put(prev);
	}
out_unplug:
	blk_finish_plug(&plug);
fail:
	filemap_invalidate_unlock(bdev->bd_mapping);
	return err;
}
/*
 * BLKSECDISCARD: securely erase a byte range.
 *
 * The range must be 512-byte aligned and fully contained in the device;
 * the page cache over it is truncated before the erase is issued.
 */
static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
		void __user *argp)
{
	uint64_t start, len, end;
	uint64_t range[2];
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;
	if (!bdev_max_secure_erase_sectors(bdev))
		return -EOPNOTSUPP;
	if (copy_from_user(range, argp, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];
	/* Both offset and length are in bytes, 512-byte aligned. */
	if ((start & 511) || (len & 511))
		return -EINVAL;
	if (check_add_overflow(start, len, &end) ||
	    end > bdev_nr_bytes(bdev))
		return -EINVAL;

	filemap_invalidate_lock(bdev->bd_mapping);
	err = truncate_bdev_range(bdev, mode, start, end - 1);
	if (!err)
		err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
						GFP_KERNEL);
	filemap_invalidate_unlock(bdev->bd_mapping);
	return err;
}
  179. static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
  180. unsigned long arg)
  181. {
  182. uint64_t range[2];
  183. uint64_t start, end, len;
  184. int err;
  185. if (!(mode & BLK_OPEN_WRITE))
  186. return -EBADF;
  187. if (copy_from_user(range, (void __user *)arg, sizeof(range)))
  188. return -EFAULT;
  189. start = range[0];
  190. len = range[1];
  191. end = start + len - 1;
  192. if (start & 511)
  193. return -EINVAL;
  194. if (len & 511)
  195. return -EINVAL;
  196. if (end >= (uint64_t)bdev_nr_bytes(bdev))
  197. return -EINVAL;
  198. if (end < start)
  199. return -EINVAL;
  200. /* Invalidate the page cache, including dirty pages */
  201. filemap_invalidate_lock(bdev->bd_mapping);
  202. err = truncate_bdev_range(bdev, mode, start, end);
  203. if (err)
  204. goto fail;
  205. err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
  206. BLKDEV_ZERO_NOUNMAP | BLKDEV_ZERO_KILLABLE);
  207. fail:
  208. filemap_invalidate_unlock(bdev->bd_mapping);
  209. return err;
  210. }
/*
 * Trivial typed wrappers around put_user(), used by the ioctl handlers
 * below to copy a single scalar result back to userspace.
 */
static int put_ushort(unsigned short __user *argp, unsigned short val)
{
	return put_user(val, argp);
}

static int put_int(int __user *argp, int val)
{
	return put_user(val, argp);
}

static int put_uint(unsigned int __user *argp, unsigned int val)
{
	return put_user(val, argp);
}

static int put_long(long __user *argp, long val)
{
	return put_user(val, argp);
}

static int put_ulong(unsigned long __user *argp, unsigned long val)
{
	return put_user(val, argp);
}

static int put_u64(u64 __user *argp, u64 val)
{
	return put_user(val, argp);
}
#ifdef CONFIG_COMPAT
/* Compat variants: write 32-bit long/ulong values for 32-bit callers. */
static int compat_put_long(compat_long_t __user *argp, long val)
{
	return put_user(val, argp);
}

static int compat_put_ulong(compat_ulong_t __user *argp, compat_ulong_t val)
{
	return put_user(val, argp);
}
#endif
#ifdef CONFIG_COMPAT
/*
 * This is the equivalent of compat_ptr_ioctl(), to be used by block
 * drivers that implement only commands that are completely compatible
 * between 32-bit and 64-bit user space
 */
int blkdev_compat_ptr_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;

	if (disk->fops->ioctl)
		/* Widen the 32-bit pointer and call the native handler. */
		return disk->fops->ioctl(bdev, mode, cmd,
					 (unsigned long)compat_ptr(arg));
	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(blkdev_compat_ptr_ioctl);
#endif
  262. static bool blkdev_pr_allowed(struct block_device *bdev, blk_mode_t mode)
  263. {
  264. /* no sense to make reservations for partitions */
  265. if (bdev_is_partition(bdev))
  266. return false;
  267. if (capable(CAP_SYS_ADMIN))
  268. return true;
  269. /*
  270. * Only allow unprivileged reservations if the file descriptor is open
  271. * for writing.
  272. */
  273. return mode & BLK_OPEN_WRITE;
  274. }
  275. static int blkdev_pr_register(struct block_device *bdev, blk_mode_t mode,
  276. struct pr_registration __user *arg)
  277. {
  278. const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
  279. struct pr_registration reg;
  280. if (!blkdev_pr_allowed(bdev, mode))
  281. return -EPERM;
  282. if (!ops || !ops->pr_register)
  283. return -EOPNOTSUPP;
  284. if (copy_from_user(&reg, arg, sizeof(reg)))
  285. return -EFAULT;
  286. if (reg.flags & ~PR_FL_IGNORE_KEY)
  287. return -EOPNOTSUPP;
  288. return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);
  289. }
  290. static int blkdev_pr_reserve(struct block_device *bdev, blk_mode_t mode,
  291. struct pr_reservation __user *arg)
  292. {
  293. const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
  294. struct pr_reservation rsv;
  295. if (!blkdev_pr_allowed(bdev, mode))
  296. return -EPERM;
  297. if (!ops || !ops->pr_reserve)
  298. return -EOPNOTSUPP;
  299. if (copy_from_user(&rsv, arg, sizeof(rsv)))
  300. return -EFAULT;
  301. if (rsv.flags & ~PR_FL_IGNORE_KEY)
  302. return -EOPNOTSUPP;
  303. return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);
  304. }
  305. static int blkdev_pr_release(struct block_device *bdev, blk_mode_t mode,
  306. struct pr_reservation __user *arg)
  307. {
  308. const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
  309. struct pr_reservation rsv;
  310. if (!blkdev_pr_allowed(bdev, mode))
  311. return -EPERM;
  312. if (!ops || !ops->pr_release)
  313. return -EOPNOTSUPP;
  314. if (copy_from_user(&rsv, arg, sizeof(rsv)))
  315. return -EFAULT;
  316. if (rsv.flags)
  317. return -EOPNOTSUPP;
  318. return ops->pr_release(bdev, rsv.key, rsv.type);
  319. }
  320. static int blkdev_pr_preempt(struct block_device *bdev, blk_mode_t mode,
  321. struct pr_preempt __user *arg, bool abort)
  322. {
  323. const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
  324. struct pr_preempt p;
  325. if (!blkdev_pr_allowed(bdev, mode))
  326. return -EPERM;
  327. if (!ops || !ops->pr_preempt)
  328. return -EOPNOTSUPP;
  329. if (copy_from_user(&p, arg, sizeof(p)))
  330. return -EFAULT;
  331. if (p.flags)
  332. return -EOPNOTSUPP;
  333. return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);
  334. }
  335. static int blkdev_pr_clear(struct block_device *bdev, blk_mode_t mode,
  336. struct pr_clear __user *arg)
  337. {
  338. const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
  339. struct pr_clear c;
  340. if (!blkdev_pr_allowed(bdev, mode))
  341. return -EPERM;
  342. if (!ops || !ops->pr_clear)
  343. return -EOPNOTSUPP;
  344. if (copy_from_user(&c, arg, sizeof(c)))
  345. return -EFAULT;
  346. if (c.flags)
  347. return -EOPNOTSUPP;
  348. return ops->pr_clear(bdev, c.key);
  349. }
/*
 * BLKFLSBUF: flush buffered writes and drop the device's page cache.
 *
 * If a holder (e.g. a mounted filesystem) supplies a ->sync method it is
 * used instead of a plain sync_blockdev().
 */
static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
		unsigned long arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
		/*
		 * NOTE(review): ->sync() is invoked with bd_holder_lock held
		 * and this branch has no matching unlock afterwards —
		 * presumably the ->sync implementation is responsible for
		 * dropping the lock; confirm against the blk_holder_ops
		 * contract.
		 */
		bdev->bd_holder_ops->sync(bdev);
	else {
		mutex_unlock(&bdev->bd_holder_lock);
		sync_blockdev(bdev);
	}

	invalidate_bdev(bdev);
	return 0;
}
/*
 * BLKROSET: set or clear the per-bdev read-only flag.  The driver's
 * ->set_read_only hook, when present, may veto the change first.
 */
static int blkdev_roset(struct block_device *bdev, unsigned cmd,
		unsigned long arg)
{
	int ret, n;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (get_user(n, (int __user *)arg))
		return -EFAULT;
	if (bdev->bd_disk->fops->set_read_only) {
		ret = bdev->bd_disk->fops->set_read_only(bdev, n);
		if (ret)
			return ret;
	}
	if (n)
		bdev_set_flag(bdev, BD_READ_ONLY);
	else
		bdev_clear_flag(bdev, BD_READ_ONLY);
	return 0;
}
  384. static int blkdev_getgeo(struct block_device *bdev,
  385. struct hd_geometry __user *argp)
  386. {
  387. struct gendisk *disk = bdev->bd_disk;
  388. struct hd_geometry geo;
  389. int ret;
  390. if (!argp)
  391. return -EINVAL;
  392. if (!disk->fops->getgeo)
  393. return -ENOTTY;
  394. /*
  395. * We need to set the startsect first, the driver may
  396. * want to override it.
  397. */
  398. memset(&geo, 0, sizeof(geo));
  399. geo.start = get_start_sect(bdev);
  400. ret = disk->fops->getgeo(bdev, &geo);
  401. if (ret)
  402. return ret;
  403. if (copy_to_user(argp, &geo, sizeof(geo)))
  404. return -EFAULT;
  405. return 0;
  406. }
#ifdef CONFIG_COMPAT
/* 32-bit layout of struct hd_geometry (start shrinks to u32). */
struct compat_hd_geometry {
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
	u32 start;
};

/*
 * Compat HDIO_GETGEO: query the geometry through the native struct and
 * repack the fields into the 32-bit layout.
 */
static int compat_hdio_getgeo(struct block_device *bdev,
		struct compat_hd_geometry __user *ugeo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct hd_geometry geo;
	int ret;

	if (!ugeo)
		return -EINVAL;
	if (!disk->fops->getgeo)
		return -ENOTTY;

	memset(&geo, 0, sizeof(geo));
	/*
	 * We need to set the startsect first, the driver may
	 * want to override it.
	 */
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);
	if (ret)
		return ret;

	/* The first 4 bytes cover heads, sectors and cylinders. */
	ret = copy_to_user(ugeo, &geo, 4);
	ret |= put_user(geo.start, &ugeo->start);
	if (ret)
		ret = -EFAULT;

	return ret;
}
#endif
/* set the logical block size */
static int blkdev_bszset(struct file *file, blk_mode_t mode,
		int __user *argp)
{
	// this one might be file_inode(file)->i_rdev - a rare valid
	// use of file_inode() for those.
	dev_t dev = I_BDEV(file->f_mapping->host)->bd_dev;
	struct file *excl_file;
	int ret, n;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;
	if (get_user(n, argp))
		return -EFAULT;

	/* Already opened exclusively: change the block size directly. */
	if (mode & BLK_OPEN_EXCL)
		return set_blocksize(file, n);

	/*
	 * Otherwise take a temporary exclusive open for the duration of
	 * the call.  NOTE(review): &dev appears to serve as the unique
	 * holder cookie here — confirm against bdev_file_open_by_dev().
	 */
	excl_file = bdev_file_open_by_dev(dev, mode, &dev, NULL);
	if (IS_ERR(excl_file))
		return -EBUSY;
	ret = set_blocksize(excl_file, n);
	fput(excl_file);
	return ret;
}
/*
 * Common commands that are handled the same way on native and compat
 * user space. Note the separate arg/argp parameters that are needed
 * to deal with the compat_ptr() conversion.
 *
 * Returns -ENOIOCTLCMD for commands not handled here so callers can
 * fall through to the driver's own ioctl handler.
 */
static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg,
		void __user *argp)
{
	unsigned int max_sectors;

	switch (cmd) {
	case BLKFLSBUF:
		return blkdev_flushbuf(bdev, cmd, arg);
	case BLKROSET:
		return blkdev_roset(bdev, cmd, arg);
	case BLKDISCARD:
		return blk_ioctl_discard(bdev, mode, arg);
	case BLKSECDISCARD:
		return blk_ioctl_secure_erase(bdev, mode, argp);
	case BLKZEROOUT:
		return blk_ioctl_zeroout(bdev, mode, arg);
	case BLKGETDISKSEQ:
		return put_u64(argp, bdev->bd_disk->diskseq);
	case BLKREPORTZONE:
		return blkdev_report_zones_ioctl(bdev, cmd, arg);
	case BLKRESETZONE:
	case BLKOPENZONE:
	case BLKCLOSEZONE:
	case BLKFINISHZONE:
		return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg);
	case BLKGETZONESZ:
		return put_uint(argp, bdev_zone_sectors(bdev));
	case BLKGETNRZONES:
		return put_uint(argp, bdev_nr_zones(bdev));
	case BLKROGET:
		return put_int(argp, bdev_read_only(bdev) != 0);
	case BLKSSZGET: /* get block device logical block size */
		return put_int(argp, bdev_logical_block_size(bdev));
	case BLKPBSZGET: /* get block device physical block size */
		return put_uint(argp, bdev_physical_block_size(bdev));
	case BLKIOMIN:
		return put_uint(argp, bdev_io_min(bdev));
	case BLKIOOPT:
		return put_uint(argp, bdev_io_opt(bdev));
	case BLKALIGNOFF:
		return put_int(argp, bdev_alignment_offset(bdev));
	case BLKDISCARDZEROES:
		/* Always reported as 0. */
		return put_uint(argp, 0);
	case BLKSECTGET:
		/* Clamped to USHRT_MAX since the result is a ushort. */
		max_sectors = min_t(unsigned int, USHRT_MAX,
				    queue_max_sectors(bdev_get_queue(bdev)));
		return put_ushort(argp, max_sectors);
	case BLKROTATIONAL:
		return put_ushort(argp, !bdev_nonrot(bdev));
	case BLKRASET:
	case BLKFRASET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		/* arg is in 512-byte units; ra_pages is in pages. */
		bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
		return 0;
	case BLKRRPART:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		if (bdev_is_partition(bdev))
			return -EINVAL;
		return disk_scan_partitions(bdev->bd_disk,
				mode | BLK_OPEN_STRICT_SCAN);
	case BLKTRACESTART:
	case BLKTRACESTOP:
	case BLKTRACETEARDOWN:
		return blk_trace_ioctl(bdev, cmd, argp);
	case IOC_PR_REGISTER:
		return blkdev_pr_register(bdev, mode, argp);
	case IOC_PR_RESERVE:
		return blkdev_pr_reserve(bdev, mode, argp);
	case IOC_PR_RELEASE:
		return blkdev_pr_release(bdev, mode, argp);
	case IOC_PR_PREEMPT:
		return blkdev_pr_preempt(bdev, mode, argp, false);
	case IOC_PR_PREEMPT_ABORT:
		return blkdev_pr_preempt(bdev, mode, argp, true);
	case IOC_PR_CLEAR:
		return blkdev_pr_clear(bdev, mode, argp);
	default:
		return -ENOIOCTLCMD;
	}
}
/*
 * Always keep this in sync with compat_blkdev_ioctl()
 * to handle all incompatible commands in both functions.
 *
 * New commands must be compatible and go into blkdev_common_ioctl
 */
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	void __user *argp = (void __user *)arg;
	blk_mode_t mode = file_to_blk_mode(file);
	int ret;

	switch (cmd) {
	/* These need separate implementations for the data structure */
	case HDIO_GETGEO:
		return blkdev_getgeo(bdev, argp);
	case BLKPG:
		return blkpg_ioctl(bdev, argp);

	/* Compat mode returns 32-bit data instead of 'long' */
	case BLKRAGET:
	case BLKFRAGET:
		if (!argp)
			return -EINVAL;
		/* Readahead is reported in 512-byte units. */
		return put_long(argp,
			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
	case BLKGETSIZE:
		/* Sector count must fit in an unsigned long. */
		if (bdev_nr_sectors(bdev) > ~0UL)
			return -EFBIG;
		return put_ulong(argp, bdev_nr_sectors(bdev));

	/* The data is compatible, but the command number is different */
	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
		return put_int(argp, block_size(bdev));
	case BLKBSZSET:
		return blkdev_bszset(file, mode, argp);
	case BLKGETSIZE64:
		return put_u64(argp, bdev_nr_bytes(bdev));

	/* Incompatible alignment on i386 */
	case BLKTRACESETUP:
		return blk_trace_ioctl(bdev, cmd, argp);
	default:
		break;
	}

	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
	if (ret != -ENOIOCTLCMD)
		return ret;

	/* Fall through to the driver's own ioctl handler, if any. */
	if (!bdev->bd_disk->fops->ioctl)
		return -ENOTTY;
	return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
}
#ifdef CONFIG_COMPAT
/* 32-bit command numbers for the commands whose argument size differs. */
#define BLKBSZGET_32		_IOR(0x12, 112, int)
#define BLKBSZSET_32		_IOW(0x12, 113, int)
#define BLKGETSIZE64_32		_IOR(0x12, 114, int)

/* Most of the generic ioctls are handled in the normal fallback path.
   This assumes the blkdev's low level compat_ioctl always returns
   ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int ret;
	void __user *argp = compat_ptr(arg);
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	struct gendisk *disk = bdev->bd_disk;
	blk_mode_t mode = file_to_blk_mode(file);

	switch (cmd) {
	/* These need separate implementations for the data structure */
	case HDIO_GETGEO:
		return compat_hdio_getgeo(bdev, argp);
	case BLKPG:
		return compat_blkpg_ioctl(bdev, argp);

	/* Compat mode returns 32-bit data instead of 'long' */
	case BLKRAGET:
	case BLKFRAGET:
		if (!argp)
			return -EINVAL;
		/* Readahead is reported in 512-byte units. */
		return compat_put_long(argp,
			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
	case BLKGETSIZE:
		/* Sector count must fit in a 32-bit unsigned long. */
		if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
			return -EFBIG;
		return compat_put_ulong(argp, bdev_nr_sectors(bdev));

	/* The data is compatible, but the command number is different */
	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
		return put_int(argp, bdev_logical_block_size(bdev));
	case BLKBSZSET_32:
		return blkdev_bszset(file, mode, argp);
	case BLKGETSIZE64_32:
		return put_u64(argp, bdev_nr_bytes(bdev));

	/* Incompatible alignment on i386 */
	case BLKTRACESETUP32:
		return blk_trace_ioctl(bdev, cmd, argp);
	default:
		break;
	}

	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
	if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
		ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);

	return ret;
}
#endif
/* Per-command state for block device io_uring commands. */
struct blk_iou_cmd {
	int res;	/* first recorded error, or 0; reported at completion */
	bool nowait;	/* command was issued with IO_URING_F_NONBLOCK */
};
/*
 * Task-work completion for block uring commands.  A nowait issue that
 * failed with -EAGAIN is re-issued from a blocking context instead of
 * being completed with the error.
 */
static void blk_cmd_complete(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);

	if (bic->res == -EAGAIN && bic->nowait)
		io_uring_cmd_issue_blocking(cmd);
	else
		io_uring_cmd_done(cmd, bic->res, 0, issue_flags);
}
/*
 * Bio end_io handler: record the first error seen, then schedule
 * completion of the owning uring command via lazy task work.
 */
static void bio_cmd_bio_end_io(struct bio *bio)
{
	struct io_uring_cmd *cmd = bio->bi_private;
	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);

	/* Only the first error is kept; later ones are dropped. */
	if (unlikely(bio->bi_status) && !bic->res)
		bic->res = blk_status_to_errno(bio->bi_status);

	io_uring_cmd_do_in_task_lazy(cmd, blk_cmd_complete);
	bio_put(bio);
}
/*
 * BLOCK_URING_CMD_DISCARD: discard [start, start + len) on @bdev.
 *
 * Returns -EIOCBQUEUED once the final bio is in flight (the result is
 * then delivered via bio_cmd_bio_end_io()/blk_cmd_complete()), or a
 * negative errno for failures detected before submission.
 */
static int blkdev_cmd_discard(struct io_uring_cmd *cmd,
			      struct block_device *bdev,
			      uint64_t start, uint64_t len, bool nowait)
{
	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
	gfp_t gfp = nowait ? GFP_NOWAIT : GFP_KERNEL;
	sector_t sector = start >> SECTOR_SHIFT;
	sector_t nr_sects = len >> SECTOR_SHIFT;
	struct bio *prev = NULL, *bio;
	int err;

	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;
	if (!(file_to_blk_mode(cmd->file) & BLK_OPEN_WRITE))
		return -EBADF;
	if (bdev_read_only(bdev))
		return -EPERM;
	err = blk_validate_byte_range(bdev, start, len);
	if (err)
		return err;

	err = filemap_invalidate_pages(bdev->bd_mapping, start,
				       start + len - 1, nowait);
	if (err)
		return err;

	while (true) {
		/* Returns NULL once nr_sects has been fully consumed. */
		bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, gfp);
		if (!bio)
			break;
		if (nowait) {
			/*
			 * Don't allow multi-bio non-blocking submissions as
			 * subsequent bios may fail but we won't get a direct
			 * indication of that. Normally, the caller should
			 * retry from a blocking context.
			 */
			if (unlikely(nr_sects)) {
				bio_put(bio);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}
		prev = bio_chain_and_submit(prev, bio);
	}
	/* Nothing was submitted at all: ask the caller to retry. */
	if (unlikely(!prev))
		return -EAGAIN;
	/* Partial submission: flag -EAGAIN so the remainder is retried. */
	if (unlikely(nr_sects))
		bic->res = -EAGAIN;
	prev->bi_private = cmd;
	prev->bi_end_io = bio_cmd_bio_end_io;
	submit_bio(prev);
	return -EIOCBQUEUED;
}
/*
 * Entry point for io_uring commands on block devices.  Currently only
 * BLOCK_URING_CMD_DISCARD is implemented; sqe->addr carries the byte
 * offset and sqe->addr3 the byte length.
 */
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct block_device *bdev = I_BDEV(cmd->file->f_mapping->host);
	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
	const struct io_uring_sqe *sqe = cmd->sqe;
	u32 cmd_op = cmd->cmd_op;
	uint64_t start, len;

	/* Reject SQE fields that have no meaning for these commands. */
	if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len ||
		     sqe->rw_flags || sqe->file_index))
		return -EINVAL;

	bic->res = 0;
	bic->nowait = issue_flags & IO_URING_F_NONBLOCK;

	start = READ_ONCE(sqe->addr);
	len = READ_ONCE(sqe->addr3);

	switch (cmd_op) {
	case BLOCK_URING_CMD_DISCARD:
		return blkdev_cmd_discard(cmd, bdev, start, len, bic->nowait);
	}
	return -EINVAL;
}