ioctl.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/capability.h>
  3. #include <linux/compat.h>
  4. #include <linux/blkdev.h>
  5. #include <linux/export.h>
  6. #include <linux/gfp.h>
  7. #include <linux/blkpg.h>
  8. #include <linux/hdreg.h>
  9. #include <linux/backing-dev.h>
  10. #include <linux/fs.h>
  11. #include <linux/blktrace_api.h>
  12. #include <linux/pr.h>
  13. #include <linux/uaccess.h>
  14. #include <linux/pagemap.h>
  15. #include <linux/io_uring/cmd.h>
  16. #include <uapi/linux/blkdev.h>
  17. #include "blk.h"
/*
 * Handle a BLKPG partition ioctl: add, delete, or resize a partition on
 * the whole-disk device @bdev according to @op, using the user-supplied
 * partition description at @upart.
 *
 * Returns 0 on success or a negative errno.
 */
static int blkpg_do_ioctl(struct block_device *bdev,
			  struct blkpg_partition __user *upart, int op)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkpg_partition p;
	sector_t start, length, capacity, end;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
		return -EFAULT;

	/* Partition ioctls only make sense on the whole-disk device. */
	if (bdev_is_partition(bdev))
		return -EINVAL;

	if (p.pno <= 0)
		return -EINVAL;

	if (op == BLKPG_DEL_PARTITION)
		return bdev_del_partition(disk, p.pno);

	/* Reject negative/zero ranges and byte offsets that would overflow. */
	if (p.start < 0 || p.length <= 0 || LLONG_MAX - p.length < p.start)
		return -EINVAL;
	/* Check that the partition is aligned to the block size */
	if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
		return -EINVAL;

	start = p.start >> SECTOR_SHIFT;
	length = p.length >> SECTOR_SHIFT;
	capacity = get_capacity(disk);

	if (check_add_overflow(start, length, &end))
		return -EINVAL;

	/* The partition must lie entirely within the disk. */
	if (start >= capacity || end > capacity)
		return -EINVAL;

	switch (op) {
	case BLKPG_ADD_PARTITION:
		return bdev_add_partition(disk, p.pno, start, length);
	case BLKPG_RESIZE_PARTITION:
		return bdev_resize_partition(disk, p.pno, start, length);
	default:
		return -EINVAL;
	}
}
  55. static int blkpg_ioctl(struct block_device *bdev,
  56. struct blkpg_ioctl_arg __user *arg)
  57. {
  58. struct blkpg_partition __user *udata;
  59. int op;
  60. if (get_user(op, &arg->op) || get_user(udata, &arg->data))
  61. return -EFAULT;
  62. return blkpg_do_ioctl(bdev, udata, op);
  63. }
#ifdef CONFIG_COMPAT
/* 32-bit layout of struct blkpg_ioctl_arg (data is a compat pointer). */
struct compat_blkpg_ioctl_arg {
	compat_int_t op;
	compat_int_t flags;
	compat_int_t datalen;
	compat_caddr_t data;
};

/* 32-bit BLKPG entry point: translate the data pointer and dispatch. */
static int compat_blkpg_ioctl(struct block_device *bdev,
			      struct compat_blkpg_ioctl_arg __user *arg)
{
	compat_caddr_t udata;
	int op;

	if (get_user(op, &arg->op) || get_user(udata, &arg->data))
		return -EFAULT;

	return blkpg_do_ioctl(bdev, compat_ptr(udata), op);
}
#endif
  81. /*
  82. * Check that [start, start + len) is a valid range from the block device's
  83. * perspective, including verifying that it can be correctly translated into
  84. * logical block addresses.
  85. */
  86. static int blk_validate_byte_range(struct block_device *bdev,
  87. uint64_t start, uint64_t len)
  88. {
  89. unsigned int bs_mask = bdev_logical_block_size(bdev) - 1;
  90. uint64_t end;
  91. if ((start | len) & bs_mask)
  92. return -EINVAL;
  93. if (!len)
  94. return -EINVAL;
  95. if (check_add_overflow(start, len, &end) || end > bdev_nr_bytes(bdev))
  96. return -EINVAL;
  97. return 0;
  98. }
/*
 * BLKDISCARD: discard the byte range {start, len} copied from userspace.
 *
 * The page cache over the range is truncated under the invalidate lock,
 * then discard bios are built and chain-submitted until the range is
 * covered.  A fatal signal aborts the build loop; bios already in flight
 * are awaited before returning -EINTR.
 */
static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
		unsigned long arg)
{
	uint64_t range[2], start, len;
	struct bio *prev = NULL, *bio;
	sector_t sector, nr_sects;
	struct blk_plug plug;
	int err;

	if (copy_from_user(range, (void __user *)arg, sizeof(range)))
		return -EFAULT;
	start = range[0];
	len = range[1];

	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;
	if (bdev_read_only(bdev))
		return -EPERM;
	err = blk_validate_byte_range(bdev, start, len);
	if (err)
		return err;

	/* Drop the page cache for the range before discarding it. */
	inode_lock(bdev->bd_mapping->host);
	filemap_invalidate_lock(bdev->bd_mapping);
	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
	if (err)
		goto fail;

	sector = start >> SECTOR_SHIFT;
	nr_sects = len >> SECTOR_SHIFT;

	blk_start_plug(&plug);
	while (1) {
		if (fatal_signal_pending(current)) {
			/* Wait for already-submitted bios before bailing. */
			if (prev)
				bio_await_chain(prev);
			err = -EINTR;
			goto out_unplug;
		}
		/* blk_alloc_discard_bio() advances sector/nr_sects. */
		bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
				GFP_KERNEL);
		if (!bio)
			break;
		prev = bio_chain_and_submit(prev, bio);
	}
	if (prev) {
		err = submit_bio_wait(prev);
		/* -EOPNOTSUPP from the driver is deliberately ignored. */
		if (err == -EOPNOTSUPP)
			err = 0;
		bio_put(prev);
	}
out_unplug:
	blk_finish_plug(&plug);
fail:
	filemap_invalidate_unlock(bdev->bd_mapping);
	inode_unlock(bdev->bd_mapping->host);
	return err;
}
/*
 * BLKSECDISCARD: securely erase the byte range {start, len} copied from
 * userspace, after truncating the page cache over the range.
 */
static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
		void __user *argp)
{
	uint64_t start, len, end;
	uint64_t range[2];
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;
	if (!bdev_max_secure_erase_sectors(bdev))
		return -EOPNOTSUPP;
	if (copy_from_user(range, argp, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];
	/* Offset and length must be 512-byte aligned. */
	if ((start & 511) || (len & 511))
		return -EINVAL;
	/* Range must not wrap and must fit inside the device. */
	if (check_add_overflow(start, len, &end) ||
	    end > bdev_nr_bytes(bdev))
		return -EINVAL;

	inode_lock(bdev->bd_mapping->host);
	filemap_invalidate_lock(bdev->bd_mapping);
	err = truncate_bdev_range(bdev, mode, start, end - 1);
	if (!err)
		err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
						GFP_KERNEL);
	filemap_invalidate_unlock(bdev->bd_mapping);
	inode_unlock(bdev->bd_mapping->host);
	return err;
}
/*
 * BLKZEROOUT: zero the byte range {start, len} copied from userspace.
 * Zeroing writes real zeroes rather than unmapping (BLKDEV_ZERO_NOUNMAP)
 * and can be interrupted by fatal signals (BLKDEV_ZERO_KILLABLE).
 */
static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
		unsigned long arg)
{
	uint64_t range[2];
	uint64_t start, end, len;
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;

	if (copy_from_user(range, (void __user *)arg, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];
	end = start + len - 1;

	/* Offset and length must be 512-byte aligned. */
	if (start & 511)
		return -EINVAL;
	if (len & 511)
		return -EINVAL;
	if (end >= (uint64_t)bdev_nr_bytes(bdev))
		return -EINVAL;
	/* Catches wrap-around of start + len. */
	if (end < start)
		return -EINVAL;

	/* Invalidate the page cache, including dirty pages */
	inode_lock(bdev->bd_mapping->host);
	filemap_invalidate_lock(bdev->bd_mapping);
	err = truncate_bdev_range(bdev, mode, start, end);
	if (err)
		goto fail;

	err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
				   BLKDEV_ZERO_NOUNMAP | BLKDEV_ZERO_KILLABLE);
fail:
	filemap_invalidate_unlock(bdev->bd_mapping);
	inode_unlock(bdev->bd_mapping->host);
	return err;
}
/*
 * Tiny typed wrappers around put_user(), used below to copy a single
 * scalar ioctl result back to userspace with the correct width.
 */
static int put_ushort(unsigned short __user *argp, unsigned short val)
{
	return put_user(val, argp);
}

static int put_int(int __user *argp, int val)
{
	return put_user(val, argp);
}

static int put_uint(unsigned int __user *argp, unsigned int val)
{
	return put_user(val, argp);
}

static int put_long(long __user *argp, long val)
{
	return put_user(val, argp);
}

static int put_ulong(unsigned long __user *argp, unsigned long val)
{
	return put_user(val, argp);
}

static int put_u64(u64 __user *argp, u64 val)
{
	return put_user(val, argp);
}
#ifdef CONFIG_COMPAT
/* Same as put_long()/put_ulong() but with 32-bit user-visible width. */
static int compat_put_long(compat_long_t __user *argp, long val)
{
	return put_user(val, argp);
}

static int compat_put_ulong(compat_ulong_t __user *argp, compat_ulong_t val)
{
	return put_user(val, argp);
}
#endif
  251. #ifdef CONFIG_COMPAT
  252. /*
  253. * This is the equivalent of compat_ptr_ioctl(), to be used by block
  254. * drivers that implement only commands that are completely compatible
  255. * between 32-bit and 64-bit user space
  256. */
  257. int blkdev_compat_ptr_ioctl(struct block_device *bdev, blk_mode_t mode,
  258. unsigned cmd, unsigned long arg)
  259. {
  260. struct gendisk *disk = bdev->bd_disk;
  261. if (disk->fops->ioctl)
  262. return disk->fops->ioctl(bdev, mode, cmd,
  263. (unsigned long)compat_ptr(arg));
  264. return -ENOIOCTLCMD;
  265. }
  266. EXPORT_SYMBOL(blkdev_compat_ptr_ioctl);
  267. #endif
/*
 * Permission check shared by all persistent-reservation ioctls.
 * Note: the capable() call comes before the mode test so its audit
 * side effects match the original ordering.
 */
static bool blkdev_pr_allowed(struct block_device *bdev, blk_mode_t mode)
{
	/* no sense to make reservations for partitions */
	if (bdev_is_partition(bdev))
		return false;

	if (capable(CAP_SYS_ADMIN))
		return true;
	/*
	 * Only allow unprivileged reservations if the file descriptor is open
	 * for writing.
	 */
	return mode & BLK_OPEN_WRITE;
}
/*
 * IOC_PR_REGISTER: register (or replace) a persistent-reservation key.
 * PR_FL_IGNORE_KEY is the only flag accepted.
 */
static int blkdev_pr_register(struct block_device *bdev, blk_mode_t mode,
		struct pr_registration __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_registration reg;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);
}
/*
 * IOC_PR_RESERVE: acquire a persistent reservation of the given type.
 * PR_FL_IGNORE_KEY is the only flag accepted.
 */
static int blkdev_pr_reserve(struct block_device *bdev, blk_mode_t mode,
		struct pr_reservation __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_reservation rsv;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_reserve)
		return -EOPNOTSUPP;
	if (copy_from_user(&rsv, arg, sizeof(rsv)))
		return -EFAULT;

	if (rsv.flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);
}
/*
 * IOC_PR_RELEASE: release a held persistent reservation.  No flags are
 * accepted for this operation.
 */
static int blkdev_pr_release(struct block_device *bdev, blk_mode_t mode,
		struct pr_reservation __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_reservation rsv;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_release)
		return -EOPNOTSUPP;
	if (copy_from_user(&rsv, arg, sizeof(rsv)))
		return -EFAULT;

	if (rsv.flags)
		return -EOPNOTSUPP;
	return ops->pr_release(bdev, rsv.key, rsv.type);
}
/*
 * IOC_PR_PREEMPT / IOC_PR_PREEMPT_ABORT: preempt another initiator's
 * reservation; @abort selects the aborting variant.  No flags accepted.
 */
static int blkdev_pr_preempt(struct block_device *bdev, blk_mode_t mode,
		struct pr_preempt __user *arg, bool abort)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_preempt p;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_preempt)
		return -EOPNOTSUPP;
	if (copy_from_user(&p, arg, sizeof(p)))
		return -EFAULT;

	if (p.flags)
		return -EOPNOTSUPP;
	return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);
}
/*
 * IOC_PR_CLEAR: remove all reservations and registrations.  No flags
 * accepted.
 */
static int blkdev_pr_clear(struct block_device *bdev, blk_mode_t mode,
		struct pr_clear __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_clear c;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_clear)
		return -EOPNOTSUPP;
	if (copy_from_user(&c, arg, sizeof(c)))
		return -EFAULT;

	if (c.flags)
		return -EOPNOTSUPP;
	return ops->pr_clear(bdev, c.key);
}
/*
 * BLKFLSBUF: flush dirty buffers to the device and invalidate its page
 * cache.  Requires CAP_SYS_ADMIN.
 */
static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
		unsigned long arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
		/*
		 * NOTE(review): there is no unlock on this path, so the
		 * ->sync() callback is presumably responsible for dropping
		 * bd_holder_lock — confirm against the holder_ops contract.
		 */
		bdev->bd_holder_ops->sync(bdev);
	else {
		mutex_unlock(&bdev->bd_holder_lock);
		sync_blockdev(bdev);
	}

	invalidate_bdev(bdev);
	return 0;
}
/*
 * BLKROSET: set (arg != 0) or clear the device's read-only flag.
 * Requires CAP_SYS_ADMIN.
 */
static int blkdev_roset(struct block_device *bdev, unsigned cmd,
		unsigned long arg)
{
	int ret, n;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (get_user(n, (int __user *)arg))
		return -EFAULT;
	/* Give the driver a chance to act on, or veto, the change. */
	if (bdev->bd_disk->fops->set_read_only) {
		ret = bdev->bd_disk->fops->set_read_only(bdev, n);
		if (ret)
			return ret;
	}
	if (n)
		bdev_set_flag(bdev, BD_READ_ONLY);
	else
		bdev_clear_flag(bdev, BD_READ_ONLY);
	return 0;
}
/*
 * HDIO_GETGEO: ask the driver for drive geometry and copy the result to
 * userspace.  -ENOTTY if the driver has no getgeo method.
 */
static int blkdev_getgeo(struct block_device *bdev,
		struct hd_geometry __user *argp)
{
	struct gendisk *disk = bdev->bd_disk;
	struct hd_geometry geo;
	int ret;

	if (!argp)
		return -EINVAL;
	if (!disk->fops->getgeo)
		return -ENOTTY;

	/*
	 * We need to set the startsect first, the driver may
	 * want to override it.
	 */
	memset(&geo, 0, sizeof(geo));
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);
	if (ret)
		return ret;
	if (copy_to_user(argp, &geo, sizeof(geo)))
		return -EFAULT;
	return 0;
}
#ifdef CONFIG_COMPAT
/* 32-bit layout of struct hd_geometry ('start' narrows to u32). */
struct compat_hd_geometry {
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
	u32 start;
};

/*
 * HDIO_GETGEO for 32-bit user space: same as blkdev_getgeo() but copies
 * out the compat layout field by field.
 */
static int compat_hdio_getgeo(struct block_device *bdev,
			      struct compat_hd_geometry __user *ugeo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct hd_geometry geo;
	int ret;

	if (!ugeo)
		return -EINVAL;
	if (!disk->fops->getgeo)
		return -ENOTTY;

	memset(&geo, 0, sizeof(geo));
	/*
	 * We need to set the startsect first, the driver may
	 * want to override it.
	 */
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);
	if (ret)
		return ret;

	/* First 4 bytes = heads (1) + sectors (1) + cylinders (2). */
	ret = copy_to_user(ugeo, &geo, 4);
	ret |= put_user(geo.start, &ugeo->start);
	if (ret)
		ret = -EFAULT;

	return ret;
}
#endif
/* set the logical block size */
static int blkdev_bszset(struct file *file, blk_mode_t mode,
		int __user *argp)
{
	// this one might be file_inode(file)->i_rdev - a rare valid
	// use of file_inode() for those.
	dev_t dev = I_BDEV(file->f_mapping->host)->bd_dev;
	struct file *excl_file;
	int ret, n;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;
	if (get_user(n, argp))
		return -EFAULT;

	/* Already opened exclusively: change the block size directly. */
	if (mode & BLK_OPEN_EXCL)
		return set_blocksize(file, n);

	/*
	 * Otherwise take a temporary exclusive open (passing &dev as the
	 * holder) for the duration of the change; -EBUSY if someone else
	 * holds the device exclusively.
	 */
	excl_file = bdev_file_open_by_dev(dev, mode, &dev, NULL);
	if (IS_ERR(excl_file))
		return -EBUSY;
	ret = set_blocksize(excl_file, n);
	fput(excl_file);
	return ret;
}
  470. /*
  471. * Common commands that are handled the same way on native and compat
  472. * user space. Note the separate arg/argp parameters that are needed
  473. * to deal with the compat_ptr() conversion.
  474. */
  475. static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
  476. unsigned int cmd, unsigned long arg,
  477. void __user *argp)
  478. {
  479. unsigned int max_sectors;
  480. switch (cmd) {
  481. case BLKFLSBUF:
  482. return blkdev_flushbuf(bdev, cmd, arg);
  483. case BLKROSET:
  484. return blkdev_roset(bdev, cmd, arg);
  485. case BLKDISCARD:
  486. return blk_ioctl_discard(bdev, mode, arg);
  487. case BLKSECDISCARD:
  488. return blk_ioctl_secure_erase(bdev, mode, argp);
  489. case BLKZEROOUT:
  490. return blk_ioctl_zeroout(bdev, mode, arg);
  491. case BLKGETDISKSEQ:
  492. return put_u64(argp, bdev->bd_disk->diskseq);
  493. case BLKREPORTZONE:
  494. return blkdev_report_zones_ioctl(bdev, cmd, arg);
  495. case BLKRESETZONE:
  496. case BLKOPENZONE:
  497. case BLKCLOSEZONE:
  498. case BLKFINISHZONE:
  499. return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg);
  500. case BLKGETZONESZ:
  501. return put_uint(argp, bdev_zone_sectors(bdev));
  502. case BLKGETNRZONES:
  503. return put_uint(argp, bdev_nr_zones(bdev));
  504. case BLKROGET:
  505. return put_int(argp, bdev_read_only(bdev) != 0);
  506. case BLKSSZGET: /* get block device logical block size */
  507. return put_int(argp, bdev_logical_block_size(bdev));
  508. case BLKPBSZGET: /* get block device physical block size */
  509. return put_uint(argp, bdev_physical_block_size(bdev));
  510. case BLKIOMIN:
  511. return put_uint(argp, bdev_io_min(bdev));
  512. case BLKIOOPT:
  513. return put_uint(argp, bdev_io_opt(bdev));
  514. case BLKALIGNOFF:
  515. return put_int(argp, bdev_alignment_offset(bdev));
  516. case BLKDISCARDZEROES:
  517. return put_uint(argp, 0);
  518. case BLKSECTGET:
  519. max_sectors = min_t(unsigned int, USHRT_MAX,
  520. queue_max_sectors(bdev_get_queue(bdev)));
  521. return put_ushort(argp, max_sectors);
  522. case BLKROTATIONAL:
  523. return put_ushort(argp, !bdev_nonrot(bdev));
  524. case BLKRASET:
  525. case BLKFRASET:
  526. if(!capable(CAP_SYS_ADMIN))
  527. return -EACCES;
  528. bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
  529. return 0;
  530. case BLKRRPART:
  531. if (!capable(CAP_SYS_ADMIN))
  532. return -EACCES;
  533. if (bdev_is_partition(bdev))
  534. return -EINVAL;
  535. return disk_scan_partitions(bdev->bd_disk,
  536. mode | BLK_OPEN_STRICT_SCAN);
  537. case BLKTRACESTART:
  538. case BLKTRACESTOP:
  539. case BLKTRACETEARDOWN:
  540. return blk_trace_ioctl(bdev, cmd, argp);
  541. case IOC_PR_REGISTER:
  542. return blkdev_pr_register(bdev, mode, argp);
  543. case IOC_PR_RESERVE:
  544. return blkdev_pr_reserve(bdev, mode, argp);
  545. case IOC_PR_RELEASE:
  546. return blkdev_pr_release(bdev, mode, argp);
  547. case IOC_PR_PREEMPT:
  548. return blkdev_pr_preempt(bdev, mode, argp, false);
  549. case IOC_PR_PREEMPT_ABORT:
  550. return blkdev_pr_preempt(bdev, mode, argp, true);
  551. case IOC_PR_CLEAR:
  552. return blkdev_pr_clear(bdev, mode, argp);
  553. default:
  554. return -ENOIOCTLCMD;
  555. }
  556. }
/*
 * Always keep this in sync with compat_blkdev_ioctl()
 * to handle all incompatible commands in both functions.
 *
 * New commands must be compatible and go into blkdev_common_ioctl
 */
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	void __user *argp = (void __user *)arg;
	blk_mode_t mode = file_to_blk_mode(file);
	int ret;

	switch (cmd) {
	/* These need separate implementations for the data structure */
	case HDIO_GETGEO:
		return blkdev_getgeo(bdev, argp);
	case BLKPG:
		return blkpg_ioctl(bdev, argp);

	/* Compat mode returns 32-bit data instead of 'long' */
	case BLKRAGET:
	case BLKFRAGET:
		if (!argp)
			return -EINVAL;
		/* ra_pages is in PAGE_SIZE units; report 512-byte sectors */
		return put_long(argp,
			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
	case BLKGETSIZE:
		/* Sector count must fit in the user's unsigned long. */
		if (bdev_nr_sectors(bdev) > ~0UL)
			return -EFBIG;
		return put_ulong(argp, bdev_nr_sectors(bdev));

	/* The data is compatible, but the command number is different */
	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
		return put_int(argp, block_size(bdev));
	case BLKBSZSET:
		return blkdev_bszset(file, mode, argp);
	case BLKGETSIZE64:
		return put_u64(argp, bdev_nr_bytes(bdev));

	/* Incompatible alignment on i386 */
	case BLKTRACESETUP:
		return blk_trace_ioctl(bdev, cmd, argp);
	default:
		break;
	}

	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
	if (ret != -ENOIOCTLCMD)
		return ret;

	/* Fall back to the driver's own ioctl handler, if any. */
	if (!bdev->bd_disk->fops->ioctl)
		return -ENOTTY;
	return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
}
#ifdef CONFIG_COMPAT

/* 32-bit command numbers whose encoding differs from the native ones. */
#define BLKBSZGET_32		_IOR(0x12, 112, int)
#define BLKBSZSET_32		_IOW(0x12, 113, int)
#define BLKGETSIZE64_32		_IOR(0x12, 114, int)

/* Most of the generic ioctls are handled in the normal fallback path.
   This assumes the blkdev's low level compat_ioctl always returns
   ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int ret;
	void __user *argp = compat_ptr(arg);
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	struct gendisk *disk = bdev->bd_disk;
	blk_mode_t mode = file_to_blk_mode(file);

	switch (cmd) {
	/* These need separate implementations for the data structure */
	case HDIO_GETGEO:
		return compat_hdio_getgeo(bdev, argp);
	case BLKPG:
		return compat_blkpg_ioctl(bdev, argp);

	/* Compat mode returns 32-bit data instead of 'long' */
	case BLKRAGET:
	case BLKFRAGET:
		if (!argp)
			return -EINVAL;
		/* ra_pages is in PAGE_SIZE units; report 512-byte sectors */
		return compat_put_long(argp,
			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
	case BLKGETSIZE:
		/* Sector count must fit in the user's 32-bit ulong. */
		if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
			return -EFBIG;
		return compat_put_ulong(argp, bdev_nr_sectors(bdev));

	/* The data is compatible, but the command number is different */
	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
		return put_int(argp, bdev_logical_block_size(bdev));
	case BLKBSZSET_32:
		return blkdev_bszset(file, mode, argp);
	case BLKGETSIZE64_32:
		return put_u64(argp, bdev_nr_bytes(bdev));

	/* Incompatible alignment on i386 */
	case BLKTRACESETUP32:
		return blk_trace_ioctl(bdev, cmd, argp);
	default:
		break;
	}

	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
	if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
		ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);

	return ret;
}
#endif
/* Per-command private state for block-device io_uring commands. */
struct blk_iou_cmd {
	int res;	/* completion result: 0 or negative errno */
	bool nowait;	/* issued with IO_URING_F_NONBLOCK */
};
/*
 * Task-work completion for a block io_uring command.  A nowait issue
 * that ended in -EAGAIN is re-issued from a blocking context instead of
 * being completed with an error.
 */
static void blk_cmd_complete(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);

	if (bic->res == -EAGAIN && bic->nowait)
		io_uring_cmd_issue_blocking(cmd);
	else
		io_uring_cmd_done(cmd, bic->res, 0, issue_flags);
}
/*
 * bio end_io handler for io_uring block commands: record the first
 * failure status and schedule completion via lazy task work.
 */
static void bio_cmd_bio_end_io(struct bio *bio)
{
	struct io_uring_cmd *cmd = bio->bi_private;
	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);

	/* Keep only the first error; later bios must not overwrite it. */
	if (unlikely(bio->bi_status) && !bic->res)
		bic->res = blk_status_to_errno(bio->bi_status);

	io_uring_cmd_do_in_task_lazy(cmd, blk_cmd_complete);
	bio_put(bio);
}
/*
 * BLOCK_URING_CMD_DISCARD: discard a byte range, io_uring flavour.
 *
 * Returns -EIOCBQUEUED when bios were submitted; the command is then
 * completed from bio_cmd_bio_end_io().  In nowait mode only a
 * single-bio discard may be issued — anything larger gets -EAGAIN so
 * the caller can retry from a blocking context.
 */
static int blkdev_cmd_discard(struct io_uring_cmd *cmd,
			      struct block_device *bdev,
			      uint64_t start, uint64_t len, bool nowait)
{
	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
	gfp_t gfp = nowait ? GFP_NOWAIT : GFP_KERNEL;
	sector_t sector = start >> SECTOR_SHIFT;
	sector_t nr_sects = len >> SECTOR_SHIFT;
	struct bio *prev = NULL, *bio;
	int err;

	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;
	if (!(file_to_blk_mode(cmd->file) & BLK_OPEN_WRITE))
		return -EBADF;
	if (bdev_read_only(bdev))
		return -EPERM;
	err = blk_validate_byte_range(bdev, start, len);
	if (err)
		return err;

	err = filemap_invalidate_pages(bdev->bd_mapping, start,
				       start + len - 1, nowait);
	if (err)
		return err;

	while (true) {
		bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, gfp);
		if (!bio)
			break;
		if (nowait) {
			/*
			 * Don't allow multi-bio non-blocking submissions as
			 * subsequent bios may fail but we won't get a direct
			 * indication of that. Normally, the caller should
			 * retry from a blocking context.
			 */
			if (unlikely(nr_sects)) {
				bio_put(bio);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		prev = bio_chain_and_submit(prev, bio);
	}
	/* No bio at all: allocation failed, let the caller retry blocking. */
	if (unlikely(!prev))
		return -EAGAIN;
	/* Range not fully covered: complete the command with -EAGAIN. */
	if (unlikely(nr_sects))
		bic->res = -EAGAIN;

	prev->bi_private = cmd;
	prev->bi_end_io = bio_cmd_bio_end_io;
	submit_bio(prev);
	return -EIOCBQUEUED;
}
/*
 * Entry point for io_uring commands on block devices.  Rejects SQEs with
 * any unused field set, then dispatches on cmd_op.
 */
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct block_device *bdev = I_BDEV(cmd->file->f_mapping->host);
	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
	const struct io_uring_sqe *sqe = cmd->sqe;
	u32 cmd_op = cmd->cmd_op;
	uint64_t start, len;

	/* All currently-unused SQE fields must be zero. */
	if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len ||
		     sqe->rw_flags || sqe->file_index))
		return -EINVAL;

	bic->res = 0;
	bic->nowait = issue_flags & IO_URING_F_NONBLOCK;

	/* addr = byte offset, addr3 = byte length of the target range. */
	start = READ_ONCE(sqe->addr);
	len = READ_ONCE(sqe->addr3);

	switch (cmd_op) {
	case BLOCK_URING_CMD_DISCARD:
		return blkdev_cmd_discard(cmd, bdev, start, len, bic->nowait);
	}
	return -EINVAL;
}