  1. /*
  2. * hw_random/core.c: HWRNG core API
  3. *
  4. * Copyright 2006 Michael Buesch <m@bues.ch>
  5. * Copyright 2005 (c) MontaVista Software, Inc.
  6. *
  7. * Please read Documentation/hw_random.txt for details on use.
  8. *
  9. * This software may be used and distributed according to the terms
  10. * of the GNU General Public License, incorporated herein by reference.
  11. */
  12. #include <linux/delay.h>
  13. #include <linux/device.h>
  14. #include <linux/err.h>
  15. #include <linux/fs.h>
  16. #include <linux/hw_random.h>
  17. #include <linux/kernel.h>
  18. #include <linux/kthread.h>
  19. #include <linux/sched/signal.h>
  20. #include <linux/miscdevice.h>
  21. #include <linux/module.h>
  22. #include <linux/random.h>
  23. #include <linux/sched.h>
  24. #include <linux/slab.h>
  25. #include <linux/uaccess.h>
  26. #define RNG_MODULE_NAME "hw_random"
static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
/* task running hwrng_fillfn(); NULL when the fill thread is not running */
static struct task_struct *hwrng_fill;
/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
/* bytes currently buffered in rng_buffer (consumed from the tail) */
static int data_avail;
/* rng_buffer backs /dev/hwrng reads; rng_fillbuf backs hwrng_fillfn() */
static u8 *rng_buffer, *rng_fillbuf;
/* entropy estimates, in per mille (0..1024 after clamping) */
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");

/* forward declarations */
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);
  52. static size_t rng_buffer_size(void)
  53. {
  54. return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
  55. }
  56. static void add_early_randomness(struct hwrng *rng)
  57. {
  58. int bytes_read;
  59. size_t size = min_t(size_t, 16, rng_buffer_size());
  60. mutex_lock(&reading_mutex);
  61. bytes_read = rng_get_data(rng, rng_buffer, size, 0);
  62. mutex_unlock(&reading_mutex);
  63. if (bytes_read > 0)
  64. add_device_randomness(rng_buffer, bytes_read);
  65. }
/*
 * kref release callback for a struct hwrng: run the driver's optional
 * ->cleanup hook, then signal waiters (hwrng_unregister()) via cleanup_done.
 */
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}
/*
 * Make @rng the current rng.  Initialises (or re-references) @rng first so
 * the old current rng is only dropped on success.  Caller holds rng_mutex.
 */
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}
/*
 * Release the reference held through current_rng and clear the pointer.
 * Caller holds rng_mutex.
 */
static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));

	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}
/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);	/* caller must balance with put_rng() */

	mutex_unlock(&rng_mutex);

	return rng;
}
/*
 * Drop a reference obtained via get_current_rng().  @rng may be NULL, in
 * which case only the lock round-trip happens.
 */
static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
/*
 * Initialise @rng (running the driver's ->init hook on first use) and take
 * a reference on it.  Also recomputes current_quality and starts or stops
 * the khwrngd fill thread accordingly.  Caller holds rng_mutex.
 */
static int hwrng_init(struct hwrng *rng)
{
	/* Already live: just take another reference and skip ->init. */
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	/* per-mille entropy estimate, clamped to the 1024 scale used below */
	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	/* quality 0 disables khwrngd; nonzero quality requires it running */
	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}
  139. static int rng_dev_open(struct inode *inode, struct file *filp)
  140. {
  141. /* enforce read-only access to this chrdev */
  142. if ((filp->f_mode & FMODE_READ) == 0)
  143. return -EINVAL;
  144. if (filp->f_mode & FMODE_WRITE)
  145. return -EINVAL;
  146. return 0;
  147. }
  148. static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
  149. int wait) {
  150. int present;
  151. BUG_ON(!mutex_is_locked(&reading_mutex));
  152. if (rng->read)
  153. return rng->read(rng, (void *)buffer, size, wait);
  154. if (rng->data_present)
  155. present = rng->data_present(rng, wait);
  156. else
  157. present = 1;
  158. if (present)
  159. return rng->data_read(rng, (u32 *)buffer);
  160. return 0;
  161. }
/*
 * Read handler for /dev/hwrng.  Refills the shared rng_buffer from the
 * current rng as needed and copies buffered bytes out to userspace in
 * chunks.  Returns the number of bytes copied, or a negative errno when
 * nothing was copied.
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		/* Re-resolve each pass: the current rng may change mid-read. */
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			/*
			 * Buffer exhausted: pull a fresh block, blocking in
			 * the driver only if the file was opened blocking.
			 */
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			/* Consume from the tail end of rng_buffer. */
			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
					 len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		/* Yield between chunks on long reads. */
		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	/* report bytes copied so far; only report an error if none were */
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
/* File operations for the read-only /dev/hwrng character device. */
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};
static const struct attribute_group *rng_dev_groups[];

/* Misc char device providing /dev/hwrng and its sysfs attribute groups. */
static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
  242. static int enable_best_rng(void)
  243. {
  244. int ret = -ENODEV;
  245. BUG_ON(!mutex_is_locked(&rng_mutex));
  246. /* rng_list is sorted by quality, use the best (=first) one */
  247. if (!list_empty(&rng_list)) {
  248. struct hwrng *new_rng;
  249. new_rng = list_entry(rng_list.next, struct hwrng, list);
  250. ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
  251. if (!ret)
  252. cur_rng_set_by_user = 0;
  253. } else {
  254. drop_current_rng();
  255. cur_rng_set_by_user = 0;
  256. ret = 0;
  257. }
  258. return ret;
  259. }
  260. static ssize_t hwrng_attr_current_store(struct device *dev,
  261. struct device_attribute *attr,
  262. const char *buf, size_t len)
  263. {
  264. int err = -ENODEV;
  265. struct hwrng *rng;
  266. err = mutex_lock_interruptible(&rng_mutex);
  267. if (err)
  268. return -ERESTARTSYS;
  269. if (sysfs_streq(buf, "")) {
  270. err = enable_best_rng();
  271. } else {
  272. list_for_each_entry(rng, &rng_list, list) {
  273. if (sysfs_streq(rng->name, buf)) {
  274. cur_rng_set_by_user = 1;
  275. err = set_current_rng(rng);
  276. break;
  277. }
  278. }
  279. }
  280. mutex_unlock(&rng_mutex);
  281. return err ? : len;
  282. }
  283. static ssize_t hwrng_attr_current_show(struct device *dev,
  284. struct device_attribute *attr,
  285. char *buf)
  286. {
  287. ssize_t ret;
  288. struct hwrng *rng;
  289. rng = get_current_rng();
  290. if (IS_ERR(rng))
  291. return PTR_ERR(rng);
  292. ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
  293. put_rng(rng);
  294. return ret;
  295. }
  296. static ssize_t hwrng_attr_available_show(struct device *dev,
  297. struct device_attribute *attr,
  298. char *buf)
  299. {
  300. int err;
  301. struct hwrng *rng;
  302. err = mutex_lock_interruptible(&rng_mutex);
  303. if (err)
  304. return -ERESTARTSYS;
  305. buf[0] = '\0';
  306. list_for_each_entry(rng, &rng_list, list) {
  307. strlcat(buf, rng->name, PAGE_SIZE);
  308. strlcat(buf, " ", PAGE_SIZE);
  309. }
  310. strlcat(buf, "\n", PAGE_SIZE);
  311. mutex_unlock(&rng_mutex);
  312. return strlen(buf);
  313. }
/*
 * sysfs read handler for rng_selected: 1 if userspace pinned the current
 * rng via rng_current, 0 if it was chosen automatically.
 */
static ssize_t hwrng_attr_selected_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
}
/* sysfs attributes: rng_current (rw), rng_available (ro), rng_selected (ro) */
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);
static DEVICE_ATTR(rng_selected, S_IRUGO,
		   hwrng_attr_selected_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	NULL
};

/* defines rng_dev_groups[], referenced by rng_miscdev */
ATTRIBUTE_GROUPS(rng_dev);
/* Remove the /dev/hwrng misc device on module exit. */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}
/* Create the /dev/hwrng misc device; returns 0 or a negative errno. */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}
/*
 * khwrngd main loop: repeatedly read from the current rng into rng_fillbuf
 * and feed the result to the kernel entropy pool, crediting entropy in
 * proportion to current_quality (per mille).  Exits when no rng is
 * available or kthread_stop() is called; clears hwrng_fill on the way out.
 */
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;

		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (rc <= 0) {
			/* Back off rather than spin on a dry device. */
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}

		/* Outside lock, sure, but y'know: randomness. */
		/* entropy bits credited = bytes * 8 * quality / 1024 */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}
  369. static void start_khwrngd(void)
  370. {
  371. hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
  372. if (IS_ERR(hwrng_fill)) {
  373. pr_err("hwrng_fill thread creation failed\n");
  374. hwrng_fill = NULL;
  375. }
  376. }
/**
 * hwrng_register - register a hardware rng with the core
 * @rng: device to register; must have a name and either ->read or
 *       ->data_read
 *
 * Inserts @rng into rng_list (kept sorted by decreasing quality) and makes
 * it current if none is set, or if it beats the current rng's quality and
 * userspace has not pinned a choice.  Returns 0, -EINVAL for a malformed
 * rng, -EEXIST for a duplicate name, or an error from set_current_rng().
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;
	struct list_head *rng_list_ptr;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	/* Start with cleanup_done completed; hwrng_init() re-arms it. */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	/* rng_list is sorted by decreasing quality */
	list_for_each(rng_list_ptr, &rng_list) {
		tmp = list_entry(rng_list_ptr, struct hwrng, list);
		if (tmp->quality < rng->quality)
			break;
	}
	list_add_tail(&rng->list, rng_list_ptr);

	old_rng = current_rng;
	err = 0;
	if (!old_rng ||
	    (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
/**
 * hwrng_unregister - remove a hardware rng from the core
 * @rng: device to unregister
 *
 * Removes @rng from rng_list; if it was current, switches to the best
 * remaining rng (or none).  Stops khwrngd when the list empties, then
 * blocks until @rng's cleanup has completed.
 */
void hwrng_unregister(struct hwrng *rng)
{
	int err;

	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			/* no usable replacement: run with no current rng */
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	if (list_empty(&rng_list)) {
		/* drop the lock before stopping the fill thread */
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
/* devres destructor: unregister the hwrng stored in the devres slot. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}
/* devres match callback: true when the slot holds @data (the target rng). */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
  461. int devm_hwrng_register(struct device *dev, struct hwrng *rng)
  462. {
  463. struct hwrng **ptr;
  464. int error;
  465. ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
  466. if (!ptr)
  467. return -ENOMEM;
  468. error = hwrng_register(rng);
  469. if (error) {
  470. devres_free(ptr);
  471. return error;
  472. }
  473. *ptr = rng;
  474. devres_add(dev, ptr);
  475. return 0;
  476. }
  477. EXPORT_SYMBOL_GPL(devm_hwrng_register);
/* Device-managed counterpart of hwrng_unregister(). */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
  483. static int __init hwrng_modinit(void)
  484. {
  485. int ret = -ENOMEM;
  486. /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
  487. rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
  488. if (!rng_buffer)
  489. return -ENOMEM;
  490. rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
  491. if (!rng_fillbuf) {
  492. kfree(rng_buffer);
  493. return -ENOMEM;
  494. }
  495. ret = register_miscdev();
  496. if (ret) {
  497. kfree(rng_fillbuf);
  498. kfree(rng_buffer);
  499. }
  500. return ret;
  501. }
/*
 * Module exit: free the staging buffers under rng_mutex, then remove the
 * misc device.  All rngs must already be unregistered (BUG otherwise).
 */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}
/* module entry/exit hooks and metadata */
module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");