mtdoops.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * MTD Oops/Panic logger
 *
 * Copyright © 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/timekeeping.h>
#include <linux/mtd/mtd.h>
#include <linux/kmsg_dump.h>

/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)

static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"record size for MTD OOPS pages in bytes (default 4096)");

static char mtddev[80];
module_param_string(mtddev, mtddev, 80, 0400);
MODULE_PARM_DESC(mtddev,
		"name or index number of the MTD device to use");

static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
		"set to 1 to dump oopses, 0 to only dump panics (default 1)");

#define MTDOOPS_KERNMSG_MAGIC_v1 0x5d005d00 /* Original */
#define MTDOOPS_KERNMSG_MAGIC_v2 0x5d005e00 /* Adds the timestamp */
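
/*
 * Header prepended to every saved record.  v1 records predate the
 * timestamp field; the scan in find_next_position() accepts both magics.
 */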
struct mtdoops_hdr {
	u32 seq;
	u32 magic;
	ktime_t timestamp;
} __packed;
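
/*
 * All driver state lives in a single static context: mtdoops attaches
 * to at most one MTD device at a time.
 */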
static struct mtdoops_context {
	struct kmsg_dumper dump;

	int mtd_index;
	struct work_struct work_erase;
	struct work_struct work_write;
	struct mtd_info *mtd;
	int oops_pages;
	int nextpage;
	int nextcount;
	unsigned long *oops_page_used;
	unsigned long oops_buf_busy;

	void *oops_buf;
} oops_cxt;
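
/* oops_page_used is a bitmap with one bit per record-sized page. */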
static void mark_page_used(struct mtdoops_context *cxt, int page)
{
	set_bit(page, cxt->oops_page_used);
}

static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
	clear_bit(page, cxt->oops_page_used);
}

static int page_is_used(struct mtdoops_context *cxt, int page)
{
	return test_bit(page, cxt->oops_page_used);
}
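
/*
 * Erase the eraseblock containing @offset and mark all of its record
 * pages as unused again.
 */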
static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
	u32 start_page = start_page_offset / record_size;
	u32 erase_pages = mtd->erasesize / record_size;
	struct erase_info erase;
	int ret;
	int page;

	erase.addr = offset;
	erase.len = mtd->erasesize;

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		pr_warn("erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
			(unsigned long long)erase.addr,
			(unsigned long long)erase.len, mtddev);
		return ret;
	}

	/* Mark pages as unused */
	for (page = start_page; page < start_page + erase_pages; page++)
		mark_page_unused(cxt, page);

	return 0;
}

static void mtdoops_erase(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	int i = 0, j, ret, mod;

	/* We were unregistered */
	if (!mtd)
		return;

	mod = (cxt->nextpage * record_size) % mtd->erasesize;
	if (mod != 0) {
		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
	}

	while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
		pr_warn("bad block at %08lx\n",
			cxt->nextpage * record_size);
		i++;
		cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
		if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
			pr_err("all blocks bad!\n");
			return;
		}
	}

	if (ret < 0) {
		pr_err("mtd_block_isbad failed, aborting\n");
		return;
	}
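
	/* Retry the erase a few times before treating the block as bad. */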
	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);

	if (ret >= 0) {
		pr_debug("ready %d, %d\n",
			 cxt->nextpage, cxt->nextcount);
		return;
	}

	if (ret == -EIO) {
		ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
		if (ret < 0 && ret != -EOPNOTSUPP) {
			pr_err("block_markbad failed, aborting\n");
			return;
		}
	}
	goto badblock;
}

/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_erase);

	mtdoops_erase(cxt);
}
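
/*
 * Advance the sequence counter and the next-page pointer, wrapping at the
 * end of the partition.  If the new page still holds an old record it is
 * erased first: immediately when panicking, via the workqueue otherwise.
 */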
static void mtdoops_inc_counter(struct mtdoops_context *cxt, int panic)
{
	cxt->nextpage++;
	if (cxt->nextpage >= cxt->oops_pages)
		cxt->nextpage = 0;
	cxt->nextcount++;
	if (cxt->nextcount == 0xffffffff)
		cxt->nextcount = 0;

	if (page_is_used(cxt, cxt->nextpage)) {
		pr_debug("not ready %d, %d (erase %s)\n",
			 cxt->nextpage, cxt->nextcount,
			 panic ? "immediately" : "scheduled");
		if (panic) {
			/* In case of panic, erase immediately */
			mtdoops_erase(cxt);
		} else {
			/* Otherwise, schedule work to erase it "nicely" */
			schedule_work(&cxt->work_erase);
		}
	} else {
		pr_debug("ready %d, %d (no erase)\n",
			 cxt->nextpage, cxt->nextcount);
	}
}
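
/*
 * Write oops_buf to flash.  The panic path uses mtd_panic_write(), as the
 * regular write path cannot be relied upon once the kernel has panicked.
 */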
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	struct mtdoops_hdr *hdr;
	int ret;

	if (test_and_set_bit(0, &cxt->oops_buf_busy))
		return;

	/* Add mtdoops header to the buffer */
	hdr = (struct mtdoops_hdr *)cxt->oops_buf;
	hdr->seq = cxt->nextcount;
	hdr->magic = MTDOOPS_KERNMSG_MAGIC_v2;
	hdr->timestamp = ktime_get_real();

	if (panic) {
		ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
				      record_size, &retlen, cxt->oops_buf);
		if (ret == -EOPNOTSUPP) {
			pr_err("Cannot write from panic without panic_write\n");
			goto out;
		}
	} else
		ret = mtd_write(mtd, cxt->nextpage * record_size,
				record_size, &retlen, cxt->oops_buf);

	if (retlen != record_size || ret < 0)
		pr_err("write failure at %ld (%td of %ld written), error %d\n",
		       cxt->nextpage * record_size, retlen, record_size, ret);
	mark_page_used(cxt, cxt->nextpage);
	memset(cxt->oops_buf, 0xff, record_size);

	mtdoops_inc_counter(cxt, panic);
out:
	clear_bit(0, &cxt->oops_buf_busy);
}

static void mtdoops_workfunc_write(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_write);

	mtdoops_write(cxt, 0);
}
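
/*
 * Scan the partition for the record with the highest sequence number and
 * resume writing at the page that follows it.
 */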
static void find_next_position(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	struct mtdoops_hdr hdr;
	int ret, page, maxpos = 0;
	u32 maxcount = 0xffffffff;
	size_t retlen;

	for (page = 0; page < cxt->oops_pages; page++) {
		if (mtd_block_isbad(mtd, page * record_size))
			continue;
		/* Assume the page is used */
		mark_page_used(cxt, page);
		ret = mtd_read(mtd, page * record_size, sizeof(hdr),
			       &retlen, (u_char *)&hdr);
		if (retlen != sizeof(hdr) ||
				(ret < 0 && !mtd_is_bitflip(ret))) {
			pr_err("read failure at %ld (%zu of %zu read), err %d\n",
			       page * record_size, retlen, sizeof(hdr), ret);
			continue;
		}

		if (hdr.seq == 0xffffffff && hdr.magic == 0xffffffff)
			mark_page_unused(cxt, page);
		if (hdr.seq == 0xffffffff ||
		    (hdr.magic != MTDOOPS_KERNMSG_MAGIC_v1 &&
		     hdr.magic != MTDOOPS_KERNMSG_MAGIC_v2))
			continue;
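
		/*
		 * Keep the highest sequence number seen so far, allowing for
		 * the 32-bit counter wrapping: a small seq can beat a very
		 * large maxcount when the counter has recently wrapped.
		 */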
		if (maxcount == 0xffffffff) {
			maxcount = hdr.seq;
			maxpos = page;
		} else if (hdr.seq < 0x40000000 && maxcount > 0xc0000000) {
			maxcount = hdr.seq;
			maxpos = page;
		} else if (hdr.seq > maxcount && hdr.seq < 0xc0000000) {
			maxcount = hdr.seq;
			maxpos = page;
		} else if (hdr.seq > maxcount && hdr.seq > 0xc0000000
					&& maxcount > 0x80000000) {
			maxcount = hdr.seq;
			maxpos = page;
		}
	}
	if (maxcount == 0xffffffff) {
		cxt->nextpage = cxt->oops_pages - 1;
		cxt->nextcount = 0;
	} else {
		cxt->nextpage = maxpos;
		cxt->nextcount = maxcount;
	}

	mtdoops_inc_counter(cxt, 0);
}
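
/*
 * kmsg dump callback: snapshot the tail of the kernel log into oops_buf
 * and write it out, immediately for panics, via the workqueue otherwise.
 */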
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
			    struct kmsg_dump_detail *detail)
{
	struct mtdoops_context *cxt = container_of(dumper,
			struct mtdoops_context, dump);
	struct kmsg_dump_iter iter;

	/* Only dump oopses if dump_oops is set */
	if (detail->reason == KMSG_DUMP_OOPS && !dump_oops)
		return;

	kmsg_dump_rewind(&iter);

	if (test_and_set_bit(0, &cxt->oops_buf_busy))
		return;
	kmsg_dump_get_buffer(&iter, true,
			     cxt->oops_buf + sizeof(struct mtdoops_hdr),
			     record_size - sizeof(struct mtdoops_hdr), NULL);
	clear_bit(0, &cxt->oops_buf_busy);

	if (detail->reason != KMSG_DUMP_OOPS) {
		/* Panics must be written immediately */
		mtdoops_write(cxt, 1);
	} else {
		/* For other cases, schedule work to write it "nicely" */
		schedule_work(&cxt->work_write);
	}
}
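
/*
 * MTD notifier: called for each MTD device in the system.  Attach if the
 * device matches mtddev by name or index and passes the size checks.
 */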
static void mtdoops_notify_add(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;
	u64 mtdoops_pages = div_u64(mtd->size, record_size);
	int err;

	if (!strcmp(mtd->name, mtddev))
		cxt->mtd_index = mtd->index;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (mtd->size < mtd->erasesize * 2) {
		pr_err("MTD partition %d not big enough for mtdoops\n",
		       mtd->index);
		return;
	}
	if (mtd->erasesize < record_size) {
		pr_err("eraseblock size of MTD partition %d too small\n",
		       mtd->index);
		return;
	}
	if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
		pr_err("mtd%d is too large (limit is %d MiB)\n",
		       mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
		return;
	}

	/* oops_page_used is a bit field */
	cxt->oops_page_used =
		vmalloc(array_size(sizeof(unsigned long),
				   DIV_ROUND_UP(mtdoops_pages,
						BITS_PER_LONG)));
	if (!cxt->oops_page_used) {
		pr_err("could not allocate page array\n");
		return;
	}

	cxt->dump.max_reason = KMSG_DUMP_OOPS;
	cxt->dump.dump = mtdoops_do_dump;
	err = kmsg_dump_register(&cxt->dump);
	if (err) {
		pr_err("registering kmsg dumper failed, error %d\n", err);
		vfree(cxt->oops_page_used);
		cxt->oops_page_used = NULL;
		return;
	}

	cxt->mtd = mtd;
	cxt->oops_pages = (int)mtd->size / record_size;
	find_next_position(cxt);
	pr_info("Attached to MTD device %d\n", mtd->index);
}
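
/* Detach from the device and flush any erase/write work still queued. */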
static void mtdoops_notify_remove(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (kmsg_dump_unregister(&cxt->dump) < 0)
		pr_warn("could not unregister kmsg_dumper\n");

	cxt->mtd = NULL;
	flush_work(&cxt->work_erase);
	flush_work(&cxt->work_write);
}

static struct mtd_notifier mtdoops_notifier = {
	.add	= mtdoops_notify_add,
	.remove	= mtdoops_notify_remove,
};
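
/*
 * Validate the module parameters, allocate the record buffer and register
 * the MTD notifier; attachment happens once the device shows up.
 */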
static int __init mtdoops_init(void)
{
	struct mtdoops_context *cxt = &oops_cxt;
	int mtd_index;
	char *endp;

	if (strlen(mtddev) == 0) {
		pr_err("mtd device (mtddev=name/number) must be supplied\n");
		return -EINVAL;
	}
	if ((record_size & 4095) != 0) {
		pr_err("record_size must be a multiple of 4096\n");
		return -EINVAL;
	}
	if (record_size < 4096) {
		pr_err("record_size must be at least 4096 bytes\n");
		return -EINVAL;
	}

	/* Setup the MTD device to use */
	cxt->mtd_index = -1;
	mtd_index = simple_strtoul(mtddev, &endp, 0);
	if (*endp == '\0')
		cxt->mtd_index = mtd_index;

	cxt->oops_buf = vmalloc(record_size);
	if (!cxt->oops_buf)
		return -ENOMEM;
	memset(cxt->oops_buf, 0xff, record_size);
	cxt->oops_buf_busy = 0;

	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

	register_mtd_user(&mtdoops_notifier);
	return 0;
}

static void __exit mtdoops_exit(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	unregister_mtd_user(&mtdoops_notifier);
	vfree(cxt->oops_buf);
	vfree(cxt->oops_page_used);
}

module_init(mtdoops_init);
module_exit(mtdoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");