/* zcore.c */
  1. // SPDX-License-Identifier: GPL-1.0+
  2. /*
  3. * zcore module to export memory content and register sets for creating system
  4. * dumps on SCSI/NVMe disks (zfcp/nvme dump).
  5. *
  6. * For more information please refer to Documentation/arch/s390/zfcpdump.rst
  7. *
  8. * Copyright IBM Corp. 2003, 2008
  9. * Author(s): Michael Holzheu
  10. */
  11. #define KMSG_COMPONENT "zdump"
  12. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13. #include <linux/init.h>
  14. #include <linux/slab.h>
  15. #include <linux/debugfs.h>
  16. #include <linux/panic_notifier.h>
  17. #include <linux/reboot.h>
  18. #include <linux/uio.h>
  19. #include <asm/asm-offsets.h>
  20. #include <asm/ipl.h>
  21. #include <asm/sclp.h>
  22. #include <asm/setup.h>
  23. #include <linux/uaccess.h>
  24. #include <asm/debug.h>
  25. #include <asm/processor.h>
  26. #include <asm/irqflags.h>
  27. #include <asm/checksum.h>
  28. #include <asm/os_info.h>
  29. #include <asm/maccess.h>
  30. #include "sclp.h"
  31. #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
/* Architecture id of the dumped system, read from lowcore (__LC_AR_MODE_ID). */
enum arch_id {
	ARCH_S390  = 0,	/* 31-bit system - not supported by this dump tool */
	ARCH_S390X = 1,	/* 64-bit system */
};
/*
 * Layout of the IPL-information words stored at __LC_DUMP_REIPL in the
 * dumped system's lowcore: pointer to the IPL parameter block plus a
 * checksum used to validate that block.
 */
struct ipib_info {
	unsigned long	ipib;		/* address of the IPL parameter block */
	u32		checksum;	/* checksum over the IPL parameter block */
} __attribute__((packed));
static struct debug_info *zcore_dbf;			/* s390 debug feature handle for TRACE() */
static int hsa_available;				/* non-zero while the HSA may still be read */
static struct dentry *zcore_dir;			/* debugfs directory "zcore" */
static struct dentry *zcore_reipl_file;			/* debugfs file "zcore/reipl" */
static struct dentry *zcore_hsa_file;			/* debugfs file "zcore/hsa" */
static struct ipl_parameter_block *zcore_ipl_block;	/* saved IPL block used for re-IPL */
static unsigned long os_info_flags;			/* bit-flags read from the dumped system's os_info */
static DEFINE_MUTEX(hsa_buf_mutex);			/* serializes all access to hsa_buf */
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);	/* bounce buffer for SCLP SDIAS page copies */
  49. /*
  50. * Copy memory from HSA to iterator (not reentrant):
  51. *
  52. * @iter: Iterator where memory should be copied to
  53. * @src: Start address within HSA where data should be copied
  54. * @count: Size of buffer, which should be copied
  55. */
  56. size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
  57. {
  58. size_t bytes, copied, res = 0;
  59. unsigned long offset;
  60. if (!hsa_available)
  61. return 0;
  62. mutex_lock(&hsa_buf_mutex);
  63. while (count) {
  64. if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
  65. TRACE("sclp_sdias_copy() failed\n");
  66. break;
  67. }
  68. offset = src % PAGE_SIZE;
  69. bytes = min(PAGE_SIZE - offset, count);
  70. copied = copy_to_iter(hsa_buf + offset, bytes, iter);
  71. count -= copied;
  72. src += copied;
  73. res += copied;
  74. if (copied < bytes)
  75. break;
  76. }
  77. mutex_unlock(&hsa_buf_mutex);
  78. return res;
  79. }
  80. /*
  81. * Copy memory from HSA to kernel memory (not reentrant):
  82. *
  83. * @dest: Kernel or user buffer where memory should be copied to
  84. * @src: Start address within HSA where data should be copied
  85. * @count: Size of buffer, which should be copied
  86. */
  87. static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count)
  88. {
  89. struct iov_iter iter;
  90. struct kvec kvec;
  91. kvec.iov_base = dst;
  92. kvec.iov_len = count;
  93. iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
  94. if (memcpy_hsa_iter(&iter, src, count) < count)
  95. return -EIO;
  96. return 0;
  97. }
  98. static int __init init_cpu_info(void)
  99. {
  100. struct save_area *sa;
  101. /* get info for boot cpu from lowcore, stored in the HSA */
  102. sa = save_area_boot_cpu();
  103. if (!sa)
  104. return -ENOMEM;
  105. if (memcpy_hsa_kernel(hsa_buf, __LC_FPREGS_SAVE_AREA, 512) < 0) {
  106. TRACE("could not copy from HSA\n");
  107. return -EIO;
  108. }
  109. save_area_add_regs(sa, hsa_buf); /* vx registers are saved in smp.c */
  110. return 0;
  111. }
/*
 * Release the HSA
 *
 * Tell the machine via diag308 that the hardware system area is no longer
 * needed, then mark it unavailable so no further reads are attempted.
 */
static void release_hsa(void)
{
	diag308(DIAG308_REL_HSA, NULL);
	hsa_available = 0;
}
  120. static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
  121. size_t count, loff_t *ppos)
  122. {
  123. if (zcore_ipl_block) {
  124. diag308(DIAG308_SET, zcore_ipl_block);
  125. if (os_info_flags & OS_INFO_FLAG_REIPL_CLEAR)
  126. diag308(DIAG308_LOAD_CLEAR, NULL);
  127. /* Use special diag308 subcode for CCW normal ipl */
  128. if (zcore_ipl_block->pb0_hdr.pbt == IPL_PBT_CCW)
  129. diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
  130. else
  131. diag308(DIAG308_LOAD_NORMAL, NULL);
  132. }
  133. return count;
  134. }
/* debugfs "reipl" open handler: mark the file as a non-seekable stream. */
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
	return stream_open(inode, filp);
}
/* debugfs "reipl" release handler: nothing to clean up. */
static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
	return 0;
}
/* File operations for the debugfs "reipl" file (write-triggered re-IPL). */
static const struct file_operations zcore_reipl_fops = {
	.owner		= THIS_MODULE,
	.write		= zcore_reipl_write,
	.open		= zcore_reipl_open,
	.release	= zcore_reipl_release,
};
  149. static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
  150. size_t count, loff_t *ppos)
  151. {
  152. static char str[18];
  153. if (hsa_available)
  154. snprintf(str, sizeof(str), "%lx\n", sclp.hsa_size);
  155. else
  156. snprintf(str, sizeof(str), "0\n");
  157. return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
  158. }
  159. static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
  160. size_t count, loff_t *ppos)
  161. {
  162. char value;
  163. if (*ppos != 0)
  164. return -EPIPE;
  165. if (copy_from_user(&value, buf, 1))
  166. return -EFAULT;
  167. if (value != '0')
  168. return -EINVAL;
  169. release_hsa();
  170. return count;
  171. }
/* File operations for the debugfs "hsa" file (query size / release HSA). */
static const struct file_operations zcore_hsa_fops = {
	.owner	= THIS_MODULE,
	.write	= zcore_hsa_write,
	.read	= zcore_hsa_read,
	.open	= nonseekable_open,
};
  178. static int __init check_sdias(void)
  179. {
  180. if (!sclp.hsa_size) {
  181. TRACE("Could not determine HSA size\n");
  182. return -ENODEV;
  183. }
  184. return 0;
  185. }
/*
 * Provide IPL parameter information block from either HSA or memory
 * for future reipl
 */
static int __init zcore_reipl_init(void)
{
	struct os_info_entry *entry;
	struct ipib_info ipib_info;
	unsigned long os_info_addr;
	struct os_info *os_info;
	int rc;

	/* Fetch the dumped system's IPIB pointer + checksum from its lowcore. */
	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
	if (rc)
		return rc;
	if (ipib_info.ipib == 0)
		return 0;	/* no IPL block registered - nothing to save */
	zcore_ipl_block = (void *) __get_free_page(GFP_KERNEL);
	if (!zcore_ipl_block)
		return -ENOMEM;
	/* The block lives in the HSA if its address is below the HSA size. */
	if (ipib_info.ipib < sclp.hsa_size)
		rc = memcpy_hsa_kernel(zcore_ipl_block, ipib_info.ipib,
				       PAGE_SIZE);
	else
		rc = memcpy_real(zcore_ipl_block, ipib_info.ipib, PAGE_SIZE);
	/* Drop the block (but keep going) when it fails checksum validation. */
	if (rc || (__force u32)csum_partial(zcore_ipl_block, zcore_ipl_block->hdr.len, 0) !=
	    ipib_info.checksum) {
		TRACE("Checksum does not match\n");
		free_page((unsigned long) zcore_ipl_block);
		zcore_ipl_block = NULL;
	}
	/*
	 * Read the bit-flags field from os_info flags entry.
	 * Return zero even for os_info read or entry checksum errors in order
	 * to continue dump processing, considering that os_info could be
	 * corrupted on the panicked system.
	 */
	os_info = (void *)__get_free_page(GFP_KERNEL);
	if (!os_info)
		return -ENOMEM;
	rc = memcpy_hsa_kernel(&os_info_addr, __LC_OS_INFO, sizeof(os_info_addr));
	if (rc)
		goto out;
	/* Same HSA-vs-real-memory split as for the IPL block above. */
	if (os_info_addr < sclp.hsa_size)
		rc = memcpy_hsa_kernel(os_info, os_info_addr, PAGE_SIZE);
	else
		rc = memcpy_real(os_info, os_info_addr, PAGE_SIZE);
	if (rc || os_info_csum(os_info) != os_info->csum)
		goto out;
	entry = &os_info->entry[OS_INFO_FLAGS_ENTRY];
	if (entry->addr && entry->size) {
		if (entry->addr < sclp.hsa_size)
			rc = memcpy_hsa_kernel(&os_info_flags, entry->addr, sizeof(os_info_flags));
		else
			rc = memcpy_real(&os_info_flags, entry->addr, sizeof(os_info_flags));
		/* Reset flags whose checksum cannot be verified. */
		if (rc || (__force u32)csum_partial(&os_info_flags, entry->size, 0) != entry->csum)
			os_info_flags = 0;
	}
out:
	free_page((unsigned long)os_info);
	return 0;
}
  247. static int zcore_reboot_and_on_panic_handler(struct notifier_block *self,
  248. unsigned long event,
  249. void *data)
  250. {
  251. if (hsa_available)
  252. release_hsa();
  253. return NOTIFY_OK;
  254. }
/* Both chains share one callback that releases the HSA (see above). */
static struct notifier_block zcore_reboot_notifier = {
	.notifier_call	= zcore_reboot_and_on_panic_handler,
	/* we need to be notified before reipl and kdump */
	.priority	= INT_MAX,
};

static struct notifier_block zcore_on_panic_notifier = {
	.notifier_call	= zcore_reboot_and_on_panic_handler,
	/* we need to be notified before reipl and kdump */
	.priority	= INT_MAX,
};
/*
 * Module init: set up the zcore dump facility. Only runs when the system
 * was IPLed as a dump tool and is not a kdump kernel on top of old memory.
 */
static int __init zcore_init(void)
{
	unsigned char arch;
	int rc;

	if (!is_ipl_type_dump())
		return -ENODATA;
	if (oldmem_data.start)
		return -ENODATA;
	/* Set up the debug feature area used by the TRACE() macro. */
	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
	debug_register_view(zcore_dbf, &debug_sprintf_view);
	debug_set_level(zcore_dbf, 6);
	/* Trace which kind of dump device we were IPLed from. */
	if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
		TRACE("type: fcp\n");
		TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
		TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
		TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
	} else if (ipl_info.type == IPL_TYPE_NVME_DUMP) {
		TRACE("type: nvme\n");
		TRACE("fid: %x\n", ipl_info.data.nvme.fid);
		TRACE("nsid: %x\n", ipl_info.data.nvme.nsid);
	} else if (ipl_info.type == IPL_TYPE_ECKD_DUMP) {
		TRACE("type: eckd\n");
		TRACE("devno: %x\n", ipl_info.data.eckd.dev_id.devno);
		TRACE("ssid: %x\n", ipl_info.data.eckd.dev_id.ssid);
	}
	rc = sclp_sdias_init();
	if (rc)
		goto fail;
	rc = check_sdias();
	if (rc)
		goto fail;
	hsa_available = 1;
	/* Reject 31-bit systems: this dump tool handles 64-bit only. */
	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
	if (rc)
		goto fail;
	if (arch == ARCH_S390) {
		pr_alert("The 64-bit dump tool cannot be used for a "
			 "32-bit system\n");
		rc = -EINVAL;
		goto fail;
	}
	pr_alert("The dump process started for a 64-bit operating system\n");
	rc = init_cpu_info();
	if (rc)
		goto fail;
	rc = zcore_reipl_init();
	if (rc)
		goto fail;
	/* Expose the user interface: /sys/kernel/debug/zcore/{reipl,hsa}. */
	zcore_dir = debugfs_create_dir("zcore" , NULL);
	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
					       NULL, &zcore_reipl_fops);
	zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
					     NULL, &zcore_hsa_fops);
	/* Release the HSA automatically on reboot or panic. */
	register_reboot_notifier(&zcore_reboot_notifier);
	atomic_notifier_chain_register(&panic_notifier_list, &zcore_on_panic_notifier);
	return 0;
fail:
	/* On any failure, release the HSA before bailing out. */
	diag308(DIAG308_REL_HSA, NULL);
	return rc;
}

subsys_initcall(zcore_init);