mmio-mod.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 * Jeff Muizelaar, 2006, 2007
 * Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#define DEBUG 1

#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/pgtable.h>
#include <linux/mmiotrace.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"

struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	resource_size_t phys;
	unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);	/* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */

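/*
 * A sketch of the rules above, mirroring what mmiotrace_printk() does
 * further down (not a new interface): any path that may race with
 * enable/disable takes trace_lock and re-checks before reporting:
 *
 *	spin_lock_irq(&trace_lock);
 *	if (is_enabled())
 *		mmio_trace_rw(my_trace);
 *	spin_unlock_irq(&trace_lock);
 */
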
/* module parameters */
static unsigned long	filter_offset;
static bool		nommiotrace;
static bool		trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");

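/*
 * Example (assuming mmiotrace is built into the kernel, so these
 * parameters take the "mmiotrace." prefix on the kernel command line):
 *
 *	mmiotrace.trace_pc=1 mmiotrace.filter_offset=0xfd000000
 *
 * records faulting instruction addresses and traces only the mapping
 * whose physical start address is 0xfd000000 (an arbitrary value).
 */
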
static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}

static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("Error in %s: no pte for page 0x%08lx\n",
		       __func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
			 address);
		BUG();
	}
	pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
		address,
		(unsigned long long)pte_val(*pte),
		(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);

	pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
		 addr, my_reason->addr);
	print_pte(addr);
	pr_emerg("faulting IP is at %pS\n", (void *)regs->ip);
	pr_emerg("last faulting IP was at %pS\n", (void *)my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		 regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx   rcx: %016lx   rdx: %016lx\n",
		 regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}

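/*
 * kmmio pre-handler: runs from the page fault, before the faulting
 * instruction is single-stepped over the armed page. Decode the
 * instruction and fill in the per-cpu trace record; the value of a
 * read is not known yet, so post() completes it after the step.
 */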
static void pre(struct kmmio_probe *p, struct pt_regs *regs,
		unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->private;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */
	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
							*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

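/*
 * kmmio post-handler: runs after the faulting instruction has been
 * single-stepped. For a read, the destination register now holds the
 * value fetched from the device; record it and push the completed
 * event to the trace buffer.
 */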
static void post(struct kmmio_probe *p, unsigned long condition,
		 struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg("unexpected post handler\n");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

static void ioremap_trace_core(resource_size_t offset, unsigned long size,
			       void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	/* These are page-unaligned. */
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err("kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.private = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled()) {
		kfree(trace);
		goto not_enabled;
	}

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}

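/*
 * Hook called from the arch ioremap() path for each new MMIO mapping.
 * The unlocked is_enabled() test below is only a fast-path filter;
 * ioremap_trace_core() re-checks under trace_lock.
 */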
void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
		       void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
		 (unsigned long long)offset, size, addr);
	if (filter_offset && offset != filter_offset)
		return;
	ioremap_trace_core(offset, size, addr);
}

static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug("Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}

void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}

int mmiotrace_printk(const char *fmt, ...)
{
	int ret = 0;
	va_list args;
	unsigned long flags;

	va_start(args, fmt);

	spin_lock_irqsave(&trace_lock, flags);
	if (is_enabled())
		ret = mmio_trace_printk(fmt, args);
	spin_unlock_irqrestore(&trace_lock, flags);

	va_end(args);
	return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);

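/*
 * Example (hypothetical driver code; "regs" and MY_CTRL_REG are
 * made-up names): a driver can mark points of interest in the trace
 * stream around an MMIO access:
 *
 *	mmiotrace_printk("resetting engine, ctrl=0x%08x\n", ctrl);
 *	iowrite32(ctrl, regs + MY_CTRL_REG);
 *
 * The marker only reaches the log while tracing is enabled.
 */
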
static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
			  trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}

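/*
 * kmmio disarms the traced page while the faulting instruction is
 * single-stepped, so a concurrent access from another CPU would go
 * unnoticed. Run with a single CPU online while tracing; this is
 * best-effort, hence the warnings when other CPUs stay up.
 */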
#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;

static void enter_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) &&
	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
		pr_notice("Failed to allocate mask\n");
		goto out;
	}

	get_online_cpus();
	cpumask_copy(downed_cpus, cpu_online_mask);
	cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
	if (num_online_cpus() > 1)
		pr_notice("Disabling non-boot CPUs...\n");
	put_online_cpus();

	for_each_cpu(cpu, downed_cpus) {
		err = cpu_down(cpu);
		if (!err)
			pr_info("CPU%d is down.\n", cpu);
		else
			pr_err("Error taking CPU%d down: %d\n", cpu, err);
	}
out:
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs still online, may miss events.\n");
}

static void leave_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
		return;
	pr_notice("Re-enabling CPUs...\n");
	for_each_cpu(cpu, downed_cpus) {
		err = cpu_up(cpu);
		if (!err)
			pr_info("enabled CPU%d.\n", cpu);
		else
			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
	}
}

#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs are online, may miss events. "
			"Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif

void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	if (nommiotrace)
		pr_info("MMIO tracing disabled.\n");
	kmmio_init();
	enter_uniprocessor();
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info("enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}

void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	leave_uniprocessor();
	kmmio_cleanup();
	pr_info("disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}
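
/*
 * Usage sketch, per Documentation/trace/mmiotrace.rst (paths assume
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
 *	...load and exercise the driver to be traced...
 *	echo nop > /sys/kernel/debug/tracing/current_tracer
 *
 * Selecting the tracer is what ultimately calls enable_mmiotrace();
 * switching back to "nop" calls disable_mmiotrace().
 */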