dtl.c

/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

struct dtl {
	struct dtl_entry	*buf;		/* log buffer for this cpu */
	struct dentry		*file;		/* debugfs file for this cpu */
	int			cpu;
	int			buf_entries;	/* size of buf, in entries */
	u64			last_idx;	/* index of next entry to read */
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

/*
 * Dispatch trace log event mask:
 * 0x7: 0x1: voluntary virtual processor waits
 *      0x2: time-slice preempts
 *      0x4: virtual partition memory page faults
 */
static u8 dtl_event_mask = 0x7;
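
/*
 * dtl_event_mask is exposed read-write through debugfs (see dtl_init()
 * below), so the event set can be narrowed before a log file is opened;
 * the mask is applied to the lppaca at dtl_start() time. For example,
 * assuming debugfs is mounted at /sys/kernel/debug, tracing only
 * time-slice preempts would look like:
 *
 *	echo 0x2 > /sys/kernel/debug/powerpc/dtl/dtl_event_mask
 */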

/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static u32 dtl_buf_entries = N_DISPATCH_LOG;
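
/*
 * N_DISPATCH_LOG and DISPATCH_LOG_BYTES come from asm/lppaca.h: one 4k
 * block's worth of dtl_entry records. dtl_cache (created by the platform
 * setup code, presumably with 4k alignment) is what guarantees the
 * no-boundary-crossing requirement above.
 */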

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
	u64	write_index;
	struct dtl_entry *write_ptr;
	struct dtl_entry *buf;
	struct dtl_entry *buf_end;
	u8	saved_dtl_mask;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();

	++dtlr->write_index;
}
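
/*
 * Note on ordering in consume_dtle(): the entry is copied into the shadow
 * ring before write_index is incremented, with an smp_wmb() in between,
 * so the write side guarantees that any reader observing the new index
 * will also find the copied entry in the buffer. An entry whose index has
 * fallen a full N_DISPATCH_LOG behind vpa->dtl_idx may already have been
 * overwritten by the hypervisor mid-copy, so it is silently dropped.
 */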

static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
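
/*
 * Summary of the two variants above: with CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 * the cpu accounting code already owns the hypervisor-facing buffer, so we
 * hook dtl_consumer and shadow entries into our own per-cpu ring; without
 * it, we register our buffer directly with the hypervisor (register_dtl)
 * and read entries in place, using the lppaca's dtl_idx as the write index.
 */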

static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
				__func__, dtl->cpu);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc)
		kmem_cache_free(dtl_cache, buf);
	return rc;
}

static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

	/* reader has fallen behind; skip to the oldest entries still buffered */
	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* .. and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}
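
/*
 * A minimal userspace reader sketch, assuming the usual debugfs mount
 * point and a struct dtl_entry definition matching asm/lppaca.h (the
 * file name "cpu-0" comes from dtl_setup_file() below):
 *
 *	int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *	struct dtl_entry entries[32];
 *	ssize_t n = read(fd, entries, sizeof(entries));
 *	// each read() must be a multiple of sizeof(struct dtl_entry);
 *	// n == 0 means no new entries since the last read
 */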

static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};

static struct dentry *dtl_dir;

static int dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
	if (!dtl->file)
		return -ENOMEM;

	return 0;
}

static int dtl_init(void)
{
	struct dentry *event_mask_file, *buf_entries_file;
	int rc, i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	rc = -ENOMEM;
	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
	if (!dtl_dir) {
		printk(KERN_WARNING "%s: can't create dtl root dir\n",
				__func__);
		goto err;
	}

	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
				dtl_dir, &dtl_event_mask);
	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
				dtl_dir, &dtl_buf_entries);

	if (!event_mask_file || !buf_entries_file) {
		printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
		goto err_remove_dir;
	}

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		rc = dtl_setup_file(dtl);
		if (rc)
			goto err_remove_dir;
	}

	return 0;

err_remove_dir:
	debugfs_remove_recursive(dtl_dir);
err:
	return rc;
}
machine_arch_initcall(pseries, dtl_init);