uv_irq.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/export.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/irqdomain.h>
#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	unsigned long	offset;
	int		pnode;
};
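
/*
 * Encode the vector, delivery/destination mode and target APIC id into a
 * UV IO-APIC routing entry and write it to the hub MMR sourcing this irq.
 */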
static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= cfg->dest_apicid;

	uv_write_global_mmr64(info->pnode, info->offset, mmr_value);
}
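
/* No per-irq mask/unmask work is needed here; these callbacks are no-ops. */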
static void uv_noop(struct irq_data *data) { }
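
/*
 * Forward the affinity change to the parent vector domain; on success,
 * reprogram the hub MMR for the new target and retire the old vector.
 */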
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_data *parent = data->parent_data;
	struct irq_cfg *cfg = irqd_cfg(data);
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0) {
		uv_program_mmr(cfg, data->chip_data);
		send_cleanup_vector(cfg);
	}

	return ret;
}
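
/* UV chip callbacks: EOI acks the local APIC, affinity reprograms the MMR. */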
static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= apic_ack_irq,
	.irq_set_affinity	= uv_set_irq_affinity,
};
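
/*
 * Allocate a single UV irq: reserve a vector from the parent domain, record
 * the sourcing hub's pnode and MMR offset as chip data, and either pin the
 * irq (IRQ_NO_BALANCING) for UV_AFFINITY_CPU or let it be moved in process
 * context (IRQ_MOVE_PCNTXT) otherwise.
 */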
static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs, void *arg)
{
	struct uv_irq_2_mmr_pnode *chip_data;
	struct irq_alloc_info *info = arg;
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (nr_irqs > 1 || !info || info->type != X86_IRQ_ALLOC_TYPE_UV)
		return -EINVAL;

	chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
				 irq_data_get_node(irq_data));
	if (!chip_data)
		return -ENOMEM;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret >= 0) {
		if (info->uv_limit == UV_AFFINITY_CPU)
			irq_set_status_flags(virq, IRQ_NO_BALANCING);
		else
			irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

		chip_data->pnode = uv_blade_to_pnode(info->uv_blade);
		chip_data->offset = info->uv_offset;
		irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data,
				    handle_percpu_irq, NULL, info->uv_name);
	} else {
		kfree(chip_data);
	}

	return ret;
}
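
/* Undo uv_domain_alloc(): drop chip data, status flags and parent resources. */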
static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs)
{
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);

	BUG_ON(nr_irqs != 1);
	kfree(irq_data->chip_data);
	irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
	irq_clear_status_flags(virq, IRQ_NO_BALANCING);
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int uv_domain_activate(struct irq_domain *domain,
			      struct irq_data *irq_data, bool reserve)
{
	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
	return 0;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void uv_domain_deactivate(struct irq_domain *domain,
				 struct irq_data *irq_data)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;
	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
}

static const struct irq_domain_ops uv_domain_ops = {
	.alloc		= uv_domain_alloc,
	.free		= uv_domain_free,
	.activate	= uv_domain_activate,
	.deactivate	= uv_domain_deactivate,
};
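
/*
 * Create the "UV-CORE" irq domain on first use and parent it to the x86
 * vector domain; the mutex serializes concurrent first callers.
 */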
static struct irq_domain *uv_get_irq_domain(void)
{
	static struct irq_domain *uv_domain;
	static DEFINE_MUTEX(uv_lock);
	struct fwnode_handle *fn;

	mutex_lock(&uv_lock);
	if (uv_domain)
		goto out;

	fn = irq_domain_alloc_named_fwnode("UV-CORE");
	if (!fn)
		goto out;

	uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
	if (uv_domain)
		uv_domain->parent = x86_vector_domain;
	else
		irq_domain_free_fwnode(fn);
out:
	mutex_unlock(&uv_lock);
	return uv_domain;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	struct irq_alloc_info info;
	struct irq_domain *domain = uv_get_irq_domain();

	if (!domain)
		return -ENOMEM;

	init_irq_alloc_info(&info, cpumask_of(cpu));
	info.type = X86_IRQ_ALLOC_TYPE_UV;
	info.uv_limit = limit;
	info.uv_blade = mmr_blade;
	info.uv_offset = mmr_offset;
	info.uv_name = irq_name;

	return irq_domain_alloc_irqs(domain, 1,
				     uv_blade_to_memory_nid(mmr_blade), &info);
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * Pass the irq value that was returned by uv_setup_irq().
 */
void uv_teardown_irq(unsigned int irq)
{
	irq_domain_free_irqs(irq, 1);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
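
/*
 * Usage sketch (not part of the original file): a hypothetical driver could
 * wire a handler to a hub-sourced MSI roughly as below, mirroring how
 * uv_setup_irq()/uv_teardown_irq() pair with request_irq()/free_irq(). The
 * "uv-example" names, blade, and MMR offset parameters are illustrative
 * placeholders, not real values.
 */
#if 0
static irqreturn_t example_uv_handler(int irq, void *dev_id)
{
	/* Handle the device event signalled through the hub MMR. */
	return IRQ_HANDLED;
}

static int example_uv_attach(int cpu, int blade, unsigned long mmr_offset)
{
	/* Returns the allocated irq number, or a negative errno. */
	int irq = uv_setup_irq("uv-example", cpu, blade, mmr_offset,
			       UV_AFFINITY_CPU);

	if (irq < 0)
		return irq;

	if (request_irq(irq, example_uv_handler, 0, "uv-example", NULL)) {
		uv_teardown_irq(irq);	/* release the mapping on failure */
		return -EBUSY;
	}

	return irq;
}

static void example_uv_detach(int irq)
{
	free_irq(irq, NULL);	/* release the handler first */
	uv_teardown_irq(irq);	/* then the irq/vector mapping */
}
#endif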