events_internal.h

/*
 * Xen Event Channels (internal header)
 *
 * Copyright (C) 2013 Citrix Systems R&D Ltd.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2 or later.  See the file COPYING for more details.
 */
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__

/* Interrupt types. */
enum xen_irq_type {
        IRQT_UNBOUND = 0,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
        struct list_head list;
        struct list_head eoi_list;
        short refcnt;
        short spurious_cnt;
        short type;             /* type */
        u8 mask_reason;         /* Why is event channel masked */
#define EVT_MASK_REASON_EXPLICIT        0x01
#define EVT_MASK_REASON_TEMPORARY       0x02
#define EVT_MASK_REASON_EOI_PENDING     0x04
        u8 is_active;           /* Is event just being handled? */
        unsigned irq;
        unsigned int evtchn;    /* event channel */
        unsigned short cpu;     /* cpu bound */
        unsigned short eoi_cpu; /* EOI must happen on this cpu */
        unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
        u64 eoi_time;           /* Time in jiffies when to EOI. */
        raw_spinlock_t lock;

        union {
                unsigned short virq;
                enum ipi_vector ipi;
                struct {
                        unsigned short pirq;
                        unsigned short gsi;
                        unsigned char vector;
                        unsigned char flags;
                        uint16_t domid;
                } pirq;
        } u;
};
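
/*
 * Illustrative sketch (not part of this header): the union above is only
 * valid for the matching "type", so readers are expected to check the type
 * before touching a union member.  The helper name virq_from_irq() is an
 * assumption used here purely for illustration:
 *
 *      static unsigned short virq_from_irq(unsigned irq)
 *      {
 *              struct irq_info *info = info_for_irq(irq);
 *
 *              if (!info || info->type != IRQT_VIRQ)
 *                      return 0;       // union member not valid here
 *              return info->u.virq;
 *      }
 */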
#define PIRQ_NEEDS_EOI  (1 << 0)
#define PIRQ_SHAREABLE  (1 << 1)
#define PIRQ_MSI_GROUP  (1 << 2)
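
/*
 * Illustrative sketch (assumption, not defined in this header): the PIRQ
 * flags above live in irq_info.u.pirq.flags and would typically be tested
 * along these lines:
 *
 *      static bool pirq_flag_needs_eoi(struct irq_info *info)
 *      {
 *              return info->type == IRQT_PIRQ &&
 *                     (info->u.pirq.flags & PIRQ_NEEDS_EOI);
 *      }
 */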
struct evtchn_loop_ctrl;

struct evtchn_ops {
        unsigned (*max_channels)(void);
        unsigned (*nr_channels)(void);

        int (*setup)(struct irq_info *info);
        void (*remove)(evtchn_port_t port, unsigned int cpu);
        void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);

        void (*clear_pending)(unsigned port);
        void (*set_pending)(unsigned port);
        bool (*is_pending)(unsigned port);
        void (*mask)(unsigned port);
        void (*unmask)(unsigned port);

        void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
        void (*resume)(void);

        int (*percpu_init)(unsigned int cpu);
        int (*percpu_deinit)(unsigned int cpu);
};
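
/*
 * Illustrative sketch (an assumption about how a backend wires itself up;
 * the evtchn_2l_* function names are hypothetical here): each event-channel
 * ABI fills in one static ops table and points the global evtchn_ops at it
 * from its init function, e.g.:
 *
 *      static const struct evtchn_ops evtchn_ops_2l = {
 *              .max_channels   = evtchn_2l_max_channels,
 *              .nr_channels    = evtchn_2l_max_channels,
 *              .clear_pending  = evtchn_2l_clear_pending,
 *              .set_pending    = evtchn_2l_set_pending,
 *              .is_pending     = evtchn_2l_is_pending,
 *              .mask           = evtchn_2l_mask,
 *              .unmask         = evtchn_2l_unmask,
 *              .handle_events  = evtchn_2l_handle_events,
 *      };
 *
 *      void __init xen_evtchn_2l_init(void)
 *      {
 *              evtchn_ops = &evtchn_ops_2l;
 *      }
 */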
extern const struct evtchn_ops *evtchn_ops;

extern int **evtchn_to_irq;
int get_evtchn_to_irq(unsigned int evtchn);
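
/*
 * Illustrative sketch (assumption): evtchn_to_irq being an int ** suggests
 * a two-level table with rows allocated on demand.  A lookup could look
 * like this, with EVTCHN_ROW()/EVTCHN_COL() as hypothetical helper macros:
 *
 *      int get_evtchn_to_irq(unsigned int evtchn)
 *      {
 *              if (evtchn >= xen_evtchn_max_channels())
 *                      return -1;
 *              if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
 *                      return -1;      // row never allocated
 *              return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
 *      }
 */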
void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);

struct irq_info *info_for_irq(unsigned irq);
unsigned cpu_from_irq(unsigned irq);
unsigned cpu_from_evtchn(unsigned int evtchn);

static inline unsigned xen_evtchn_max_channels(void)
{
        return evtchn_ops->max_channels();
}
/*
 * Do any ABI specific setup for a bound event channel before it can
 * be unmasked and used.
 */
static inline int xen_evtchn_port_setup(struct irq_info *info)
{
        if (evtchn_ops->setup)
                return evtchn_ops->setup(info);
        return 0;
}
static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
                                          unsigned int cpu)
{
        if (evtchn_ops->remove)
                evtchn_ops->remove(evtchn, cpu);
}

static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
                                               unsigned cpu)
{
        evtchn_ops->bind_to_cpu(info, cpu);
}

static inline void clear_evtchn(unsigned port)
{
        evtchn_ops->clear_pending(port);
}

static inline void set_evtchn(unsigned port)
{
        evtchn_ops->set_pending(port);
}

static inline bool test_evtchn(unsigned port)
{
        return evtchn_ops->is_pending(port);
}

static inline void mask_evtchn(unsigned port)
{
        evtchn_ops->mask(port);
}

static inline void unmask_evtchn(unsigned port)
{
        evtchn_ops->unmask(port);
}
static inline void xen_evtchn_handle_events(unsigned cpu,
                                            struct evtchn_loop_ctrl *ctrl)
{
        evtchn_ops->handle_events(cpu, ctrl);
}
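
/*
 * Illustrative sketch (an assumption about the caller, which lives outside
 * this header where struct evtchn_loop_ctrl is actually defined): the
 * hypervisor upcall handler drives event delivery by handing the current
 * CPU and a loop-control cookie to the active ABI:
 *
 *      struct evtchn_loop_ctrl ctrl = { };
 *
 *      xen_evtchn_handle_events(smp_processor_id(), &ctrl);
 *
 * The ABI's handle_events() is then expected to call back into
 * handle_irq_for_port() for each pending port it finds.
 */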
static inline void xen_evtchn_resume(void)
{
        if (evtchn_ops->resume)
                evtchn_ops->resume();
}
void xen_evtchn_2l_init(void);
int xen_evtchn_fifo_init(void);
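
/*
 * Illustrative sketch (an assumption, based only on the two init hooks
 * above returning void vs. int): early setup would try the FIFO ABI first,
 * since its init can fail on hypervisors that lack it, and fall back to
 * the 2-level ABI, whose init cannot fail:
 *
 *      if (xen_evtchn_fifo_init() < 0)
 *              xen_evtchn_2l_init();
 */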
#endif /* #ifndef __EVENTS_INTERNAL_H__ */