/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __POWERNV_PCI_H
#define __POWERNV_PCI_H

#include <linux/iommu.h>
#include <asm/iommu.h>
#include <asm/msi_bitmap.h>

struct pci_dn;

/* Maximum possible number of ATSD MMIO registers per NPU */
#define NV_NMMU_ATSD_REGS	8

enum pnv_phb_type {
	PNV_PHB_IODA1		= 0,
	PNV_PHB_IODA2		= 1,
	PNV_PHB_NPU_NVLINK	= 2,
	PNV_PHB_NPU_OCAPI	= 3,
};

/* Precise PHB model for error management */
enum pnv_phb_model {
	PNV_PHB_MODEL_UNKNOWN,
	PNV_PHB_MODEL_P7IOC,
	PNV_PHB_MODEL_PHB3,
	PNV_PHB_MODEL_NPU,
	PNV_PHB_MODEL_NPU2,
};

#define PNV_PCI_DIAG_BUF_SIZE	8192

#define PNV_IODA_PE_DEV		(1 << 0)	/* PE has single PCI device	*/
#define PNV_IODA_PE_BUS		(1 << 1)	/* PE has primary PCI bus	*/
#define PNV_IODA_PE_BUS_ALL	(1 << 2)	/* PE has subordinate buses	*/
#define PNV_IODA_PE_MASTER	(1 << 3)	/* Master PE in compound case	*/
#define PNV_IODA_PE_SLAVE	(1 << 4)	/* Slave PE in compound case	*/
#define PNV_IODA_PE_VF		(1 << 5)	/* PE for one VF		*/

/* Indicates operations are frozen for a PE: MMIO in PESTA & DMA in PESTB. */
#define PNV_IODA_STOPPED_STATE	0x8000000000000000
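/*
 * Illustrative note (not part of the original header): the PE flags above
 * can be combined. A PE covering a bridge together with everything on its
 * subordinate buses would typically carry both bus-scope flags, e.g.
 *
 *	pe->flags = PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL;
 *
 * whereas a PE for a single PCI device carries PNV_IODA_PE_DEV alone.
 */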
/* Data associated with a PE, including IOMMU tracking etc. */
struct pnv_phb;
struct pnv_ioda_pe {
	unsigned long		flags;
	struct pnv_phb		*phb;
	int			device_count;

	/* A PE can be associated with a single device or an
	 * entire bus (& children). In the former case, pdev
	 * is populated; in the latter case, pbus is.
	 */
#ifdef CONFIG_PCI_IOV
	struct pci_dev		*parent_dev;
#endif
	struct pci_dev		*pdev;
	struct pci_bus		*pbus;

	/* Effective RID (device RID for a device PE and base bus
	 * RID with devfn 0 for a bus PE)
	 */
	unsigned int		rid;

	/* PE number */
	unsigned int		pe_number;

	/* "Base" iommu table, i.e. 4K TCEs, 32-bit DMA */
	struct iommu_table_group table_group;

	/* 64-bit TCE bypass region */
	bool			tce_bypass_enabled;
	uint64_t		tce_bypass_base;

	/* MSIs. MVE index is identical for 32 and 64 bit MSI
	 * and -1 if not supported. (It's actually identical to the
	 * PE number)
	 */
	int			mve_number;

	/* PEs in compound case */
	struct pnv_ioda_pe	*master;
	struct list_head	slaves;

	/* PCI peer-to-peer */
	int			p2p_initiator_count;

	/* Link in list of PE#s */
	struct list_head	list;
};
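/*
 * Usage sketch (illustrative, not from this file): callers that need the
 * PE behind a PCI device typically resolve it through pnv_ioda_get_pe(),
 * declared further down in this header, and then check pe->flags to see
 * whether the PE is device- or bus-scoped:
 *
 *	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);
 *
 *	if (pe && (pe->flags & PNV_IODA_PE_DEV))
 *		dev_dbg(&pdev->dev, "PE#%x is a single-device PE\n",
 *			pe->pe_number);
 */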
#define PNV_PHB_FLAG_EEH	(1 << 0)

struct pnv_phb {
	struct pci_controller	*hose;
	enum pnv_phb_type	type;
	enum pnv_phb_model	model;
	u64			hub_id;
	u64			opal_id;
	int			flags;
	void __iomem		*regs;
	u64			regs_phys;
	int			initialized;
	spinlock_t		lock;

#ifdef CONFIG_DEBUG_FS
	int			has_dbgfs;
	struct dentry		*dbgfs;
#endif

#ifdef CONFIG_PCI_MSI
	unsigned int		msi_base;
	unsigned int		msi32_support;
	struct msi_bitmap	msi_bmp;
#endif
	int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
			 unsigned int hwirq, unsigned int virq,
			 unsigned int is_64, struct msi_msg *msg);
	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
	void (*fixup_phb)(struct pci_controller *hose);
	int (*init_m64)(struct pnv_phb *phb);
	void (*reserve_m64_pe)(struct pci_bus *bus,
			       unsigned long *pe_bitmap, bool all);
	struct pnv_ioda_pe *(*pick_m64_pe)(struct pci_bus *bus, bool all);
	int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
	void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
	int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt);

	struct {
		/* Global bridge info */
		unsigned int		total_pe_num;
		unsigned int		reserved_pe_idx;
		unsigned int		root_pe_idx;
		bool			root_pe_populated;

		/* 32-bit MMIO window */
		unsigned int		m32_size;
		unsigned int		m32_segsize;
		unsigned int		m32_pci_base;

		/* 64-bit MMIO window */
		unsigned int		m64_bar_idx;
		unsigned long		m64_size;
		unsigned long		m64_segsize;
		unsigned long		m64_base;
		unsigned long		m64_bar_alloc;

		/* IO ports */
		unsigned int		io_size;
		unsigned int		io_segsize;
		unsigned int		io_pci_base;

		/* PE allocation */
		struct mutex		pe_alloc_mutex;
		unsigned long		*pe_alloc;
		struct pnv_ioda_pe	*pe_array;

		/* M32 & IO segment maps */
		unsigned int		*m64_segmap;
		unsigned int		*m32_segmap;
		unsigned int		*io_segmap;

		/* DMA32 segment maps - IODA1 only */
		unsigned int		dma32_count;
		unsigned int		*dma32_segmap;

		/* IRQ chip */
		int			irq_chip_init;
		struct irq_chip		irq_chip;

		/* Sorted list of used PEs based
		 * on the sequence of creation
		 */
		struct list_head	pe_list;
		struct mutex		pe_list_mutex;

		/* Reverse map of PEs, indexed by {bus, devfn} */
		unsigned int		pe_rmap[0x10000];
	} ioda;

	/* PHB and hub diagnostics */
	unsigned int		diag_data_size;
	u8			*diag_data;

	/* Nvlink2 data */
	struct npu {
		int index;
		__be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
		unsigned int mmio_atsd_count;

		/* Bitmask for MMIO register usage */
		unsigned long mmio_atsd_usage;

		/* Do we need to explicitly flush the nest mmu? */
		bool nmmu_flush;
	} npu;

	int p2p_target_count;
};
extern struct pci_ops pnv_pci_ops;

void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff);
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val);
int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val);

extern struct iommu_table *pnv_pci_table_alloc(int nid);

extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu_phb(struct device_node *np);
extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);

extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
extern void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
extern int pnv_eeh_post_init(void);

extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			    const char *fmt, ...);
#define pe_err(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__)
#define pe_warn(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__)
#define pe_info(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)
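/*
 * Usage sketch (illustrative, not from this file): the pe_err/pe_warn/
 * pe_info wrappers route through pe_level_printk() so that a message is
 * tagged with the PE it concerns rather than only the device, e.g.
 *
 *	pe_warn(pe, "Frozen PE detected, state=%d\n", state);
 *
 * where "state" stands in for whatever PE state the caller has at hand.
 */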
/* Nvlink functions */
extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe);
extern long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
		struct iommu_table *tbl);
extern long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num);
extern void pnv_npu_take_ownership(struct pnv_ioda_pe *npe);
extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
extern int pnv_npu2_init(struct pnv_phb *phb);

/* pci-ioda-tce.c */
#define POWERNV_IOMMU_DEFAULT_LEVELS	2
#define POWERNV_IOMMU_MAX_LEVELS	5

extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		unsigned long attrs);
extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction,
		bool alloc);
extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index,
		bool alloc);
extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);

extern long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
		__u32 page_shift, __u64 window_size, __u32 levels,
		bool alloc_userspace_copy, struct iommu_table *tbl);
extern void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
extern long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group);
extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group);
extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
		void *tce_mem, u64 tce_size,
		u64 dma_offset, unsigned int page_shift);

#endif /* __POWERNV_PCI_H */