/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef __CXL_H__
#define __CXL_H__

#include <linux/libnvdimm.h>
#include <linux/bitfield.h>
#include <linux/notifier.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/node.h>
#include <linux/io.h>

extern const struct nvdimm_security_ops *cxl_security_ops;
/**
 * DOC: cxl objects
 *
 * The CXL core objects like ports, decoders, and regions are shared
 * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers
 * (port-driver, region-driver, nvdimm object-drivers... etc).
 */
/* CXL 2.0 8.2.4 CXL Component Register Layout and Definition */
#define CXL_COMPONENT_REG_BLOCK_SIZE SZ_64K

/* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers */
#define CXL_CM_OFFSET 0x1000
#define CXL_CM_CAP_HDR_OFFSET 0x0
#define CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0)
#define CM_CAP_HDR_CAP_ID 1
#define CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16)
#define CM_CAP_HDR_CAP_VERSION 1
#define CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20)
#define CM_CAP_HDR_CACHE_MEM_VERSION 1
#define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24)
#define CXL_CM_CAP_PTR_MASK GENMASK(31, 20)

#define CXL_CM_CAP_CAP_ID_RAS 0x2
#define CXL_CM_CAP_CAP_ID_HDM 0x5
#define CXL_CM_CAP_CAP_HDM_VERSION 1
/* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */
#define CXL_HDM_DECODER_CAP_OFFSET 0x0
#define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0)
#define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
#define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8)
#define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9)
#define CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY BIT(11)
#define CXL_HDM_DECODER_INTERLEAVE_16_WAY BIT(12)
#define CXL_HDM_DECODER_CTRL_OFFSET 0x4
#define CXL_HDM_DECODER_ENABLE BIT(1)
#define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10)
#define CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i) (0x20 * (i) + 0x14)
#define CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i) (0x20 * (i) + 0x18)
#define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i) (0x20 * (i) + 0x1c)
#define CXL_HDM_DECODER0_CTRL_OFFSET(i) (0x20 * (i) + 0x20)
#define CXL_HDM_DECODER0_CTRL_IG_MASK GENMASK(3, 0)
#define CXL_HDM_DECODER0_CTRL_IW_MASK GENMASK(7, 4)
#define CXL_HDM_DECODER0_CTRL_LOCK BIT(8)
#define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
#define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
#define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11)
#define CXL_HDM_DECODER0_CTRL_HOSTONLY BIT(12)
#define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
#define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)
#define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i)
#define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i)

/* HDM decoder control register constants CXL 3.0 8.2.5.19.7 */
#define CXL_DECODER_MIN_GRANULARITY 256
#define CXL_DECODER_MAX_ENCODED_IG 6

static inline int cxl_hdm_decoder_count(u32 cap_hdr)
{
	int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr);

	return val ? val * 2 : 1;
}
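
/*
 * Worked example (illustrative): the count field is an encoding, not a
 * raw count. An encoding of 0 means a single decoder and any other
 * value n means n * 2 decoders, so:
 *
 *	cxl_hdm_decoder_count(FIELD_PREP(CXL_HDM_DECODER_COUNT_MASK, 0)) == 1
 *	cxl_hdm_decoder_count(FIELD_PREP(CXL_HDM_DECODER_COUNT_MASK, 3)) == 6
 */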

/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
static inline int eig_to_granularity(u16 eig, unsigned int *granularity)
{
	if (eig > CXL_DECODER_MAX_ENCODED_IG)
		return -EINVAL;
	*granularity = CXL_DECODER_MIN_GRANULARITY << eig;
	return 0;
}
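
/*
 * Worked example (illustrative): granularity = 256 << eig, so an
 * encoded interleave granularity of 2 decodes to 1024 bytes, and any
 * eig above CXL_DECODER_MAX_ENCODED_IG (6, i.e. 16K) is rejected:
 *
 *	unsigned int g;
 *
 *	eig_to_granularity(2, &g);	returns 0, g == 1024
 *	eig_to_granularity(7, &g);	returns -EINVAL
 */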

/* Encode defined in CXL ECN "3, 6, 12 and 16-way memory Interleaving" */
static inline int eiw_to_ways(u8 eiw, unsigned int *ways)
{
	switch (eiw) {
	case 0 ... 4:
		*ways = 1 << eiw;
		break;
	case 8 ... 10:
		*ways = 3 << (eiw - 8);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
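
/*
 * Worked example (illustrative): encodings 0..4 are the power-of-2
 * interleaves (1, 2, 4, 8, 16 ways); the ECN adds encodings 8..10 for
 * the 3, 6 and 12-way interleaves:
 *
 *	eiw == 2  ->  *ways = 1 << 2 = 4
 *	eiw == 9  ->  *ways = 3 << 1 = 6
 *	eiw == 5  ->  -EINVAL (no such encoding)
 */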

static inline int granularity_to_eig(int granularity, u16 *eig)
{
	if (granularity > SZ_16K || granularity < CXL_DECODER_MIN_GRANULARITY ||
	    !is_power_of_2(granularity))
		return -EINVAL;
	*eig = ilog2(granularity) - 8;
	return 0;
}
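
/*
 * Worked example (illustrative): the inverse of eig_to_granularity().
 * granularity_to_eig(1024, &eig) stores ilog2(1024) - 8 = 2; a value
 * that is not a power of 2, below 256, or above SZ_16K returns -EINVAL.
 */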

static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
{
	if (ways > 16)
		return -EINVAL;
	if (is_power_of_2(ways)) {
		*eiw = ilog2(ways);
		return 0;
	}
	if (ways % 3)
		return -EINVAL;
	ways /= 3;
	if (!is_power_of_2(ways))
		return -EINVAL;
	*eiw = ilog2(ways) + 8;
	return 0;
}
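
/*
 * Worked example (illustrative): 12 is not a power of 2, but
 * 12 / 3 == 4 is, so ways_to_eiw(12, &eiw) stores ilog2(4) + 8 = 10,
 * round-tripping with eiw_to_ways() above. ways_to_eiw(5, &eiw) fails
 * with -EINVAL since 5 is neither a power of 2 nor 3 * 2^n.
 */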

/* RAS Registers CXL 2.0 8.2.5.9 CXL RAS Capability Structure */
#define CXL_RAS_UNCORRECTABLE_STATUS_OFFSET 0x0
#define CXL_RAS_UNCORRECTABLE_STATUS_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_UNCORRECTABLE_MASK_OFFSET 0x4
#define CXL_RAS_UNCORRECTABLE_MASK_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK BIT(8)
#define CXL_RAS_UNCORRECTABLE_SEVERITY_OFFSET 0x8
#define CXL_RAS_UNCORRECTABLE_SEVERITY_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_CORRECTABLE_STATUS_OFFSET 0xC
#define CXL_RAS_CORRECTABLE_STATUS_MASK GENMASK(6, 0)
#define CXL_RAS_CORRECTABLE_MASK_OFFSET 0x10
#define CXL_RAS_CORRECTABLE_MASK_MASK GENMASK(6, 0)
#define CXL_RAS_CAP_CONTROL_OFFSET 0x14
#define CXL_RAS_CAP_CONTROL_FE_MASK GENMASK(5, 0)
#define CXL_RAS_HEADER_LOG_OFFSET 0x18
#define CXL_RAS_CAPABILITY_LENGTH 0x58
#define CXL_HEADERLOG_SIZE SZ_512
#define CXL_HEADERLOG_SIZE_U32 (SZ_512 / sizeof(u32))

/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define CXLDEV_CAP_ARRAY_CAP_ID 0
#define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0)
#define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32)

/* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */
#define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0)

/* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */
#define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1
#define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2
#define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3
#define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000

/* CXL 3.0 8.2.8.3.1 Event Status Register */
#define CXLDEV_DEV_EVENT_STATUS_OFFSET 0x00
#define CXLDEV_EVENT_STATUS_INFO BIT(0)
#define CXLDEV_EVENT_STATUS_WARN BIT(1)
#define CXLDEV_EVENT_STATUS_FAIL BIT(2)
#define CXLDEV_EVENT_STATUS_FATAL BIT(3)
#define CXLDEV_EVENT_STATUS_ALL (CXLDEV_EVENT_STATUS_INFO |	\
				 CXLDEV_EVENT_STATUS_WARN |	\
				 CXLDEV_EVENT_STATUS_FAIL |	\
				 CXLDEV_EVENT_STATUS_FATAL)

/* CXL rev 3.0 section 8.2.9.2.4; Table 8-52 */
#define CXLDEV_EVENT_INT_MODE_MASK GENMASK(1, 0)
#define CXLDEV_EVENT_INT_MSGNUM_MASK GENMASK(7, 4)

/* CXL 2.0 8.2.8.4 Mailbox Registers */
#define CXLDEV_MBOX_CAPS_OFFSET 0x00
#define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
#define CXLDEV_MBOX_CAP_BG_CMD_IRQ BIT(6)
#define CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK GENMASK(10, 7)
#define CXLDEV_MBOX_CTRL_OFFSET 0x04
#define CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
#define CXLDEV_MBOX_CTRL_BG_CMD_IRQ BIT(2)
#define CXLDEV_MBOX_CMD_OFFSET 0x08
#define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
#define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
#define CXLDEV_MBOX_STATUS_OFFSET 0x10
#define CXLDEV_MBOX_STATUS_BG_CMD BIT(0)
#define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
#define CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
#define CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK GENMASK_ULL(22, 16)
#define CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK GENMASK_ULL(47, 32)
#define CXLDEV_MBOX_BG_CMD_COMMAND_VENDOR_MASK GENMASK_ULL(63, 48)
#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20

/*
 * Using struct_group() allows for per register-block-type helper routines,
 * without requiring block-type agnostic code to include the prefix.
 */
struct cxl_regs {
	/*
	 * Common set of CXL Component register block base pointers
	 * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
	 * @ras: CXL 2.0 8.2.5.9 CXL RAS Capability Structure
	 */
	struct_group_tagged(cxl_component_regs, component,
		void __iomem *hdm_decoder;
		void __iomem *ras;
	);
	/*
	 * Common set of CXL Device register block base pointers
	 * @status: CXL 2.0 8.2.8.3 Device Status Registers
	 * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
	 * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
	 */
	struct_group_tagged(cxl_device_regs, device_regs,
		void __iomem *status, *mbox, *memdev;
	);
	struct_group_tagged(cxl_pmu_regs, pmu_regs,
		void __iomem *pmu;
	);
	/*
	 * RCH downstream port specific RAS register
	 * @aer: CXL 3.0 8.2.1.1 RCH Downstream Port RCRB
	 */
	struct_group_tagged(cxl_rch_regs, rch_regs,
		void __iomem *dport_aer;
	);
};
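
/*
 * Sketch (illustrative, not part of this header): a block-type specific
 * helper can take the tagged group type directly, e.g. a hypothetical
 *
 *	static u32 example_mbox_ctrl(struct cxl_device_regs *regs)
 *	{
 *		return readl(regs->mbox + CXLDEV_MBOX_CTRL_OFFSET);
 *	}
 *
 * called as example_mbox_ctrl(&regs.device_regs) for some struct
 * cxl_regs instance, while component-register code independently passes
 * &regs.component, without either helper naming the other's fields.
 */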

struct cxl_reg_map {
	bool valid;
	int id;
	unsigned long offset;
	unsigned long size;
};

struct cxl_component_reg_map {
	struct cxl_reg_map hdm_decoder;
	struct cxl_reg_map ras;
};

struct cxl_device_reg_map {
	struct cxl_reg_map status;
	struct cxl_reg_map mbox;
	struct cxl_reg_map memdev;
};

struct cxl_pmu_reg_map {
	struct cxl_reg_map pmu;
};

/**
 * struct cxl_register_map - DVSEC harvested register block mapping parameters
 * @host: device for devm operations and logging
 * @base: virtual base of the register-block-BAR + @block_offset
 * @resource: physical resource base of the register block
 * @max_size: maximum mapping size to perform register search
 * @reg_type: see enum cxl_regloc_type
 * @component_map: cxl_reg_map for component registers
 * @device_map: cxl_reg_maps for device registers
 * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units
 */
struct cxl_register_map {
	struct device *host;
	void __iomem *base;
	resource_size_t resource;
	resource_size_t max_size;
	u8 reg_type;
	union {
		struct cxl_component_reg_map component_map;
		struct cxl_device_reg_map device_map;
		struct cxl_pmu_reg_map pmu_map;
	};
};

void cxl_probe_component_regs(struct device *dev, void __iomem *base,
			      struct cxl_component_reg_map *map);
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
			   struct cxl_device_reg_map *map);
int cxl_map_component_regs(const struct cxl_register_map *map,
			   struct cxl_component_regs *regs,
			   unsigned long map_mask);
int cxl_map_device_regs(const struct cxl_register_map *map,
			struct cxl_device_regs *regs);
int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs);

enum cxl_regloc_type;
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type);
int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
			       struct cxl_register_map *map, int index);
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
		      struct cxl_register_map *map);
int cxl_setup_regs(struct cxl_register_map *map);
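
/*
 * Typical discovery flow (illustrative sketch, assuming a PCI probe
 * context with a struct pci_dev *pdev in scope):
 *
 *	struct cxl_register_map map;
 *	int rc;
 *
 *	rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
 *	if (rc)
 *		return rc;
 *	rc = cxl_setup_regs(&map);
 *
 * cxl_find_regblock() fills @map from the PCI Register Locator DVSEC,
 * and cxl_setup_regs() probes and records the discovered capabilities.
 */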
struct cxl_dport;
resource_size_t cxl_rcd_component_reg_phys(struct device *dev,
					   struct cxl_dport *dport);

#define CXL_RESOURCE_NONE ((resource_size_t) -1)
#define CXL_TARGET_STRLEN 20

/*
 * cxl_decoder flags that define the type of memory / devices this
 * decoder supports as well as configuration lock status. See "CXL 2.0
 * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details.
 * Additionally indicate whether decoder settings were autodetected or
 * user customized.
 */
#define CXL_DECODER_F_RAM BIT(0)
#define CXL_DECODER_F_PMEM BIT(1)
#define CXL_DECODER_F_TYPE2 BIT(2)
#define CXL_DECODER_F_TYPE3 BIT(3)
#define CXL_DECODER_F_LOCK BIT(4)
#define CXL_DECODER_F_ENABLE BIT(5)
#define CXL_DECODER_F_MASK GENMASK(5, 0)

enum cxl_decoder_type {
	CXL_DECODER_DEVMEM = 2,
	CXL_DECODER_HOSTONLYMEM = 3,
};

/*
 * Current specification goes up to 8, double that seems a reasonable
 * software max for the foreseeable future
 */
#define CXL_DECODER_MAX_INTERLEAVE 16

#define CXL_QOS_CLASS_INVALID -1

/**
 * struct cxl_decoder - Common CXL HDM Decoder Attributes
 * @dev: this decoder's device
 * @id: kernel device name id
 * @hpa_range: Host physical address range mapped by this decoder
 * @interleave_ways: number of cxl_dports in this decode
 * @interleave_granularity: data stride per dport
 * @target_type: accelerator vs expander (type2 vs type3) selector
 * @region: currently assigned region for this decoder
 * @flags: memory type capabilities and locking
 * @commit: device/decoder-type specific callback to commit settings to hw
 * @reset: device/decoder-type specific callback to reset hw settings
 */
struct cxl_decoder {
	struct device dev;
	int id;
	struct range hpa_range;
	int interleave_ways;
	int interleave_granularity;
	enum cxl_decoder_type target_type;
	struct cxl_region *region;
	unsigned long flags;
	int (*commit)(struct cxl_decoder *cxld);
	void (*reset)(struct cxl_decoder *cxld);
};

/*
 * CXL_DECODER_DEAD prevents endpoints from being reattached to regions
 * while cxld_unregister() is running
 */
enum cxl_decoder_mode {
	CXL_DECODER_NONE,
	CXL_DECODER_RAM,
	CXL_DECODER_PMEM,
	CXL_DECODER_MIXED,
	CXL_DECODER_DEAD,
};

static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode)
{
	static const char * const names[] = {
		[CXL_DECODER_NONE] = "none",
		[CXL_DECODER_RAM] = "ram",
		[CXL_DECODER_PMEM] = "pmem",
		[CXL_DECODER_MIXED] = "mixed",
	};

	if (mode >= CXL_DECODER_NONE && mode <= CXL_DECODER_MIXED)
		return names[mode];
	return "mixed";
}

/*
 * Track whether this decoder is reserved for region autodiscovery, or
 * free for userspace provisioning.
 */
enum cxl_decoder_state {
	CXL_DECODER_STATE_MANUAL,
	CXL_DECODER_STATE_AUTO,
};

/**
 * struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder
 * @cxld: base cxl_decoder object
 * @dpa_res: actively claimed DPA span of this decoder
 * @skip: offset into @dpa_res where @cxld.hpa_range maps
 * @mode: which memory type / access-mode-partition this decoder targets
 * @state: autodiscovery state
 * @pos: interleave position in @cxld.region
 */
struct cxl_endpoint_decoder {
	struct cxl_decoder cxld;
	struct resource *dpa_res;
	resource_size_t skip;
	enum cxl_decoder_mode mode;
	enum cxl_decoder_state state;
	int pos;
};

/**
 * struct cxl_switch_decoder - Switch specific CXL HDM Decoder
 * @cxld: base cxl_decoder object
 * @nr_targets: number of elements in @target
 * @target: active ordered target list in current decoder configuration
 *
 * The 'switch' decoder type represents the decoder instances of cxl_port
 * objects that route from the root of a CXL memory decode topology to the
 * endpoints. They come in two flavors: root-level decoders, statically
 * defined by platform firmware, and mid-level decoders, where
 * interleave-granularity, interleave-width, and the target list are mutable.
 */
struct cxl_switch_decoder {
	struct cxl_decoder cxld;
	int nr_targets;
	struct cxl_dport *target[];
};

struct cxl_root_decoder;
typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);

/**
 * struct cxl_root_decoder - Static platform CXL address decoder
 * @res: host / parent resource for region allocations
 * @region_id: region id for next region provisioning event
 * @hpa_to_spa: translate CXL host-physical-address to Platform system-physical-address
 * @platform_data: platform specific configuration data
 * @range_lock: sync region autodiscovery by address range
 * @qos_class: QoS performance class cookie
 * @cxlsd: base cxl switch decoder
 */
struct cxl_root_decoder {
	struct resource *res;
	atomic_t region_id;
	cxl_hpa_to_spa_fn hpa_to_spa;
	void *platform_data;
	struct mutex range_lock;
	int qos_class;
	struct cxl_switch_decoder cxlsd;
};

/*
 * enum cxl_config_state - State machine for region configuration
 * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely
 * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more
 * changes to interleave_ways or interleave_granularity
 * @CXL_CONFIG_ACTIVE: All targets have been added and the region is now
 * active
 * @CXL_CONFIG_RESET_PENDING: see commit_store()
 * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware
 */
enum cxl_config_state {
	CXL_CONFIG_IDLE,
	CXL_CONFIG_INTERLEAVE_ACTIVE,
	CXL_CONFIG_ACTIVE,
	CXL_CONFIG_RESET_PENDING,
	CXL_CONFIG_COMMIT,
};

/**
 * struct cxl_region_params - region settings
 * @state: allow the driver to lockdown further parameter changes
 * @uuid: unique id for persistent regions
 * @interleave_ways: number of endpoints in the region
 * @interleave_granularity: capacity each endpoint contributes to a stripe
 * @res: allocated iomem capacity for this region
 * @targets: active ordered targets in current decoder configuration
 * @nr_targets: number of targets
 *
 * State transitions are protected by the cxl_region_rwsem
 */
struct cxl_region_params {
	enum cxl_config_state state;
	uuid_t uuid;
	int interleave_ways;
	int interleave_granularity;
	struct resource *res;
	struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
	int nr_targets;
};

/*
 * Indicate whether this region has been assembled by autodetection or
 * userspace assembly. Prevent endpoint decoders outside of automatic
 * detection from being added to the region.
 */
#define CXL_REGION_F_AUTO 0

/*
 * Require that a committed region successfully complete a teardown once
 * any of its associated decoders have been torn down. This maintains
 * the commit state for the region since there are committed decoders,
 * but blocks cxl_region_probe().
 */
#define CXL_REGION_F_NEEDS_RESET 1

/**
 * struct cxl_region - CXL region
 * @dev: This region's device
 * @id: This region's id. Id is globally unique across all regions
 * @mode: Endpoint decoder allocation / access mode
 * @type: Endpoint decoder target type
 * @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown
 * @cxlr_pmem: (for pmem regions) cached copy of the pmem region object
 * @flags: Region state flags
 * @params: active + config params for the region
 * @coord: QoS access coordinates for the region
 * @memory_notifier: notifier for setting the access coordinates to node
 * @adist_notifier: notifier for calculating the abstract distance of node
 */
struct cxl_region {
	struct device dev;
	int id;
	enum cxl_decoder_mode mode;
	enum cxl_decoder_type type;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct cxl_pmem_region *cxlr_pmem;
	unsigned long flags;
	struct cxl_region_params params;
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	struct notifier_block memory_notifier;
	struct notifier_block adist_notifier;
};

struct cxl_nvdimm_bridge {
	int id;
	struct device dev;
	struct cxl_port *port;
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_bus_descriptor nd_desc;
};

#define CXL_DEV_ID_LEN 19

struct cxl_nvdimm {
	struct device dev;
	struct cxl_memdev *cxlmd;
	u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
};

struct cxl_pmem_region_mapping {
	struct cxl_memdev *cxlmd;
	struct cxl_nvdimm *cxl_nvd;
	u64 start;
	u64 size;
	int position;
};

struct cxl_pmem_region {
	struct device dev;
	struct cxl_region *cxlr;
	struct nd_region *nd_region;
	struct range hpa_range;
	int nr_mappings;
	struct cxl_pmem_region_mapping mapping[];
};

struct cxl_dax_region {
	struct device dev;
	struct cxl_region *cxlr;
	struct range hpa_range;
};

/**
 * struct cxl_port - logical collection of upstream port devices and
 *		     downstream port devices to construct a CXL memory
 *		     decode hierarchy.
 * @dev: this port's device
 * @uport_dev: PCI or platform device implementing the upstream port capability
 * @host_bridge: Shortcut to the platform attach point for this port
 * @id: id for port device-name
 * @dports: cxl_dport instances referenced by decoders
 * @endpoints: cxl_ep instances, endpoints that are a descendant of this port
 * @regions: cxl_region_ref instances, regions mapped by this port
 * @parent_dport: dport that points to this port in the parent
 * @decoder_ida: allocator for decoder ids
 * @reg_map: component and ras register mapping parameters
 * @nr_dports: number of entries in @dports
 * @hdm_end: track last allocated HDM decoder instance for allocation ordering
 * @commit_end: cursor to track highest committed decoder for commit ordering
 * @dead: last ep has been removed, force port re-creation
 * @depth: How deep this port is relative to the root. depth 0 is the root.
 * @cdat: Cached CDAT data
 * @cdat_available: Should a CDAT attribute be available in sysfs
 * @pci_latency: Upstream latency in picoseconds
 */
struct cxl_port {
	struct device dev;
	struct device *uport_dev;
	struct device *host_bridge;
	int id;
	struct xarray dports;
	struct xarray endpoints;
	struct xarray regions;
	struct cxl_dport *parent_dport;
	struct ida decoder_ida;
	struct cxl_register_map reg_map;
	int nr_dports;
	int hdm_end;
	int commit_end;
	bool dead;
	unsigned int depth;
	struct cxl_cdat {
		void *table;
		size_t length;
	} cdat;
	bool cdat_available;
	long pci_latency;
};

/**
 * struct cxl_root - logical collection of root cxl_port items
 *
 * @port: cxl_port member
 * @ops: cxl root operations
 */
struct cxl_root {
	struct cxl_port port;
	const struct cxl_root_ops *ops;
};

static inline struct cxl_root *
to_cxl_root(const struct cxl_port *port)
{
	return container_of(port, struct cxl_root, port);
}

struct cxl_root_ops {
	int (*qos_class)(struct cxl_root *cxl_root,
			 struct access_coordinate *coord, int entries,
			 int *qos_class);
};

static inline struct cxl_dport *
cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev)
{
	return xa_load(&port->dports, (unsigned long)dport_dev);
}

struct cxl_rcrb_info {
	resource_size_t base;
	u16 aer_cap;
};

/**
 * struct cxl_dport - CXL downstream port
 * @dport_dev: PCI bridge or firmware device representing the downstream link
 * @reg_map: component and ras register mapping parameters
 * @port_id: unique hardware identifier for dport in decoder target list
 * @rcrb: Data about the Root Complex Register Block layout
 * @rch: Indicate whether this dport was enumerated in RCH or VH mode
 * @port: reference to cxl_port that contains this downstream port
 * @regs: Dport parsed register blocks
 * @coord: access coordinates (bandwidth and latency performance attributes)
 * @link_latency: calculated PCIe downstream latency
 */
struct cxl_dport {
	struct device *dport_dev;
	struct cxl_register_map reg_map;
	int port_id;
	struct cxl_rcrb_info rcrb;
	bool rch;
	struct cxl_port *port;
	struct cxl_regs regs;
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	long link_latency;
};

/**
 * struct cxl_ep - track an endpoint's interest in a port
 * @ep: device that hosts a generic CXL endpoint (expander or accelerator)
 * @dport: which dport routes to this endpoint on @port
 * @next: cxl switch port across the link attached to @dport; NULL if
 *	  attached to an endpoint
 */
struct cxl_ep {
	struct device *ep;
	struct cxl_dport *dport;
	struct cxl_port *next;
};

/**
 * struct cxl_region_ref - track a region's interest in a port
 * @port: point in topology to install this reference
 * @decoder: decoder assigned for @region in @port
 * @region: region for this reference
 * @endpoints: cxl_ep references for region members beneath @port
 * @nr_targets_set: track how many targets have been programmed during setup
 * @nr_eps: number of endpoints beneath @port
 * @nr_targets: number of distinct targets needed to reach @nr_eps
 */
struct cxl_region_ref {
	struct cxl_port *port;
	struct cxl_decoder *decoder;
	struct cxl_region *region;
	struct xarray endpoints;
	int nr_targets_set;
	int nr_eps;
	int nr_targets;
};

/*
 * The platform firmware device hosting the root is also the top of the
 * CXL port topology. All other CXL ports have another CXL port as their
 * parent and their ->uport_dev / host device is out-of-line of the port
 * ancestry.
 */
static inline bool is_cxl_root(struct cxl_port *port)
{
	return port->uport_dev == port->dev.parent;
}

int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
void cxl_port_commit_reap(struct cxl_decoder *cxld);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
			      struct pci_bus *bus);
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host,
				   struct device *uport_dev,
				   resource_size_t component_reg_phys,
				   struct cxl_dport *parent_dport);
struct cxl_root *devm_cxl_add_root(struct device *host,
				   const struct cxl_root_ops *ops);
struct cxl_root *find_cxl_root(struct cxl_port *port);
void put_cxl_root(struct cxl_root *cxl_root);
DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
DEFINE_FREE(put_cxl_port, struct cxl_port *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
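
/*
 * Example (illustrative): the DEFINE_FREE() definitions above enable
 * <linux/cleanup.h> scope-based reference management, e.g.:
 *
 *	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
 *	if (!cxl_root)
 *		return -ENXIO;
 *
 * where the reference taken by find_cxl_root() is dropped automatically
 * when cxl_root goes out of scope.
 */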
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
				   struct cxl_dport **dport);
struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport);
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd);

struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport, int port_id,
				     resource_size_t component_reg_phys);
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
					 struct device *dport_dev, int port_id,
					 resource_size_t rcrb);

#ifdef CONFIG_PCIEAER_CXL
void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host);
#else
static inline void cxl_dport_init_ras_reporting(struct cxl_dport *dport,
						struct device *host) { }
#endif

struct cxl_decoder *to_cxl_decoder(struct device *dev);
struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
bool is_root_decoder(struct device *dev);
bool is_switch_decoder(struct device *dev);
bool is_endpoint_decoder(struct device *dev);
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets);
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
						    unsigned int nr_targets);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
static inline int cxl_root_decoder_autoremove(struct device *host,
					      struct cxl_root_decoder *cxlrd)
{
	return cxl_decoder_autoremove(host, &cxlrd->cxlsd.cxld);
}
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);

/**
 * struct cxl_endpoint_dvsec_info - Cached DVSEC info
 * @mem_enabled: cached value of mem_enabled in the DVSEC at init time
 * @ranges: Number of active HDM ranges this device uses.
 * @port: endpoint port associated with this info instance
 * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE
 */
struct cxl_endpoint_dvsec_info {
	bool mem_enabled;
	int ranges;
	struct cxl_port *port;
	struct range dvsec_range[2];
};

struct cxl_hdm;
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
				   struct cxl_endpoint_dvsec_info *info);
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
			struct cxl_endpoint_dvsec_info *info);

bool is_cxl_region(struct device *dev);

extern struct bus_type cxl_bus_type;

struct cxl_driver {
	const char *name;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	struct device_driver drv;
	int id;
};

#define to_cxl_drv(__drv) container_of_const(__drv, struct cxl_driver, drv)

int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname);
#define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME)
void cxl_driver_unregister(struct cxl_driver *cxl_drv);

#define module_cxl_driver(__cxl_driver) \
	module_driver(__cxl_driver, cxl_driver_register, cxl_driver_unregister)
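
/*
 * Minimal driver skeleton (illustrative, not a driver in this tree):
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cxl_driver example_driver = {
 *		.name = "cxl_example",
 *		.probe = example_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(example_driver);
 *
 * The @id selects which CXL_DEVICE_* device type (defined below) this
 * driver binds to.
 */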

#define CXL_DEVICE_NVDIMM_BRIDGE	1
#define CXL_DEVICE_NVDIMM		2
#define CXL_DEVICE_PORT			3
#define CXL_DEVICE_ROOT			4
#define CXL_DEVICE_MEMORY_EXPANDER	5
#define CXL_DEVICE_REGION		6
#define CXL_DEVICE_PMEM_REGION		7
#define CXL_DEVICE_DAX_REGION		8
#define CXL_DEVICE_PMU			9

#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
#define CXL_MODALIAS_FMT "cxl:t%d"
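
/*
 * For example (illustrative), MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM)
 * expands to MODULE_ALIAS("cxl:t2*"), matching uevents formatted with
 * CXL_MODALIAS_FMT, i.e. "cxl:t2".
 */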

struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev);
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
						     struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);

#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
int cxl_add_to_region(struct cxl_port *root,
		      struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
#else
static inline bool is_cxl_pmem_region(struct device *dev)
{
	return false;
}
static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	return NULL;
}
static inline int cxl_add_to_region(struct cxl_port *root,
				    struct cxl_endpoint_decoder *cxled)
{
	return 0;
}
static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
	return NULL;
}
#endif

void cxl_endpoint_parse_cdat(struct cxl_port *port);
void cxl_switch_parse_cdat(struct cxl_port *port);

int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
				      struct access_coordinate *coord);
void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
				    struct cxl_endpoint_decoder *cxled);
void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr);

void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);

void cxl_coordinates_combine(struct access_coordinate *out,
			     struct access_coordinate *c1,
			     struct access_coordinate *c2);

bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);

/*
 * Unit test builds override this to __weak; find the 'strong' version
 * of these symbols in tools/testing/cxl/.
 */
#ifndef __mock
#define __mock static
#endif

#endif /* __CXL_H__ */