// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"

#define CXL_RCRB_SIZE	SZ_8K
/*
 * Per-root-decoder copy of the CEDT CXIMS xormap list, attached as
 * cxlrd->platform_data by cxl_parse_cxims() and consumed by
 * cxl_xor_hpa_to_spa().
 */
struct cxl_cxims_data {
        int nr_maps;                            /* entries in xormaps[] */
        u64 xormaps[] __counted_by(nr_maps);    /* one bitmap per interleave bit */
};
/* _DSM GUID for retrieving QoS Throttling Group (QTG) IDs from platform FW */
static const guid_t acpi_cxl_qtg_id_guid =
        GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
                  0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
  21. static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
  22. {
  23. struct cxl_cxims_data *cximsd = cxlrd->platform_data;
  24. int hbiw = cxlrd->cxlsd.nr_targets;
  25. u64 val;
  26. int pos;
  27. /* No xormaps for host bridge interleave ways of 1 or 3 */
  28. if (hbiw == 1 || hbiw == 3)
  29. return hpa;
  30. /*
  31. * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) restore
  32. * the position bit to its value before the xormap was applied at
  33. * HPA->DPA translation.
  34. *
  35. * pos is the lowest set bit in an XORMAP
  36. * val is the XORALLBITS(HPA & XORMAP)
  37. *
  38. * XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
  39. * as an operation that outputs a single bit by XORing all the
  40. * bits in the input (hpa & xormap). Implement XORALLBITS using
  41. * hweight64(). If the hamming weight is even the XOR of those
  42. * bits results in val==0, if odd the XOR result is val==1.
  43. */
  44. for (int i = 0; i < cximsd->nr_maps; i++) {
  45. if (!cximsd->xormaps[i])
  46. continue;
  47. pos = __ffs(cximsd->xormaps[i]);
  48. val = (hweight64(hpa & cximsd->xormaps[i]) & 1);
  49. hpa = (hpa & ~(1ULL << pos)) | (val << pos);
  50. }
  51. return hpa;
  52. }
/* Argument bundle for the cxl_parse_cxims() CEDT subtable callback */
struct cxl_cxims_context {
        struct device *dev;             /* device for devm allocation/logging */
        struct cxl_root_decoder *cxlrd; /* decoder the CXIMS entry applies to */
};
/*
 * CEDT CXIMS subtable callback: if this entry matches the root decoder's
 * interleave granularity, copy its xormap list into devm-allocated
 * platform_data for later HPA->SPA translation.
 */
static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
                           const unsigned long end)
{
        struct acpi_cedt_cxims *cxims = (struct acpi_cedt_cxims *)header;
        struct cxl_cxims_context *ctx = arg;
        struct cxl_root_decoder *cxlrd = ctx->cxlrd;
        struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
        struct device *dev = ctx->dev;
        struct cxl_cxims_data *cximsd;
        unsigned int hbig, nr_maps;
        int rc;

        rc = eig_to_granularity(cxims->hbig, &hbig);
        if (rc)
                return rc;

        /* Does this CXIMS entry apply to the given CXL Window? */
        if (hbig != cxld->interleave_granularity)
                return 0;

        /* IW 1,3 do not use xormaps and skip this parsing entirely */
        if (is_power_of_2(cxld->interleave_ways))
                /* 2, 4, 8, 16 way */
                nr_maps = ilog2(cxld->interleave_ways);
        else
                /* 6, 12 way */
                nr_maps = ilog2(cxld->interleave_ways / 3);

        /* The table must provide at least one xormap per interleave bit */
        if (cxims->nr_xormaps < nr_maps) {
                dev_dbg(dev, "CXIMS nr_xormaps[%d] expected[%d]\n",
                        cxims->nr_xormaps, nr_maps);
                return -ENXIO;
        }

        cximsd = devm_kzalloc(dev, struct_size(cximsd, xormaps, nr_maps),
                              GFP_KERNEL);
        if (!cximsd)
                return -ENOMEM;
        cximsd->nr_maps = nr_maps;
        memcpy(cximsd->xormaps, cxims->xormap_list,
               nr_maps * sizeof(*cximsd->xormaps));
        cxlrd->platform_data = cximsd;

        return 0;
}
  96. static unsigned long cfmws_to_decoder_flags(int restrictions)
  97. {
  98. unsigned long flags = CXL_DECODER_F_ENABLE;
  99. if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
  100. flags |= CXL_DECODER_F_TYPE2;
  101. if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
  102. flags |= CXL_DECODER_F_TYPE3;
  103. if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
  104. flags |= CXL_DECODER_F_RAM;
  105. if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
  106. flags |= CXL_DECODER_F_PMEM;
  107. if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
  108. flags |= CXL_DECODER_F_LOCK;
  109. return flags;
  110. }
/*
 * Sanity-check a CFMWS entry before building a root decoder from it:
 * known interleave arithmetic, 256MB alignment of base and size, a valid
 * encoded interleave-ways value, and a header long enough to hold the
 * interleave target list.
 */
static int cxl_acpi_cfmws_verify(struct device *dev,
                                 struct acpi_cedt_cfmws *cfmws)
{
        int rc, expected_len;
        unsigned int ways;

        if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO &&
            cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
                dev_err(dev, "CFMWS Unknown Interleave Arithmetic: %d\n",
                        cfmws->interleave_arithmetic);
                return -EINVAL;
        }

        if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
                dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
                return -EINVAL;
        }

        if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
                dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
                return -EINVAL;
        }

        rc = eiw_to_ways(cfmws->interleave_ways, &ways);
        if (rc) {
                dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
                        cfmws->interleave_ways);
                return -EINVAL;
        }

        expected_len = struct_size(cfmws, interleave_targets, ways);

        /* Too short to contain the advertised number of targets: reject */
        if (cfmws->header.length < expected_len) {
                dev_err(dev, "CFMWS length %d less than expected %d\n",
                        cfmws->header.length, expected_len);
                return -EINVAL;
        }

        /* Extra trailing bytes are tolerated, just noted for debug */
        if (cfmws->header.length > expected_len)
                dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
                        cfmws->header.length, expected_len);

        return 0;
}
/*
 * Note, @dev must be the first member, see 'struct cxl_chbs_context'
 * and mock_acpi_table_parse_cedt()
 */
struct cxl_cfmws_context {
        struct device *dev;             /* cxl_acpi platform device */
        struct cxl_port *root_port;     /* root port to hang decoders off */
        struct resource *cxl_res;       /* private CXL window resource tree */
        int id;                         /* monotonic "CXL Window %d" id */
};
/**
 * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
 * @handle: ACPI handle
 * @coord: performance access coordinates
 * @entries: number of QTG IDs to return
 * @qos_class: int array provided by caller to return QTG IDs
 *
 * Return: number of QTG IDs returned, or -errno for errors
 *
 * Issue QTG _DSM with accompanied bandwidth and latency data in order to get
 * the QTG IDs that are suitable for the performance point in order of most
 * suitable to least suitable. Write back array of QTG IDs and return the
 * actual number of QTG IDs written back.
 */
static int
cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
                          int entries, int *qos_class)
{
        union acpi_object *out_obj, *out_buf, *obj;
        /* _DSM input: latency/bandwidth package per the QTG DSM interface */
        union acpi_object in_array[4] = {
                [0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
                [1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
                [2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
                [3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
        };
        union acpi_object in_obj = {
                .package = {
                        .type = ACPI_TYPE_PACKAGE,
                        .count = 4,
                        .elements = in_array,
                },
        };
        int count, pkg_entries, i;
        u16 max_qtg;
        int rc;

        if (!entries)
                return -EINVAL;

        out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
        if (!out_obj)
                return -ENXIO;

        if (out_obj->type != ACPI_TYPE_PACKAGE) {
                rc = -ENXIO;
                goto out;
        }

        /* Check Max QTG ID (element 0 of the returned package) */
        obj = &out_obj->package.elements[0];
        if (obj->type != ACPI_TYPE_INTEGER) {
                rc = -ENXIO;
                goto out;
        }
        max_qtg = obj->integer.value;

        /* It's legal to have 0 QTG entries */
        pkg_entries = out_obj->package.count;
        if (pkg_entries <= 1) {
                rc = 0;
                goto out;
        }

        /* Retrieve QTG IDs package (element 1, a nested package) */
        obj = &out_obj->package.elements[1];
        if (obj->type != ACPI_TYPE_PACKAGE) {
                rc = -ENXIO;
                goto out;
        }

        /* Only write back as many IDs as the caller's array can hold */
        pkg_entries = obj->package.count;
        count = min(entries, pkg_entries);
        for (i = 0; i < count; i++) {
                u16 qtg_id;

                out_buf = &obj->package.elements[i];
                if (out_buf->type != ACPI_TYPE_INTEGER) {
                        rc = -ENXIO;
                        goto out;
                }

                qtg_id = out_buf->integer.value;
                /* Out-of-range IDs are reported but still passed through */
                if (qtg_id > max_qtg)
                        pr_warn("QTG ID %u greater than MAX %u\n",
                                qtg_id, max_qtg);

                qos_class[i] = qtg_id;
        }
        rc = count;

out:
        ACPI_FREE(out_obj);
        return rc;
}
  240. static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
  241. struct access_coordinate *coord, int entries,
  242. int *qos_class)
  243. {
  244. struct device *dev = cxl_root->port.uport_dev;
  245. acpi_handle handle;
  246. if (!dev_is_platform(dev))
  247. return -ENODEV;
  248. handle = ACPI_HANDLE(dev);
  249. if (!handle)
  250. return -ENODEV;
  251. return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
  252. }
/* Root-port operations registered with devm_cxl_add_root() in probe */
static const struct cxl_root_ops acpi_root_ops = {
        .qos_class = cxl_acpi_qos_class,
};
  256. static void del_cxl_resource(struct resource *res)
  257. {
  258. if (!res)
  259. return;
  260. kfree(res->name);
  261. kfree(res);
  262. }
/*
 * Allocate a "CXL Window %d" IORESOURCE_MEM resource spanning [base,
 * base + n). The __free(kfree) cleanup frees @res automatically on the
 * error paths; no_free_ptr() disarms it only on success. Note the name
 * allocation is intentionally released by del_cxl_resource(), not here,
 * once ownership has transferred to the caller.
 */
static struct resource *alloc_cxl_resource(resource_size_t base,
                                           resource_size_t n, int id)
{
        struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);

        if (!res)
                return NULL;

        res->start = base;
        res->end = base + n - 1;
        res->flags = IORESOURCE_MEM;
        res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id);
        if (!res->name)
                return NULL;

        return no_free_ptr(res);
}
  277. static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
  278. {
  279. int rc = insert_resource(parent, res);
  280. if (rc)
  281. del_cxl_resource(res);
  282. return rc;
  283. }
/* Scope-based cleanup helpers for __cxl_parse_cfmws() error unwinding */
DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
            if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
/*
 * Turn one validated CFMWS entry into a root decoder: verify the entry,
 * decode interleave parameters, track the window in ctx->cxl_res, allocate
 * and program a root decoder, wire up XOR HPA->SPA translation when
 * applicable, and register the decoder. The __free() cleanups release the
 * resource / decoder on any early return; no_free_ptr() transfers
 * ownership on the success paths.
 */
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
                             struct cxl_cfmws_context *ctx)
{
        int target_map[CXL_DECODER_MAX_INTERLEAVE];
        struct cxl_port *root_port = ctx->root_port;
        struct cxl_cxims_context cxims_ctx;
        struct device *dev = ctx->dev;
        struct cxl_decoder *cxld;
        unsigned int ways, i, ig;
        int rc;

        rc = cxl_acpi_cfmws_verify(dev, cfmws);
        if (rc)
                return rc;

        rc = eiw_to_ways(cfmws->interleave_ways, &ways);
        if (rc)
                return rc;

        rc = eig_to_granularity(cfmws->granularity, &ig);
        if (rc)
                return rc;

        for (i = 0; i < ways; i++)
                target_map[i] = cfmws->interleave_targets[i];

        struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
                cfmws->base_hpa, cfmws->window_size, ctx->id++);
        if (!res)
                return -ENOMEM;

        /* add to the local resource tracking to establish a sort order */
        rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res));
        if (rc)
                return rc;

        struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
                cxl_root_decoder_alloc(root_port, ways);
        if (IS_ERR(cxlrd))
                return PTR_ERR(cxlrd);

        cxld = &cxlrd->cxlsd.cxld;
        cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
        cxld->hpa_range = (struct range) {
                .start = cfmws->base_hpa,
                .end = cfmws->base_hpa + cfmws->window_size - 1,
        };
        cxld->interleave_ways = ways;
        /*
         * Minimize the x1 granularity to advertise support for any
         * valid region granularity
         */
        if (ways == 1)
                ig = CXL_DECODER_MIN_GRANULARITY;
        cxld->interleave_granularity = ig;

        /* XOR arithmetic with ways other than 1/3 requires CXIMS xormaps */
        if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
                if (ways != 1 && ways != 3) {
                        cxims_ctx = (struct cxl_cxims_context) {
                                .dev = dev,
                                .cxlrd = cxlrd,
                        };
                        rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
                                                   cxl_parse_cxims, &cxims_ctx);
                        if (rc < 0)
                                return rc;
                        /* No matching CXIMS entry set platform_data */
                        if (!cxlrd->platform_data) {
                                dev_err(dev, "No CXIMS for HBIG %u\n", ig);
                                return -EINVAL;
                        }
                }
        }

        cxlrd->qos_class = cfmws->qtg_id;

        if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR)
                cxlrd->hpa_to_spa = cxl_xor_hpa_to_spa;

        rc = cxl_decoder_add(cxld, target_map);
        if (rc)
                return rc;

        /* ownership of cxlrd passes to the devres action on success */
        return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
}
  359. static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
  360. const unsigned long end)
  361. {
  362. struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
  363. struct cxl_cfmws_context *ctx = arg;
  364. struct device *dev = ctx->dev;
  365. int rc;
  366. rc = __cxl_parse_cfmws(cfmws, ctx);
  367. if (rc)
  368. dev_err(dev,
  369. "Failed to add decode range: [%#llx - %#llx] (%d)\n",
  370. cfmws->base_hpa,
  371. cfmws->base_hpa + cfmws->window_size - 1, rc);
  372. else
  373. dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
  374. phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
  375. cfmws->base_hpa + cfmws->window_size - 1);
  376. /* never fail cxl_acpi load for a single window failure */
  377. return 0;
  378. }
  379. __mock struct acpi_device *to_cxl_host_bridge(struct device *host,
  380. struct device *dev)
  381. {
  382. struct acpi_device *adev = to_acpi_device(dev);
  383. if (!acpi_pci_find_root(adev->handle))
  384. return NULL;
  385. if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
  386. return adev;
  387. return NULL;
  388. }
/* Note, @dev is used by mock_acpi_table_parse_cedt() */
struct cxl_chbs_context {
        struct device *dev;             /* must stay the first member */
        unsigned long long uid;         /* host bridge _UID to match */
        resource_size_t base;           /* CHBS base, CXL_RESOURCE_NONE if unset */
        u32 cxl_version;                /* matched CHBS cxl_version, UINT_MAX if none */
        int nr_versions;                /* distinct CHBS versions seen in CEDT */
        u32 saved_version;              /* last version seen, for nr_versions counting */
};
/*
 * CEDT CHBS subtable callback: count the distinct cxl_versions seen across
 * all valid entries, and capture base/version for the first entry matching
 * ctx->uid. Always returns 0 so the full table is walked.
 */
static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
                             const unsigned long end)
{
        struct cxl_chbs_context *ctx = arg;
        struct acpi_cedt_chbs *chbs;

        chbs = (struct acpi_cedt_chbs *) header;

        /* A CXL 1.1 CHBS must describe an 8K RCRB; otherwise skip it */
        if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
            chbs->length != CXL_RCRB_SIZE)
                return 0;

        if (!chbs->base)
                return 0;

        if (ctx->saved_version != chbs->cxl_version) {
                /*
                 * cxl_version cannot be overwritten before the next two
                 * checks, then use saved_version
                 */
                ctx->saved_version = chbs->cxl_version;
                ctx->nr_versions++;
        }

        /* Already captured a match for this uid; keep counting versions */
        if (ctx->base != CXL_RESOURCE_NONE)
                return 0;

        if (ctx->uid != chbs->uid)
                return 0;

        ctx->cxl_version = chbs->cxl_version;
        ctx->base = chbs->base;

        return 0;
}
/*
 * Resolve the host bridge's _UID and scan the CEDT CHBS entries for its
 * component register base and CXL version. Returns 0 even when no CHBS
 * matches; callers check ctx->cxl_version / ctx->base for that case.
 */
static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
                        struct cxl_chbs_context *ctx)
{
        unsigned long long uid;
        int rc;

        rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid);
        if (rc != AE_OK) {
                dev_err(dev, "unable to retrieve _UID\n");
                return -ENOENT;
        }

        dev_dbg(dev, "UID found: %lld\n", uid);
        *ctx = (struct cxl_chbs_context) {
                .dev = dev,
                .uid = uid,
                .base = CXL_RESOURCE_NONE,
                .cxl_version = UINT_MAX,
                .saved_version = UINT_MAX,
        };

        acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbs_iter, ctx);

        if (ctx->nr_versions > 1) {
                /*
                 * Disclaim eRCD support given some component register may
                 * only be found via CHBCR
                 */
                dev_info(dev, "Unsupported platform config, mixed Virtual Host and Restricted CXL Host hierarchy.");
        }

        return 0;
}
/*
 * Look up generic port performance coordinates for a host bridge dport by
 * its ACPI _UID.
 *
 * NOTE(review): @dev is assumed to always resolve to a CXL host bridge here
 * (callers pass the device that already produced a dport), so @hb is not
 * NULL-checked — confirm against add_host_bridge_dport().
 */
static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
        struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
        u32 uid;

        if (kstrtou32(acpi_device_uid(hb), 0, &uid))
                return -EINVAL;

        return acpi_get_genport_coordinates(uid, dport->coord);
}
  461. static int add_host_bridge_dport(struct device *match, void *arg)
  462. {
  463. int ret;
  464. acpi_status rc;
  465. struct device *bridge;
  466. struct cxl_dport *dport;
  467. struct cxl_chbs_context ctx;
  468. struct acpi_pci_root *pci_root;
  469. struct cxl_port *root_port = arg;
  470. struct device *host = root_port->dev.parent;
  471. struct acpi_device *hb = to_cxl_host_bridge(host, match);
  472. if (!hb)
  473. return 0;
  474. rc = cxl_get_chbs(match, hb, &ctx);
  475. if (rc)
  476. return rc;
  477. if (ctx.cxl_version == UINT_MAX) {
  478. dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n",
  479. ctx.uid);
  480. return 0;
  481. }
  482. if (ctx.base == CXL_RESOURCE_NONE) {
  483. dev_warn(match, "CHBS invalid for Host Bridge (UID %lld)\n",
  484. ctx.uid);
  485. return 0;
  486. }
  487. pci_root = acpi_pci_find_root(hb->handle);
  488. bridge = pci_root->bus->bridge;
  489. /*
  490. * In RCH mode, bind the component regs base to the dport. In
  491. * VH mode it will be bound to the CXL host bridge's port
  492. * object later in add_host_bridge_uport().
  493. */
  494. if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
  495. dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
  496. &ctx.base);
  497. dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
  498. ctx.base);
  499. } else {
  500. dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
  501. CXL_RESOURCE_NONE);
  502. }
  503. if (IS_ERR(dport))
  504. return PTR_ERR(dport);
  505. ret = get_genport_coordinates(match, dport);
  506. if (ret)
  507. dev_dbg(match, "Failed to get generic port perf coordinates.\n");
  508. return 0;
  509. }
/*
 * A host bridge is a dport to a CFMWS decode and it is a uport to the
 * dport (PCIe Root Ports) in the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
        struct cxl_port *root_port = arg;
        struct device *host = root_port->dev.parent;
        struct acpi_device *hb = to_cxl_host_bridge(host, match);
        struct acpi_pci_root *pci_root;
        struct cxl_dport *dport;
        struct cxl_port *port;
        struct device *bridge;
        struct cxl_chbs_context ctx;
        resource_size_t component_reg_phys;
        int rc;

        if (!hb)
                return 0;

        pci_root = acpi_pci_find_root(hb->handle);
        bridge = pci_root->bus->bridge;

        /* Only bridges registered as dports in the earlier pass qualify */
        dport = cxl_find_dport_by_dev(root_port, bridge);
        if (!dport) {
                dev_dbg(host, "host bridge expected and not found\n");
                return 0;
        }

        /* Restricted (CXL 1.1) hosts have no uport-side port object */
        if (dport->rch) {
                dev_info(bridge, "host supports CXL (restricted)\n");
                return 0;
        }

        rc = cxl_get_chbs(match, hb, &ctx);
        if (rc)
                return rc;

        if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
                dev_warn(bridge,
                         "CXL CHBS version mismatch, skip port registration\n");
                return 0;
        }

        component_reg_phys = ctx.base;
        if (component_reg_phys != CXL_RESOURCE_NONE)
                dev_dbg(match, "CHBCR found for UID %lld: %pa\n",
                        ctx.uid, &component_reg_phys);

        rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus);
        if (rc)
                return rc;

        port = devm_cxl_add_port(host, bridge, component_reg_phys, dport);
        if (IS_ERR(port))
                return PTR_ERR(port);

        dev_info(bridge, "host supports CXL\n");

        return 0;
}
/*
 * device_for_each_child() callback: register an nvdimm bridge the first
 * time a pmem-capable root decoder is found. Returning 1 stops the
 * iteration after that single registration.
 */
static int add_root_nvdimm_bridge(struct device *match, void *data)
{
        struct cxl_decoder *cxld;
        struct cxl_port *root_port = data;
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct device *host = root_port->dev.parent;

        if (!is_root_decoder(match))
                return 0;

        cxld = to_cxl_decoder(match);
        if (!(cxld->flags & CXL_DECODER_F_PMEM))
                return 0;

        cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
        if (IS_ERR(cxl_nvb)) {
                dev_dbg(host, "failed to register pmem\n");
                return PTR_ERR(cxl_nvb);
        }
        dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
                dev_name(&cxl_nvb->dev));
        return 1;
}
/* Distinct lockdep class for the root device, set in cxl_acpi_probe() */
static struct lock_class_key cxl_root_key;

/* devm action: undo device_lock_set_class() when the device goes away */
static void cxl_acpi_lock_reset_class(void *dev)
{
        device_lock_reset_class(dev);
}
/*
 * Stash a pointer to the public iomem_resource entry in the private
 * resource's otherwise-unused desc field (paired with
 * cxl_get_public_resource()).
 */
static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
        priv->desc = (unsigned long) pub;
}
/* Recover the public resource stashed by cxl_set_public_resource() */
static struct resource *cxl_get_public_resource(struct resource *priv)
{
        return (struct resource *) priv->desc;
}
/*
 * devm action: tear down every window in the private CXL resource tree and
 * its paired public iomem_resource entry. @next is captured before
 * remove_resource() since removal unlinks the sibling chain.
 */
static void remove_cxl_resources(void *data)
{
        struct resource *res, *next, *cxl = data;

        for (res = cxl->child; res; res = next) {
                struct resource *victim = cxl_get_public_resource(res);

                next = res->sibling;
                remove_resource(res);

                if (victim) {
                        remove_resource(victim);
                        kfree(victim);
                }

                del_cxl_resource(res);
        }
}
/**
 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
 * @cxl_res: A standalone resource tree where each CXL window is a sibling
 *
 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
 * expanding its boundaries to ensure that any conflicting resources become
 * children. If a window is expanded it may then conflict with a another window
 * entry and require the window to be truncated or trimmed. Consider this
 * situation:
 *
 * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
 * |--------------- "System RAM" -------------|
 *
 * ...where platform firmware has established as System RAM resource across 2
 * windows, but has left some portion of window 1 for dynamic CXL region
 * provisioning. In this case "Window 0" will span the entirety of the "System
 * RAM" span, and "CXL Window 1" is truncated to the remaining tail past the end
 * of that "System RAM" resource.
 */
static int add_cxl_resources(struct resource *cxl_res)
{
        struct resource *res, *new, *next;

        for (res = cxl_res->child; res; res = next) {
                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (!new)
                        return -ENOMEM;
                /* name pointer is shared with the private resource */
                new->name = res->name;
                new->start = res->start;
                new->end = res->end;
                new->flags = IORESOURCE_MEM;
                new->desc = IORES_DESC_CXL;

                /*
                 * Record the public resource in the private cxl_res tree for
                 * later removal.
                 */
                cxl_set_public_resource(res, new);

                insert_resource_expand_to_fit(&iomem_resource, new);

                /* Trim or drop later windows the expansion now overlaps */
                next = res->sibling;
                while (next && resource_overlaps(new, next)) {
                        if (resource_contains(new, next)) {
                                struct resource *_next = next->sibling;

                                remove_resource(next);
                                del_cxl_resource(next);
                                next = _next;
                        } else
                                next->start = new->end + 1;
                }
        }
        return 0;
}
  657. static int pair_cxl_resource(struct device *dev, void *data)
  658. {
  659. struct resource *cxl_res = data;
  660. struct resource *p;
  661. if (!is_root_decoder(dev))
  662. return 0;
  663. for (p = cxl_res->child; p; p = p->sibling) {
  664. struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
  665. struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
  666. struct resource res = {
  667. .start = cxld->hpa_range.start,
  668. .end = cxld->hpa_range.end,
  669. .flags = IORESOURCE_MEM,
  670. };
  671. if (resource_contains(p, &res)) {
  672. cxlrd->res = cxl_get_public_resource(p);
  673. break;
  674. }
  675. }
  676. return 0;
  677. }
/*
 * Platform driver probe for the ACPI0017 CXL root object: set up the root
 * port, enumerate host bridges as dports, parse CFMWS windows into root
 * decoders and iomem resources, then enumerate host bridges again as
 * uports. All teardown is devres-managed.
 */
static int cxl_acpi_probe(struct platform_device *pdev)
{
        int rc;
        struct resource *cxl_res;
        struct cxl_root *cxl_root;
        struct cxl_port *root_port;
        struct device *host = &pdev->dev;
        struct acpi_device *adev = ACPI_COMPANION(host);
        struct cxl_cfmws_context ctx;

        /* Give the root device its own lockdep class (see cxl_root_key) */
        device_lock_set_class(&pdev->dev, &cxl_root_key);
        rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
                                      &pdev->dev);
        if (rc)
                return rc;

        /* Private, full-range tree that will hold the CFMWS windows */
        cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
        if (!cxl_res)
                return -ENOMEM;
        cxl_res->name = "CXL mem";
        cxl_res->start = 0;
        cxl_res->end = -1;
        cxl_res->flags = IORESOURCE_MEM;

        cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
        if (IS_ERR(cxl_root))
                return PTR_ERR(cxl_root);
        root_port = &cxl_root->port;

        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_dport);
        if (rc < 0)
                return rc;

        rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
        if (rc)
                return rc;

        ctx = (struct cxl_cfmws_context) {
                .dev = host,
                .root_port = root_port,
                .cxl_res = cxl_res,
        };
        rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
        if (rc < 0)
                return -ENXIO;

        rc = add_cxl_resources(cxl_res);
        if (rc)
                return rc;

        /*
         * Populate the root decoders with their related iomem resource,
         * if present
         */
        device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);

        /*
         * Root level scanned with host-bridge as dports, now scan host-bridges
         * for their role as CXL uports to their CXL-capable PCIe Root Ports.
         */
        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_uport);
        if (rc < 0)
                return rc;

        if (IS_ENABLED(CONFIG_CXL_PMEM))
                rc = device_for_each_child(&root_port->dev, root_port,
                                           add_root_nvdimm_bridge);
        if (rc < 0)
                return rc;

        /* In case PCI is scanned before ACPI re-trigger memdev attach */
        cxl_bus_rescan();

        return 0;
}
/* Bind against the ACPI0017 CXL root object */
static const struct acpi_device_id cxl_acpi_ids[] = {
        { "ACPI0017" },
        { },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

/* Extra platform-device id so the cxl_test mock environment can bind */
static const struct platform_device_id cxl_test_ids[] = {
        { "cxl_acpi" },
        { },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
        .probe = cxl_acpi_probe,
        .driver = {
                .name = KBUILD_MODNAME,
                .acpi_match_table = cxl_acpi_ids,
        },
        .id_table = cxl_test_ids,
};
/* Module init: just register the platform driver */
static int __init cxl_acpi_init(void)
{
        return platform_driver_register(&cxl_acpi_driver);
}
/* Module exit: unregister, then flush outstanding cxl_bus work */
static void __exit cxl_acpi_exit(void)
{
        platform_driver_unregister(&cxl_acpi_driver);
        cxl_bus_drain();
}
/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);

/*
 * Arrange for host-bridge ports to be active synchronous with
 * cxl_acpi_probe() exit.
 */
MODULE_SOFTDEP("pre: cxl_port");

module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);