port.c 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
  3. #include <linux/platform_device.h>
  4. #include <linux/memregion.h>
  5. #include <linux/workqueue.h>
  6. #include <linux/debugfs.h>
  7. #include <linux/device.h>
  8. #include <linux/module.h>
  9. #include <linux/pci.h>
  10. #include <linux/slab.h>
  11. #include <linux/idr.h>
  12. #include <linux/node.h>
  13. #include <cxl/einj.h>
  14. #include <cxlmem.h>
  15. #include <cxlpci.h>
  16. #include <cxl.h>
  17. #include "core.h"
  18. /**
  19. * DOC: cxl core
  20. *
  21. * The CXL core provides a set of interfaces that can be consumed by CXL aware
  22. * drivers. The interfaces allow for creation, modification, and destruction of
  23. * regions, memory devices, ports, and decoders. CXL aware drivers must register
  24. * with the CXL core via these interfaces in order to be able to participate in
  25. * cross-device interleave coordination. The CXL core also establishes and
  26. * maintains the bridge to the nvdimm subsystem.
  27. *
  28. * CXL core introduces sysfs hierarchy to control the devices that are
  29. * instantiated by the core.
  30. */
  31. /*
  32. * All changes to the interleave configuration occur with this lock held
  33. * for write.
  34. */
/* Taken for write across all interleave configuration changes (see above) */
DECLARE_RWSEM(cxl_region_rwsem);
/* Allocator for unique cxl_port ids (freed in cxl_port_release()) */
static DEFINE_IDA(cxl_port_ida);
/* Registry of CXL root buses; keying scheme not visible in this chunk */
static DEFINE_XARRAY(cxl_root_buses);
/*
 * cxl_num_decoders_committed() - number of committed decoders for @port
 *
 * @port->commit_end is the index of the last committed decoder (-1 when
 * none), so the count is commit_end + 1.  Callers must hold
 * cxl_region_rwsem (asserted below).
 */
int cxl_num_decoders_committed(struct cxl_port *port)
{
        lockdep_assert_held(&cxl_region_rwsem);

        return port->commit_end + 1;
}
/* sysfs "devtype": emit the device_type name of this CXL device */
static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);
/*
 * cxl_device_id() - map a device to its CXL_DEVICE_* modalias id
 *
 * Dispatches on dev->type (or the is_cxl_*() helpers) to one of the
 * CXL_DEVICE_* constants used for modalias generation.  Returns 0 for a
 * device type not known to this table.  Note the port check distinguishes
 * the root port from ordinary ports.
 */
static int cxl_device_id(const struct device *dev)
{
        if (dev->type == &cxl_nvdimm_bridge_type)
                return CXL_DEVICE_NVDIMM_BRIDGE;
        if (dev->type == &cxl_nvdimm_type)
                return CXL_DEVICE_NVDIMM;
        if (dev->type == CXL_PMEM_REGION_TYPE())
                return CXL_DEVICE_PMEM_REGION;
        if (dev->type == CXL_DAX_REGION_TYPE())
                return CXL_DEVICE_DAX_REGION;
        if (is_cxl_port(dev)) {
                if (is_cxl_root(to_cxl_port(dev)))
                        return CXL_DEVICE_ROOT;
                return CXL_DEVICE_PORT;
        }
        if (is_cxl_memdev(dev))
                return CXL_DEVICE_MEMORY_EXPANDER;
        if (dev->type == CXL_REGION_TYPE())
                return CXL_DEVICE_REGION;
        if (dev->type == &cxl_pmu_type)
                return CXL_DEVICE_PMU;
        return 0;
}
/* sysfs "modalias": emit the CXL modalias string for this device */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);
/* Attributes common to every CXL device (devtype, modalias) */
static struct attribute *cxl_base_attributes[] = {
        &dev_attr_devtype.attr,
        &dev_attr_modalias.attr,
        NULL,
};

/* Shared by the per-type attribute group lists below; non-static on purpose */
struct attribute_group cxl_base_attribute_group = {
        .attrs = cxl_base_attributes,
};
/* sysfs "start": base of the decoder's host physical address range */
static ssize_t start_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
/* admin-only read: HPA layout is privileged information */
static DEVICE_ATTR_ADMIN_RO(start);
/* sysfs "size": length of the decoder's host physical address range */
static ssize_t size_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);
/*
 * Generate a read-only sysfs attribute that reports "1"/"0" for whether
 * @flag is set in the decoder's flags word.
 */
#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
                           struct device_attribute *attr, char *buf) \
{                                                                    \
        struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
        return sysfs_emit(buf, "%s\n",                               \
                          (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

/* Capability / state flags exposed per decoder */
CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
  115. static ssize_t target_type_show(struct device *dev,
  116. struct device_attribute *attr, char *buf)
  117. {
  118. struct cxl_decoder *cxld = to_cxl_decoder(dev);
  119. switch (cxld->target_type) {
  120. case CXL_DECODER_DEVMEM:
  121. return sysfs_emit(buf, "accelerator\n");
  122. case CXL_DECODER_HOSTONLYMEM:
  123. return sysfs_emit(buf, "expander\n");
  124. }
  125. return -ENXIO;
  126. }
  127. static DEVICE_ATTR_RO(target_type);
  128. static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
  129. {
  130. struct cxl_decoder *cxld = &cxlsd->cxld;
  131. ssize_t offset = 0;
  132. int i, rc = 0;
  133. for (i = 0; i < cxld->interleave_ways; i++) {
  134. struct cxl_dport *dport = cxlsd->target[i];
  135. struct cxl_dport *next = NULL;
  136. if (!dport)
  137. break;
  138. if (i + 1 < cxld->interleave_ways)
  139. next = cxlsd->target[i + 1];
  140. rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
  141. next ? "," : "");
  142. if (rc < 0)
  143. return rc;
  144. offset += rc;
  145. }
  146. return offset;
  147. }
  148. static ssize_t target_list_show(struct device *dev,
  149. struct device_attribute *attr, char *buf)
  150. {
  151. struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
  152. ssize_t offset;
  153. int rc;
  154. guard(rwsem_read)(&cxl_region_rwsem);
  155. rc = emit_target_list(cxlsd, buf);
  156. if (rc < 0)
  157. return rc;
  158. offset = rc;
  159. rc = sysfs_emit_at(buf, offset, "\n");
  160. if (rc < 0)
  161. return rc;
  162. return offset + rc;
  163. }
  164. static DEVICE_ATTR_RO(target_list);
/* sysfs "mode" (read): name of the endpoint decoder's current DPA mode */
static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

        return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
}
  171. static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
  172. const char *buf, size_t len)
  173. {
  174. struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
  175. enum cxl_decoder_mode mode;
  176. ssize_t rc;
  177. if (sysfs_streq(buf, "pmem"))
  178. mode = CXL_DECODER_PMEM;
  179. else if (sysfs_streq(buf, "ram"))
  180. mode = CXL_DECODER_RAM;
  181. else
  182. return -EINVAL;
  183. rc = cxl_dpa_set_mode(cxled, mode);
  184. if (rc)
  185. return rc;
  186. return len;
  187. }
  188. static DEVICE_ATTR_RW(mode);
/*
 * sysfs "dpa_resource": start of the decoder's device physical address
 * allocation.  cxl_dpa_rwsem stabilizes the DPA allocation while reading.
 */
static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
                                 char *buf)
{
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

        guard(rwsem_read)(&cxl_dpa_rwsem);
        return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
}
static DEVICE_ATTR_RO(dpa_resource);
/* sysfs "dpa_size" (read): current device physical address allocation size */
static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
        resource_size_t size = cxl_dpa_size(cxled);

        return sysfs_emit(buf, "%pa\n", &size);
}
  204. static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
  205. const char *buf, size_t len)
  206. {
  207. struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
  208. unsigned long long size;
  209. ssize_t rc;
  210. rc = kstrtoull(buf, 0, &size);
  211. if (rc)
  212. return rc;
  213. if (!IS_ALIGNED(size, SZ_256M))
  214. return -EINVAL;
  215. rc = cxl_dpa_free(cxled);
  216. if (rc)
  217. return rc;
  218. if (size == 0)
  219. return len;
  220. rc = cxl_dpa_alloc(cxled, size);
  221. if (rc)
  222. return rc;
  223. return len;
  224. }
  225. static DEVICE_ATTR_RW(dpa_size);
/* sysfs "interleave_granularity": bytes per interleave stride for this decoder */
static ssize_t interleave_granularity_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
}
static DEVICE_ATTR_RO(interleave_granularity);
/* sysfs "interleave_ways": number of targets this decoder interleaves across */
static ssize_t interleave_ways_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
}
static DEVICE_ATTR_RO(interleave_ways);
/* sysfs "qos_class": QoS class of the root decoder (root decoders only) */
static ssize_t qos_class_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

        return sysfs_emit(buf, "%d\n", cxlrd->qos_class);
}
static DEVICE_ATTR_RO(qos_class);
/* Attributes shared by all decoder flavors */
static struct attribute *cxl_decoder_base_attrs[] = {
        &dev_attr_start.attr,
        &dev_attr_size.attr,
        &dev_attr_locked.attr,
        &dev_attr_interleave_granularity.attr,
        &dev_attr_interleave_ways.attr,
        NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
        .attrs = cxl_decoder_base_attrs,
};

/*
 * Root-decoder-specific attributes; the SET_CXL_REGION_ATTR() entries are
 * compiled out when region support is disabled.
 */
static struct attribute *cxl_decoder_root_attrs[] = {
        &dev_attr_cap_pmem.attr,
        &dev_attr_cap_ram.attr,
        &dev_attr_cap_type2.attr,
        &dev_attr_cap_type3.attr,
        &dev_attr_target_list.attr,
        &dev_attr_qos_class.attr,
        SET_CXL_REGION_ATTR(create_pmem_region)
        SET_CXL_REGION_ATTR(create_ram_region)
        SET_CXL_REGION_ATTR(delete_region)
        NULL,
};
/* True when the root decoder supports type-3 pmem region creation */
static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{
        unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;

        /* both flags must be present, not just one */
        return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}
/* True when the root decoder supports type-3 ram region creation */
static bool can_create_ram(struct cxl_root_decoder *cxlrd)
{
        unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM;

        /* both flags must be present, not just one */
        return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}
/*
 * Hide region-creation attributes that the decoder's capability flags do
 * not support; "delete_region" is shown if either pmem or ram regions can
 * be created.  Returning 0 hides the attribute, a->mode keeps it.
 */
static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

        if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
                return 0;

        if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd))
                return 0;

        if (a == CXL_REGION_ATTR(delete_region) &&
            !(can_create_pmem(cxlrd) || can_create_ram(cxlrd)))
                return 0;

        return a->mode;
}
static struct attribute_group cxl_decoder_root_attribute_group = {
        .attrs = cxl_decoder_root_attrs,
        .is_visible = cxl_root_decoder_visible,
};

/* Group list for root decoders: root-specific + base + common attributes */
static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
        &cxl_decoder_root_attribute_group,
        &cxl_decoder_base_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

/* Switch-decoder-specific attributes */
static struct attribute *cxl_decoder_switch_attrs[] = {
        &dev_attr_target_type.attr,
        &dev_attr_target_list.attr,
        SET_CXL_REGION_ATTR(region)
        NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
        .attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
        &cxl_decoder_switch_attribute_group,
        &cxl_decoder_base_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

/* Endpoint-decoder-specific attributes (DPA management lives here) */
static struct attribute *cxl_decoder_endpoint_attrs[] = {
        &dev_attr_target_type.attr,
        &dev_attr_mode.attr,
        &dev_attr_dpa_size.attr,
        &dev_attr_dpa_resource.attr,
        SET_CXL_REGION_ATTR(region)
        NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
        .attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
        &cxl_decoder_base_attribute_group,
        &cxl_decoder_endpoint_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};
/*
 * Common decoder teardown: return the decoder id to the parent port's ida
 * and drop the port reference (taken elsewhere, presumably at decoder
 * creation — not visible in this chunk).
 */
static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);

        ida_free(&port->decoder_ida, cxld->id);
        put_device(&port->dev);
}
/* device_type release for endpoint decoders: common teardown then free */
static void cxl_endpoint_decoder_release(struct device *dev)
{
        struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

        __cxl_decoder_release(&cxled->cxld);
        kfree(cxled);
}
/* device_type release for switch decoders: common teardown then free */
static void cxl_switch_decoder_release(struct device *dev)
{
        struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

        __cxl_decoder_release(&cxlsd->cxld);
        kfree(cxlsd);
}
/*
 * to_cxl_root_decoder() - checked cast from device to cxl_root_decoder
 *
 * Warns once and returns NULL if @dev is not a root decoder.
 */
struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
                          "not a cxl_root_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);
/*
 * device_type release for root decoders: return the memregion id (if one
 * was allocated; negative region_id means none), then common teardown.
 */
static void cxl_root_decoder_release(struct device *dev)
{
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

        if (atomic_read(&cxlrd->region_id) >= 0)
                memregion_free(atomic_read(&cxlrd->region_id));
        __cxl_decoder_release(&cxlrd->cxlsd.cxld);
        kfree(cxlrd);
}
/* device_type definitions used by the is_*_decoder() predicates below */
static const struct device_type cxl_decoder_endpoint_type = {
        .name = "cxl_decoder_endpoint",
        .release = cxl_endpoint_decoder_release,
        .groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
        .name = "cxl_decoder_switch",
        .release = cxl_switch_decoder_release,
        .groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
        .name = "cxl_decoder_root",
        .release = cxl_root_decoder_release,
        .groups = cxl_decoder_root_attribute_groups,
};
/* is_endpoint_decoder() - true if @dev is a CXL endpoint decoder */
bool is_endpoint_decoder(struct device *dev)
{
        return dev->type == &cxl_decoder_endpoint_type;
}
EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL);
/* is_root_decoder() - true if @dev is a CXL root decoder */
bool is_root_decoder(struct device *dev)
{
        return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
/*
 * is_switch_decoder() - true if @dev is a switch decoder.  A root decoder
 * embeds a switch decoder, so root decoders also match.
 */
bool is_switch_decoder(struct device *dev)
{
        return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);
/*
 * to_cxl_decoder() - checked cast from device to the base cxl_decoder
 *
 * Accepts any decoder flavor (switch, root, endpoint); warns once and
 * returns NULL otherwise.
 */
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev,
                          !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
                          "not a cxl_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
/*
 * to_cxl_endpoint_decoder() - checked cast to cxl_endpoint_decoder
 *
 * Warns once and returns NULL if @dev is not an endpoint decoder.
 */
struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
                          "not a cxl_endpoint_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);
/*
 * to_cxl_switch_decoder() - checked cast to cxl_switch_decoder
 *
 * Warns once and returns NULL if @dev is not a switch (or root) decoder.
 */
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
                          "not a cxl_switch_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);
/* Drop the endpoint's device reference and free the cxl_ep wrapper */
static void cxl_ep_release(struct cxl_ep *ep)
{
        put_device(ep->ep);
        kfree(ep);
}
/* Unhook @ep from @port's endpoint xarray and release it; NULL is a no-op */
static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
        if (!ep)
                return;
        /* the xarray is keyed by the endpoint device pointer */
        xa_erase(&port->endpoints, (unsigned long) ep->ep);
        cxl_ep_release(ep);
}
/*
 * device_type release for cxl_port: drop remaining endpoints, tear down
 * the xarrays, return the port id, and free the containing allocation.
 * Root ports are embedded in a struct cxl_root, so the outer object must
 * be freed in that case.
 */
static void cxl_port_release(struct device *dev)
{
        struct cxl_port *port = to_cxl_port(dev);
        unsigned long index;
        struct cxl_ep *ep;

        xa_for_each(&port->endpoints, index, ep)
                cxl_ep_remove(port, ep);
        xa_destroy(&port->endpoints);
        xa_destroy(&port->dports);
        xa_destroy(&port->regions);
        ida_free(&cxl_port_ida, port->id);
        if (is_cxl_root(port))
                kfree(to_cxl_root(port));
        else
                kfree(port);
}
  453. static ssize_t decoders_committed_show(struct device *dev,
  454. struct device_attribute *attr, char *buf)
  455. {
  456. struct cxl_port *port = to_cxl_port(dev);
  457. int rc;
  458. down_read(&cxl_region_rwsem);
  459. rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
  460. up_read(&cxl_region_rwsem);
  461. return rc;
  462. }
  463. static DEVICE_ATTR_RO(decoders_committed);
/* Port-specific sysfs attributes */
static struct attribute *cxl_port_attrs[] = {
        &dev_attr_decoders_committed.attr,
        NULL,
};

static struct attribute_group cxl_port_attribute_group = {
        .attrs = cxl_port_attrs,
};

static const struct attribute_group *cxl_port_attribute_groups[] = {
        &cxl_base_attribute_group,
        &cxl_port_attribute_group,
        NULL,
};

/* device_type backing is_cxl_port()/to_cxl_port() */
static const struct device_type cxl_port_type = {
        .name = "cxl_port",
        .release = cxl_port_release,
        .groups = cxl_port_attribute_groups,
};
/* is_cxl_port() - true if @dev is a cxl_port device */
bool is_cxl_port(const struct device *dev)
{
        return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);
/*
 * to_cxl_port() - checked cast from device to cxl_port
 *
 * Warns once and returns NULL if @dev is not a cxl_port.
 */
struct cxl_port *to_cxl_port(const struct device *dev)
{
        if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
                          "not a cxl_port device\n"))
                return NULL;
        return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);
/*
 * unregister_port() - devm action to tear down a cxl_port
 *
 * Determines which device lock the caller is expected to hold (varies
 * with the port's position in the hierarchy), asserts it, marks the port
 * dead, then unregisters the device.  @_port is the devm action cookie.
 */
static void unregister_port(void *_port)
{
        struct cxl_port *port = _port;
        struct cxl_port *parent;
        struct device *lock_dev;

        /* the root port has no cxl_port parent */
        if (is_cxl_root(port))
                parent = NULL;
        else
                parent = to_cxl_port(port->dev.parent);

        /*
         * CXL root port's and the first level of ports are unregistered
         * under the platform firmware device lock, all other ports are
         * unregistered while holding their parent port lock.
         */
        if (!parent)
                lock_dev = port->uport_dev;
        else if (is_cxl_root(parent))
                lock_dev = parent->uport_dev;
        else
                lock_dev = &parent->dev;

        device_lock_assert(lock_dev);
        port->dead = true;
        device_unregister(&port->dev);
}
/* devm action: remove the "uport" symlink created by devm_cxl_link_uport() */
static void cxl_unlink_uport(void *_port)
{
        struct cxl_port *port = _port;

        sysfs_remove_link(&port->dev.kobj, "uport");
}
  523. static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
  524. {
  525. int rc;
  526. rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj,
  527. "uport");
  528. if (rc)
  529. return rc;
  530. return devm_add_action_or_reset(host, cxl_unlink_uport, port);
  531. }
/* devm action: remove the "parent_dport" symlink */
static void cxl_unlink_parent_dport(void *_port)
{
        struct cxl_port *port = _port;

        sysfs_remove_link(&port->dev.kobj, "parent_dport");
}
  537. static int devm_cxl_link_parent_dport(struct device *host,
  538. struct cxl_port *port,
  539. struct cxl_dport *parent_dport)
  540. {
  541. int rc;
  542. if (!parent_dport)
  543. return 0;
  544. rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj,
  545. "parent_dport");
  546. if (rc)
  547. return rc;
  548. return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port);
  549. }
/* Lock class so each depth of port gets its own lockdep subclass (below) */
static struct lock_class_key cxl_port_key;

/*
 * cxl_port_alloc() - allocate and initialize (but not register) a cxl_port
 * @uport_dev: the upstream device this port services
 * @parent_dport: the downstream port connecting to this port; NULL for the
 *                platform root port
 *
 * A root port is embedded in a larger struct cxl_root; everything else is
 * a bare struct cxl_port.  The __free(kfree) cleanup attributes free the
 * allocation on early error return; no_free_ptr() transfers ownership out
 * of the scoped cleanup once the allocation is committed.
 *
 * Returns the initialized port or an ERR_PTR().  The caller is responsible
 * for device_add()/put_device() on &port->dev.
 */
static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
                                       struct cxl_dport *parent_dport)
{
        struct cxl_root *cxl_root __free(kfree) = NULL;
        struct cxl_port *port, *_port __free(kfree) = NULL;
        struct device *dev;
        int rc;

        /* No parent_dport, root cxl_port */
        if (!parent_dport) {
                cxl_root = kzalloc(sizeof(*cxl_root), GFP_KERNEL);
                if (!cxl_root)
                        return ERR_PTR(-ENOMEM);
        } else {
                _port = kzalloc(sizeof(*port), GFP_KERNEL);
                if (!_port)
                        return ERR_PTR(-ENOMEM);
        }

        /* on failure the __free(kfree) locals release the allocation */
        rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
        if (rc < 0)
                return ERR_PTR(rc);

        /* commit: detach from scoped cleanup, ownership moves to @port */
        if (cxl_root)
                port = &no_free_ptr(cxl_root)->port;
        else
                port = no_free_ptr(_port);

        port->id = rc;
        port->uport_dev = uport_dev;

        /*
         * The top-level cxl_port "cxl_root" does not have a cxl_port as
         * its parent and it does not have any corresponding component
         * registers as its decode is described by a fixed platform
         * description.
         */
        dev = &port->dev;
        if (parent_dport) {
                struct cxl_port *parent_port = parent_dport->port;
                struct cxl_port *iter;

                dev->parent = &parent_port->dev;
                port->depth = parent_port->depth + 1;
                port->parent_dport = parent_dport;

                /*
                 * walk to the host bridge, or the first ancestor that knows
                 * the host bridge
                 */
                iter = port;
                while (!iter->host_bridge &&
                       !is_cxl_root(to_cxl_port(iter->dev.parent)))
                        iter = to_cxl_port(iter->dev.parent);
                if (iter->host_bridge)
                        port->host_bridge = iter->host_bridge;
                else if (parent_dport->rch)
                        /* RCH case: the dport device itself is the bridge */
                        port->host_bridge = parent_dport->dport_dev;
                else
                        port->host_bridge = iter->uport_dev;
                dev_dbg(uport_dev, "host-bridge: %s\n",
                        dev_name(port->host_bridge));
        } else
                dev->parent = uport_dev;

        ida_init(&port->decoder_ida);
        /* -1 == "no decoders mapped / committed yet" sentinels */
        port->hdm_end = -1;
        port->commit_end = -1;
        xa_init(&port->dports);
        xa_init(&port->endpoints);
        xa_init(&port->regions);

        device_initialize(dev);
        /* per-depth lock subclass: parent/child port locks may nest */
        lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
        device_set_pm_not_required(dev);
        dev->bus = &cxl_bus_type;
        dev->type = &cxl_port_type;

        return port;
}
  621. static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
  622. resource_size_t component_reg_phys)
  623. {
  624. *map = (struct cxl_register_map) {
  625. .host = host,
  626. .reg_type = CXL_REGLOC_RBI_EMPTY,
  627. .resource = component_reg_phys,
  628. };
  629. if (component_reg_phys == CXL_RESOURCE_NONE)
  630. return 0;
  631. map->reg_type = CXL_REGLOC_RBI_COMPONENT;
  632. map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;
  633. return cxl_setup_regs(map);
  634. }
/*
 * Set up the port's component register map.  Platform (non-PCI) uport
 * devices have no component registers to probe, so they succeed trivially.
 */
static int cxl_port_setup_regs(struct cxl_port *port,
                               resource_size_t component_reg_phys)
{
        if (dev_is_platform(port->uport_dev))
                return 0;
        return cxl_setup_comp_regs(&port->dev, &port->reg_map,
                                   component_reg_phys);
}
/*
 * Set up a dport's component register map.  Platform dport devices are
 * skipped like in cxl_port_setup_regs().  @host may be NULL at this point,
 * hence the post-probe fixup of reg_map.host.
 */
static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
                                resource_size_t component_reg_phys)
{
        int rc;

        if (dev_is_platform(dport->dport_dev))
                return 0;

        /*
         * use @dport->dport_dev for the context for error messages during
         * register probing, and fixup @host after the fact, since @host may be
         * NULL.
         */
        rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map,
                                 component_reg_phys);
        dport->reg_map.host = host;
        return rc;
}
/* debugfs attribute listing the EINJ-injectable CXL error types */
DEFINE_SHOW_ATTRIBUTE(einj_cxl_available_error_type);

/* debugfs write hook: inject error @type against the dport in @data */
static int cxl_einj_inject(void *data, u64 type)
{
	struct cxl_dport *dport = data;

	/* RCH dports are targeted by RCRB base, VH dports by PCI SBDF */
	if (dport->rch)
		return einj_cxl_inject_rch_error(dport->rcrb.base, type);

	return einj_cxl_inject_error(to_pci_dev(dport->dport_dev), type);
}
DEFINE_DEBUGFS_ATTRIBUTE(cxl_einj_inject_fops, NULL, cxl_einj_inject,
			 "0x%llx\n");
  669. static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport)
  670. {
  671. struct dentry *dir;
  672. if (!einj_cxl_is_initialized())
  673. return;
  674. /*
  675. * dport_dev needs to be a PCIe port for CXL 2.0+ ports because
  676. * EINJ expects a dport SBDF to be specified for 2.0 error injection.
  677. */
  678. if (!dport->rch && !dev_is_pci(dport->dport_dev))
  679. return;
  680. dir = cxl_debugfs_create_dir(dev_name(dport->dport_dev));
  681. debugfs_create_file("einj_inject", 0200, dir, dport,
  682. &cxl_einj_inject_fops);
  683. }
/*
 * Name the port ("endpointN", "portN", or "rootN"), stage its register
 * map, and publish it via device_add(). The scope-based cleanup drops
 * the device reference on any error return.
 */
static int cxl_port_add(struct cxl_port *port,
			resource_size_t component_reg_phys,
			struct cxl_dport *parent_dport)
{
	/* auto-put on error return; disarmed on success below */
	struct device *dev __free(put_device) = &port->dev;
	int rc;

	if (is_cxl_memdev(port->uport_dev)) {
		struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
		struct cxl_dev_state *cxlds = cxlmd->cxlds;

		rc = dev_set_name(dev, "endpoint%d", port->id);
		if (rc)
			return rc;

		/*
		 * The endpoint driver already enumerated the component and RAS
		 * registers. Reuse that enumeration while prepping them to be
		 * mapped by the cxl_port driver.
		 */
		port->reg_map = cxlds->reg_map;
		port->reg_map.host = &port->dev;
		cxlmd->endpoint = port;
	} else if (parent_dport) {
		rc = dev_set_name(dev, "port%d", port->id);
		if (rc)
			return rc;

		rc = cxl_port_setup_regs(port, component_reg_phys);
		if (rc)
			return rc;
	} else {
		/* no parent dport and not a memdev: this is a root port */
		rc = dev_set_name(dev, "root%d", port->id);
		if (rc)
			return rc;
	}

	rc = device_add(dev);
	if (rc)
		return rc;

	/* Inhibit the cleanup function invoked */
	dev = NULL;

	return 0;
}
/*
 * Allocate and register a new cxl_port, then arrange devm-based
 * unregistration and sysfs linkage relative to @host. The registration
 * order of the devm actions is relied upon by the "bottom-up" teardown
 * paths (see delete_switch_port()/delete_endpoint()).
 */
static struct cxl_port *__devm_cxl_add_port(struct device *host,
					    struct device *uport_dev,
					    resource_size_t component_reg_phys,
					    struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	int rc;

	port = cxl_port_alloc(uport_dev, parent_dport);
	if (IS_ERR(port))
		return port;

	rc = cxl_port_add(port, component_reg_phys, parent_dport);
	if (rc)
		return ERR_PTR(rc);

	/* from here, @host release unregisters the port */
	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	/* sysfs link from the port to its uport device (helper elsewhere) */
	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	/* sysfs link to the parent dport, when one exists */
	rc = devm_cxl_link_parent_dport(host, port, parent_dport);
	if (rc)
		return ERR_PTR(rc);

	/* root ports have no parent_dport, skip latency accounting there */
	if (parent_dport && dev_is_pci(uport_dev))
		port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev));

	return port;
}
  749. /**
  750. * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
  751. * @host: host device for devm operations
  752. * @uport_dev: "physical" device implementing this upstream port
  753. * @component_reg_phys: (optional) for configurable cxl_port instances
  754. * @parent_dport: next hop up in the CXL memory decode hierarchy
  755. */
  756. struct cxl_port *devm_cxl_add_port(struct device *host,
  757. struct device *uport_dev,
  758. resource_size_t component_reg_phys,
  759. struct cxl_dport *parent_dport)
  760. {
  761. struct cxl_port *port, *parent_port;
  762. port = __devm_cxl_add_port(host, uport_dev, component_reg_phys,
  763. parent_dport);
  764. parent_port = parent_dport ? parent_dport->port : NULL;
  765. if (IS_ERR(port)) {
  766. dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n",
  767. parent_port ? " port to " : "",
  768. parent_port ? dev_name(&parent_port->dev) : "",
  769. parent_port ? "" : " root port",
  770. PTR_ERR(port));
  771. } else {
  772. dev_dbg(uport_dev, "%s added%s%s%s\n",
  773. dev_name(&port->dev),
  774. parent_port ? " to " : "",
  775. parent_port ? dev_name(&parent_port->dev) : "",
  776. parent_port ? "" : " (root port)");
  777. }
  778. return port;
  779. }
  780. EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
  781. struct cxl_root *devm_cxl_add_root(struct device *host,
  782. const struct cxl_root_ops *ops)
  783. {
  784. struct cxl_root *cxl_root;
  785. struct cxl_port *port;
  786. port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
  787. if (IS_ERR(port))
  788. return ERR_CAST(port);
  789. cxl_root = to_cxl_root(port);
  790. cxl_root->ops = ops;
  791. return cxl_root;
  792. }
  793. EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, CXL);
  794. struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
  795. {
  796. /* There is no pci_bus associated with a CXL platform-root port */
  797. if (is_cxl_root(port))
  798. return NULL;
  799. if (dev_is_pci(port->uport_dev)) {
  800. struct pci_dev *pdev = to_pci_dev(port->uport_dev);
  801. return pdev->subordinate;
  802. }
  803. return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev);
  804. }
  805. EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);
  806. static void unregister_pci_bus(void *uport_dev)
  807. {
  808. xa_erase(&cxl_root_buses, (unsigned long)uport_dev);
  809. }
  810. int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
  811. struct pci_bus *bus)
  812. {
  813. int rc;
  814. if (dev_is_pci(uport_dev))
  815. return -EINVAL;
  816. rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus,
  817. GFP_KERNEL);
  818. if (rc)
  819. return rc;
  820. return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev);
  821. }
  822. EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);
  823. static bool dev_is_cxl_root_child(struct device *dev)
  824. {
  825. struct cxl_port *port, *parent;
  826. if (!is_cxl_port(dev))
  827. return false;
  828. port = to_cxl_port(dev);
  829. if (is_cxl_root(port))
  830. return false;
  831. parent = to_cxl_port(port->dev.parent);
  832. if (is_cxl_root(parent))
  833. return true;
  834. return false;
  835. }
  836. struct cxl_root *find_cxl_root(struct cxl_port *port)
  837. {
  838. struct cxl_port *iter = port;
  839. while (iter && !is_cxl_root(iter))
  840. iter = to_cxl_port(iter->dev.parent);
  841. if (!iter)
  842. return NULL;
  843. get_device(&iter->dev);
  844. return to_cxl_root(iter);
  845. }
  846. EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
  847. void put_cxl_root(struct cxl_root *cxl_root)
  848. {
  849. if (!cxl_root)
  850. return;
  851. put_device(&cxl_root->port.dev);
  852. }
  853. EXPORT_SYMBOL_NS_GPL(put_cxl_root, CXL);
  854. static struct cxl_dport *find_dport(struct cxl_port *port, int id)
  855. {
  856. struct cxl_dport *dport;
  857. unsigned long index;
  858. device_lock_assert(&port->dev);
  859. xa_for_each(&port->dports, index, dport)
  860. if (dport->port_id == id)
  861. return dport;
  862. return NULL;
  863. }
/*
 * Insert @dport into @port's dport xarray (keyed by dport_dev), rejecting
 * duplicate port ids. Caller must hold the port device lock.
 */
static int add_dport(struct cxl_port *port, struct cxl_dport *dport)
{
	struct cxl_dport *dup;
	int rc;

	device_lock_assert(&port->dev);
	dup = find_dport(port, dport->port_id);
	if (dup) {
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			dport->port_id, dev_name(dport->dport_dev),
			dev_name(dup->dport_dev));
		return -EBUSY;
	}

	rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport,
		       GFP_KERNEL);
	if (rc)
		return rc;

	port->nr_dports++;
	return 0;
}
/*
 * Since root-level CXL dports cannot be enumerated by PCI they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

/* counterpart of cond_cxl_root_lock() */
static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}
  901. static void cxl_dport_remove(void *data)
  902. {
  903. struct cxl_dport *dport = data;
  904. struct cxl_port *port = dport->port;
  905. xa_erase(&port->dports, (unsigned long) dport->dport_dev);
  906. put_device(dport->dport_dev);
  907. }
  908. static void cxl_dport_unlink(void *data)
  909. {
  910. struct cxl_dport *dport = data;
  911. struct cxl_port *port = dport->port;
  912. char link_name[CXL_TARGET_STRLEN];
  913. sprintf(link_name, "dport%d", dport->port_id);
  914. sysfs_remove_link(&port->dev.kobj, link_name);
  915. }
/*
 * Common VH/RCH dport registration. An @rcrb of CXL_RESOURCE_NONE selects
 * the VH path (@component_reg_phys used directly); otherwise component
 * registers are extracted from the RCRB and the dport is marked RCH.
 */
static struct cxl_dport *
__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
		     int port_id, resource_size_t component_reg_phys,
		     resource_size_t rcrb)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	/*
	 * Root dports are devm-hosted by the platform driver device
	 * (@port->uport_dev), all others by the port itself.
	 */
	if (is_cxl_root(port))
		host = port->uport_dev;
	else
		host = &port->dev;

	/* devm actions on an unbound host would never fire */
	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	dport->dport_dev = dport_dev;
	dport->port_id = port_id;
	dport->port = port;

	if (rcrb == CXL_RESOURCE_NONE) {
		/* VH dport: component registers provided directly */
		rc = cxl_dport_setup_regs(&port->dev, dport,
					  component_reg_phys);
		if (rc)
			return ERR_PTR(rc);
	} else {
		/* RCH dport: locate component registers via the RCRB */
		dport->rcrb.base = rcrb;
		component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
							 CXL_RCRB_DOWNSTREAM);
		if (component_reg_phys == CXL_RESOURCE_NONE) {
			dev_warn(dport_dev, "Invalid Component Registers in RCRB");
			return ERR_PTR(-ENXIO);
		}

		/*
		 * RCH @dport is not ready to map until associated with its
		 * memdev
		 */
		rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
		if (rc)
			return ERR_PTR(rc);

		dport->rch = true;
	}

	if (component_reg_phys != CXL_RESOURCE_NONE)
		dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
			&component_reg_phys);

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	/* reference dropped by cxl_dport_remove() */
	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	if (dev_is_pci(dport_dev))
		dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev));

	cxl_debugfs_create_dport_dir(dport);

	return dport;
}
  988. /**
  989. * devm_cxl_add_dport - append VH downstream port data to a cxl_port
  990. * @port: the cxl_port that references this dport
  991. * @dport_dev: firmware or PCI device representing the dport
  992. * @port_id: identifier for this dport in a decoder's target list
  993. * @component_reg_phys: optional location of CXL component registers
  994. *
  995. * Note that dports are appended to the devm release action's of the
  996. * either the port's host (for root ports), or the port itself (for
  997. * switch ports)
  998. */
  999. struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
  1000. struct device *dport_dev, int port_id,
  1001. resource_size_t component_reg_phys)
  1002. {
  1003. struct cxl_dport *dport;
  1004. dport = __devm_cxl_add_dport(port, dport_dev, port_id,
  1005. component_reg_phys, CXL_RESOURCE_NONE);
  1006. if (IS_ERR(dport)) {
  1007. dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
  1008. dev_name(&port->dev), PTR_ERR(dport));
  1009. } else {
  1010. dev_dbg(dport_dev, "dport added to %s\n",
  1011. dev_name(&port->dev));
  1012. }
  1013. return dport;
  1014. }
  1015. EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
  1016. /**
  1017. * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
  1018. * @port: the cxl_port that references this dport
  1019. * @dport_dev: firmware or PCI device representing the dport
  1020. * @port_id: identifier for this dport in a decoder's target list
  1021. * @rcrb: mandatory location of a Root Complex Register Block
  1022. *
  1023. * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
  1024. */
  1025. struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
  1026. struct device *dport_dev, int port_id,
  1027. resource_size_t rcrb)
  1028. {
  1029. struct cxl_dport *dport;
  1030. if (rcrb == CXL_RESOURCE_NONE) {
  1031. dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
  1032. return ERR_PTR(-EINVAL);
  1033. }
  1034. dport = __devm_cxl_add_dport(port, dport_dev, port_id,
  1035. CXL_RESOURCE_NONE, rcrb);
  1036. if (IS_ERR(dport)) {
  1037. dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
  1038. dev_name(&port->dev), PTR_ERR(dport));
  1039. } else {
  1040. dev_dbg(dport_dev, "RCH dport added to %s\n",
  1041. dev_name(&port->dev));
  1042. }
  1043. return dport;
  1044. }
  1045. EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
  1046. static int add_ep(struct cxl_ep *new)
  1047. {
  1048. struct cxl_port *port = new->dport->port;
  1049. guard(device)(&port->dev);
  1050. if (port->dead)
  1051. return -ENXIO;
  1052. return xa_insert(&port->endpoints, (unsigned long)new->ep,
  1053. new, GFP_KERNEL);
  1054. }
  1055. /**
  1056. * cxl_add_ep - register an endpoint's interest in a port
  1057. * @dport: the dport that routes to @ep_dev
  1058. * @ep_dev: device representing the endpoint
  1059. *
  1060. * Intermediate CXL ports are scanned based on the arrival of endpoints.
  1061. * When those endpoints depart the port can be destroyed once all
  1062. * endpoints that care about that port have been removed.
  1063. */
  1064. static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
  1065. {
  1066. struct cxl_ep *ep;
  1067. int rc;
  1068. ep = kzalloc(sizeof(*ep), GFP_KERNEL);
  1069. if (!ep)
  1070. return -ENOMEM;
  1071. ep->ep = get_device(ep_dev);
  1072. ep->dport = dport;
  1073. rc = add_ep(ep);
  1074. if (rc)
  1075. cxl_ep_release(ep);
  1076. return rc;
  1077. }
/* Search context for find_cxl_port() / find_cxl_port_at() bus walks */
struct cxl_find_port_ctx {
	const struct device *dport_dev;		/* dport device to match */
	const struct cxl_port *parent_port;	/* optional parent constraint */
	struct cxl_dport **dport;		/* out: matched dport (may be NULL) */
};
  1083. static int match_port_by_dport(struct device *dev, const void *data)
  1084. {
  1085. const struct cxl_find_port_ctx *ctx = data;
  1086. struct cxl_dport *dport;
  1087. struct cxl_port *port;
  1088. if (!is_cxl_port(dev))
  1089. return 0;
  1090. if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
  1091. return 0;
  1092. port = to_cxl_port(dev);
  1093. dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
  1094. if (ctx->dport)
  1095. *ctx->dport = dport;
  1096. return dport != NULL;
  1097. }
  1098. static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
  1099. {
  1100. struct device *dev;
  1101. if (!ctx->dport_dev)
  1102. return NULL;
  1103. dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
  1104. if (dev)
  1105. return to_cxl_port(dev);
  1106. return NULL;
  1107. }
  1108. static struct cxl_port *find_cxl_port(struct device *dport_dev,
  1109. struct cxl_dport **dport)
  1110. {
  1111. struct cxl_find_port_ctx ctx = {
  1112. .dport_dev = dport_dev,
  1113. .dport = dport,
  1114. };
  1115. struct cxl_port *port;
  1116. port = __find_cxl_port(&ctx);
  1117. return port;
  1118. }
  1119. static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
  1120. struct device *dport_dev,
  1121. struct cxl_dport **dport)
  1122. {
  1123. struct cxl_find_port_ctx ctx = {
  1124. .dport_dev = dport_dev,
  1125. .parent_port = parent_port,
  1126. .dport = dport,
  1127. };
  1128. struct cxl_port *port;
  1129. port = __find_cxl_port(&ctx);
  1130. return port;
  1131. }
  1132. /*
  1133. * All users of grandparent() are using it to walk PCIe-like switch port
  1134. * hierarchy. A PCIe switch is comprised of a bridge device representing the
  1135. * upstream switch port and N bridges representing downstream switch ports. When
  1136. * bridges stack the grand-parent of a downstream switch port is another
  1137. * downstream switch port in the immediate ancestor switch.
  1138. */
  1139. static struct device *grandparent(struct device *dev)
  1140. {
  1141. if (dev && dev->parent)
  1142. return dev->parent->parent;
  1143. return NULL;
  1144. }
  1145. static struct device *endpoint_host(struct cxl_port *endpoint)
  1146. {
  1147. struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
  1148. if (is_cxl_root(port))
  1149. return port->uport_dev;
  1150. return &port->dev;
  1151. }
/*
 * devm release action (armed in cxl_endpoint_autoremove()) that tears down
 * an endpoint port when its memdev departs ("bottom-up" removal).
 */
static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = cxlmd->endpoint;
	struct device *host = endpoint_host(endpoint);

	scoped_guard(device, host) {
		/* only reap while host is bound and port not already dead */
		if (host->driver && !endpoint->dead) {
			devm_release_action(host, cxl_unlink_parent_dport, endpoint);
			devm_release_action(host, cxl_unlink_uport, endpoint);
			devm_release_action(host, unregister_port, endpoint);
		}
		cxlmd->endpoint = NULL;
	}
	/* drop the references taken in cxl_endpoint_autoremove() */
	put_device(&endpoint->dev);
	put_device(host);
}
  1168. int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
  1169. {
  1170. struct device *host = endpoint_host(endpoint);
  1171. struct device *dev = &cxlmd->dev;
  1172. get_device(host);
  1173. get_device(&endpoint->dev);
  1174. cxlmd->depth = endpoint->depth;
  1175. return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
  1176. }
  1177. EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order, and for dports to have already been
 * destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{
	/* release in reverse of __devm_cxl_add_port() registration order */
	devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}
  1193. static void reap_dports(struct cxl_port *port)
  1194. {
  1195. struct cxl_dport *dport;
  1196. unsigned long index;
  1197. device_lock_assert(&port->dev);
  1198. xa_for_each(&port->dports, index, dport) {
  1199. devm_release_action(&port->dev, cxl_dport_unlink, dport);
  1200. devm_release_action(&port->dev, cxl_dport_remove, dport);
  1201. devm_kfree(&port->dev, dport);
  1202. }
  1203. }
/* Match context for port_has_memdev() during cxl_detach_ep() */
struct detach_ctx {
	struct cxl_memdev *cxlmd;	/* memdev being detached */
	int depth;			/* hierarchy depth of ports to match */
};
  1208. static int port_has_memdev(struct device *dev, const void *data)
  1209. {
  1210. const struct detach_ctx *ctx = data;
  1211. struct cxl_port *port;
  1212. if (!is_cxl_port(dev))
  1213. return 0;
  1214. port = to_cxl_port(dev);
  1215. if (port->depth != ctx->depth)
  1216. return 0;
  1217. return !!cxl_ep_load(port, ctx->cxlmd);
  1218. }
/*
 * devm release action for a memdev: walk from just below the endpoint
 * (depth cxlmd->depth - 1) towards the root, dropping this memdev's
 * endpoint registration at each level and garbage collecting any
 * dynamically enumerated port whose last endpoint just departed.
 */
static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;

	for (int i = cxlmd->depth - 1; i >= 1; i--) {
		struct cxl_port *port, *parent_port;
		struct detach_ctx ctx = {
			.cxlmd = cxlmd,
			.depth = i,
		};
		struct cxl_ep *ep;
		bool died = false;

		struct device *dev __free(put_device) =
			bus_find_device(&cxl_bus_type, NULL, &ctx, port_has_memdev);
		if (!dev)
			continue;

		port = to_cxl_port(dev);
		parent_port = to_cxl_port(port->dev.parent);

		/* parent lock held across the delete_switch_port() teardown */
		device_lock(&parent_port->dev);
		device_lock(&port->dev);
		ep = cxl_ep_load(port, cxlmd);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_remove(port, ep);
		if (ep && !port->dead && xa_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port) && parent_port->dev.driver) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			died = true;
			port->dead = true;
			reap_dports(port);
		}
		device_unlock(&port->dev);

		if (died) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port);
		}
		device_unlock(&parent_port->dev);
	}
}
  1262. static resource_size_t find_component_registers(struct device *dev)
  1263. {
  1264. struct cxl_register_map map;
  1265. struct pci_dev *pdev;
  1266. /*
  1267. * Theoretically, CXL component registers can be hosted on a
  1268. * non-PCI device, in practice, only cxl_test hits this case.
  1269. */
  1270. if (!dev_is_pci(dev))
  1271. return CXL_RESOURCE_NONE;
  1272. pdev = to_pci_dev(dev);
  1273. cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
  1274. return map.resource;
  1275. }
/*
 * Create (if needed) the cxl_port for @dport_dev under its parent port
 * and register @cxlmd's interest in it. Returns -EAGAIN when the parent
 * port itself has not been enumerated yet so the caller walks further up.
 */
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration, fail for now to
		 * be re-probed after platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	struct cxl_port *parent_port __free(put_cxl_port) =
		find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	/*
	 * Definition with __free() here to keep the sequence of
	 * dereferencing the device of the port before the parent_port releasing.
	 */
	struct cxl_port *port __free(put_cxl_port) = NULL;
	scoped_guard(device, &parent_port->dev) {
		/* a disabled parent cannot host new child ports */
		if (!parent_port->dev.driver) {
			dev_warn(&cxlmd->dev,
				 "port %s:%s disabled, failed to enumerate CXL.mem\n",
				 dev_name(&parent_port->dev), dev_name(uport_dev));
			return -ENXIO;
		}

		port = find_cxl_port_at(parent_port, dport_dev, &dport);
		if (!port) {
			component_reg_phys = find_component_registers(uport_dev);
			port = devm_cxl_add_port(&parent_port->dev, uport_dev,
						 component_reg_phys, parent_dport);
			if (IS_ERR(port))
				return PTR_ERR(port);

			/* retry find to pick up the new dport information */
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
			if (!port)
				return -ENXIO;
		}
	}

	dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
		dev_name(&port->dev), dev_name(port->uport_dev));

	rc = cxl_add_ep(dport, &cxlmd->dev);
	if (rc == -EBUSY) {
		/*
		 * "can't" happen, but this error code means
		 * something to the caller, so translate it.
		 */
		rc = -ENXIO;
	}

	return rc;
}
/*
 * devm_cxl_enumerate_ports - discover and register every cxl_port in
 * @cxlmd's device ancestry, from the endpoint up to the CXL root.
 * Teardown is armed via cxl_detach_ep() before any registration occurs.
 */
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	/*
	 * Skip intermediate port enumeration in the RCH case, there
	 * are no ports in between a host bridge and an endpoint.
	 */
	if (cxlmd->cxlds->rcd)
		return 0;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;

		/*
		 * The terminal "grandparent" in PCI is NULL and @platform_bus
		 * for platform devices
		 */
		if (!dport_dev || dport_dev == &platform_bus)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));

		struct cxl_port *port __free(put_cxl_port) =
			find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev),
				dev_name(port->uport_dev));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EBUSY)
				return rc;

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev))
				continue;

			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
  1411. struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
  1412. struct cxl_dport **dport)
  1413. {
  1414. return find_cxl_port(pdev->dev.parent, dport);
  1415. }
  1416. EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL);
  1417. struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
  1418. struct cxl_dport **dport)
  1419. {
  1420. return find_cxl_port(grandparent(&cxlmd->dev), dport);
  1421. }
  1422. EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
/*
 * Translate @target_map (one dport port_id per interleave way) into dport
 * pointers cached in @cxlsd->target[]. A NULL @target_map is a no-op.
 * Caller must hold the port device lock.
 */
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (xa_empty(&port->dports))
		return -EINVAL;

	/* target list updates are published under the region rwsem */
	guard(rwsem_write)(&cxl_region_rwsem);
	for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport)
			return -ENXIO;
		cxlsd->target[i] = dport;
	}

	return 0;
}
/* lockdep class for decoder device locks */
static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enable some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering via
 * cxl_decoder_add()
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
	struct device *dev;
	int rc;

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		return rc;

	/* need parent to stick around to release the id */
	get_device(&port->dev);
	cxld->id = rc;

	dev = &cxld->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* Pre initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	/* empty range: start 0, end -1 */
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
}
  1478. static int cxl_switch_decoder_init(struct cxl_port *port,
  1479. struct cxl_switch_decoder *cxlsd,
  1480. int nr_targets)
  1481. {
  1482. if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
  1483. return -EINVAL;
  1484. cxlsd->nr_targets = nr_targets;
  1485. return cxl_decoder_init(port, &cxlsd->cxld);
  1486. }
/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		/* decoder device not initialized yet, plain kfree() suffices */
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	mutex_init(&cxlrd->range_lock);

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	/*
	 * cxl_root_decoder_release() special cases negative ids to
	 * detect memregion_alloc() failures.
	 */
	atomic_set(&cxlrd->region_id, -1);
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0) {
		/* device was initialized above: release via put_device() */
		put_device(&cxld->dev);
		return ERR_PTR(rc);
	}

	atomic_set(&cxlrd->region_id, rc);
	cxlrd->qos_class = CXL_QOS_CLASS_INVALID;
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
  1534. /**
  1535. * cxl_switch_decoder_alloc - Allocate a switch level decoder
  1536. * @port: owning CXL switch port of this decoder
  1537. * @nr_targets: max number of dynamically addressable downstream targets
  1538. *
  1539. * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
  1540. * 'switch' decoder is any decoder that can be enumerated by PCIe
  1541. * topology and the HDM Decoder Capability. This includes the decoders
  1542. * that sit between Switch Upstream Ports / Switch Downstream Ports and
  1543. * Host Bridges / Root Ports.
  1544. */
  1545. struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
  1546. unsigned int nr_targets)
  1547. {
  1548. struct cxl_switch_decoder *cxlsd;
  1549. struct cxl_decoder *cxld;
  1550. int rc;
  1551. if (is_cxl_root(port) || is_cxl_endpoint(port))
  1552. return ERR_PTR(-EINVAL);
  1553. cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
  1554. if (!cxlsd)
  1555. return ERR_PTR(-ENOMEM);
  1556. rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
  1557. if (rc) {
  1558. kfree(cxlsd);
  1559. return ERR_PTR(rc);
  1560. }
  1561. cxld = &cxlsd->cxld;
  1562. cxld->dev.type = &cxl_decoder_switch_type;
  1563. return cxlsd;
  1564. }
  1565. EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
  1566. /**
  1567. * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
  1568. * @port: owning port of this decoder
  1569. *
  1570. * Return: A new cxl decoder to be registered by cxl_decoder_add()
  1571. */
  1572. struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
  1573. {
  1574. struct cxl_endpoint_decoder *cxled;
  1575. struct cxl_decoder *cxld;
  1576. int rc;
  1577. if (!is_cxl_endpoint(port))
  1578. return ERR_PTR(-EINVAL);
  1579. cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
  1580. if (!cxled)
  1581. return ERR_PTR(-ENOMEM);
  1582. cxled->pos = -1;
  1583. cxld = &cxled->cxld;
  1584. rc = cxl_decoder_init(port, cxld);
  1585. if (rc) {
  1586. kfree(cxled);
  1587. return ERR_PTR(rc);
  1588. }
  1589. cxld->dev.type = &cxl_decoder_endpoint_type;
  1590. return cxled;
  1591. }
  1592. EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *	       traffic to. These numbers should correspond with the port number
 *	       in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a hostbridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    @cxld to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	/* A decoder must interleave across at least one target */
	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		/* Switch and root decoders carry a target list to program */
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		rc = decoder_populate_targets(cxlsd, port, target_map);
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			/*
			 * Only fail when the decoder is already active;
			 * inactive decoders are registered regardless.
			 */
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
  1640. /**
  1641. * cxl_decoder_add - Add a decoder with targets
  1642. * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
  1643. * @target_map: A list of downstream ports that this decoder can direct memory
  1644. * traffic to. These numbers should correspond with the port number
  1645. * in the PCIe Link Capabilities structure.
  1646. *
  1647. * This is the unlocked variant of cxl_decoder_add_locked().
  1648. * See cxl_decoder_add_locked().
  1649. *
  1650. * Context: Process context. Takes and releases the device lock of the port that
  1651. * owns the @cxld.
  1652. */
  1653. int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
  1654. {
  1655. struct cxl_port *port;
  1656. if (WARN_ON_ONCE(!cxld))
  1657. return -EINVAL;
  1658. if (WARN_ON_ONCE(IS_ERR(cxld)))
  1659. return PTR_ERR(cxld);
  1660. port = to_cxl_port(cxld->dev.parent);
  1661. guard(device)(&port->dev);
  1662. return cxl_decoder_add_locked(cxld, target_map);
  1663. }
  1664. EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
  1665. static void cxld_unregister(void *dev)
  1666. {
  1667. struct cxl_endpoint_decoder *cxled;
  1668. if (is_endpoint_decoder(dev)) {
  1669. cxled = to_cxl_endpoint_decoder(dev);
  1670. cxl_decoder_kill_region(cxled);
  1671. }
  1672. device_unregister(dev);
  1673. }
/*
 * Arrange for @cxld to be unregistered automatically when @host is
 * unbound (devm-managed); unwinds immediately on registration failure.
 */
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
  1679. /**
  1680. * __cxl_driver_register - register a driver for the cxl bus
  1681. * @cxl_drv: cxl driver structure to attach
  1682. * @owner: owning module/driver
  1683. * @modname: KBUILD_MODNAME for parent driver
  1684. */
  1685. int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
  1686. const char *modname)
  1687. {
  1688. if (!cxl_drv->probe) {
  1689. pr_debug("%s ->probe() must be specified\n", modname);
  1690. return -EINVAL;
  1691. }
  1692. if (!cxl_drv->name) {
  1693. pr_debug("%s ->name must be specified\n", modname);
  1694. return -EINVAL;
  1695. }
  1696. if (!cxl_drv->id) {
  1697. pr_debug("%s ->id must be specified\n", modname);
  1698. return -EINVAL;
  1699. }
  1700. cxl_drv->drv.bus = &cxl_bus_type;
  1701. cxl_drv->drv.owner = owner;
  1702. cxl_drv->drv.mod_name = modname;
  1703. cxl_drv->drv.name = cxl_drv->name;
  1704. return driver_register(&cxl_drv->drv);
  1705. }
  1706. EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
/* Counterpart to __cxl_driver_register() */
void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
/* Emit a MODALIAS uevent so udev can autoload the matching cxl driver */
static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}
/* Devices and drivers pair up by matching numeric cxl device id */
static int cxl_bus_match(struct device *dev, const struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}
  1721. static int cxl_bus_probe(struct device *dev)
  1722. {
  1723. int rc;
  1724. rc = to_cxl_drv(dev->driver)->probe(dev);
  1725. dev_dbg(dev, "probe: %d\n", rc);
  1726. return rc;
  1727. }
  1728. static void cxl_bus_remove(struct device *dev)
  1729. {
  1730. struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
  1731. if (cxl_drv->remove)
  1732. cxl_drv->remove(dev);
  1733. }
/* Ordered workqueue for bus rescan and memdev detach work */
static struct workqueue_struct *cxl_bus_wq;
  1735. static int cxl_rescan_attach(struct device *dev, void *data)
  1736. {
  1737. int rc = device_attach(dev);
  1738. dev_vdbg(dev, "rescan: %s\n", rc ? "attach" : "detached");
  1739. return 0;
  1740. }
/* Workqueue context: walk the whole bus retrying driver attach */
static void cxl_bus_rescan_queue(struct work_struct *w)
{
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_rescan_attach);
}
void cxl_bus_rescan(void)
{
	/*
	 * Single shared work item: queue_work() on an already-pending work
	 * is a no-op, so concurrent rescan requests coalesce.
	 */
	static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);

	queue_work(cxl_bus_wq, &rescan_work);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);
/* Wait for all queued bus work (rescan, detach) to finish */
void cxl_bus_drain(void)
{
	drain_workqueue(cxl_bus_wq);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);
/*
 * Queue @cxlmd's detach work on the ordered cxl workqueue.
 * Returns false if the work was already pending.
 */
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
  1761. static void add_latency(struct access_coordinate *c, long latency)
  1762. {
  1763. for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
  1764. c[i].write_latency += latency;
  1765. c[i].read_latency += latency;
  1766. }
  1767. }
  1768. static bool coordinates_valid(struct access_coordinate *c)
  1769. {
  1770. for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
  1771. if (c[i].read_bandwidth && c[i].write_bandwidth &&
  1772. c[i].read_latency && c[i].write_latency)
  1773. continue;
  1774. return false;
  1775. }
  1776. return true;
  1777. }
  1778. static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
  1779. {
  1780. for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
  1781. c[i].write_bandwidth = min(c[i].write_bandwidth, bw);
  1782. c[i].read_bandwidth = min(c[i].read_bandwidth, bw);
  1783. }
  1784. }
  1785. static void set_access_coordinates(struct access_coordinate *out,
  1786. struct access_coordinate *in)
  1787. {
  1788. for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
  1789. out[i] = in[i];
  1790. }
/* True when @port hangs directly off the CXL root (i.e. is a host bridge) */
static bool parent_port_is_cxl_root(struct cxl_port *port)
{
	return is_cxl_root(to_cxl_port(port->dev.parent));
}
/**
 * cxl_endpoint_get_perf_coordinates - Retrieve performance numbers stored in dports
 *				   of CXL path
 * @port: endpoint cxl_port
 * @coord: output performance data
 *
 * Return: errno on failure, 0 on success.
 */
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
				      struct access_coordinate *coord)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	/* Start with worst-case (infinite) bandwidth, zero latency */
	struct access_coordinate c[] = {
		{
			.read_bandwidth = UINT_MAX,
			.write_bandwidth = UINT_MAX,
		},
		{
			.read_bandwidth = UINT_MAX,
			.write_bandwidth = UINT_MAX,
		},
	};
	struct cxl_port *iter = port;
	struct cxl_dport *dport;
	struct pci_dev *pdev;
	struct device *dev;
	unsigned int bw;
	bool is_cxl_root;

	if (!is_cxl_endpoint(port))
		return -EINVAL;

	/*
	 * Skip calculation for RCD. Expectation is HMAT already covers RCD case
	 * since RCH does not support hotplug.
	 */
	if (cxlmd->cxlds->rcd)
		return 0;

	/*
	 * Exit the loop when the parent port of the current iter port is cxl
	 * root. The iterative loop starts at the endpoint and gathers the
	 * latency of the CXL link from the current device/port to the connected
	 * downstream port each iteration.
	 */
	do {
		dport = iter->parent_dport;
		iter = to_cxl_port(iter->dev.parent);
		is_cxl_root = parent_port_is_cxl_root(iter);

		/*
		 * There's no valid access_coordinate for a root port since RPs do not
		 * have CDAT and therefore needs to be skipped.
		 */
		if (!is_cxl_root) {
			if (!coordinates_valid(dport->coord))
				return -EINVAL;
			cxl_coordinates_combine(c, c, dport->coord);
		}
		/* Link latency is accounted even when the dport coord is skipped */
		add_latency(c, dport->link_latency);
	} while (!is_cxl_root);

	dport = iter->parent_dport;
	/* Retrieve HB coords */
	if (!coordinates_valid(dport->coord))
		return -EINVAL;
	cxl_coordinates_combine(c, c, dport->coord);

	dev = port->uport_dev->parent;
	if (!dev_is_pci(dev))
		return -ENODEV;

	/* Get the calculated PCI paths bandwidth */
	pdev = to_pci_dev(dev);
	bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
	if (bw == 0)
		return -ENXIO;
	/* pcie_bandwidth_available() reports Mb/s; convert to MB/s */
	bw /= BITS_PER_BYTE;

	set_min_bandwidth(c, bw);
	set_access_coordinates(coord, c);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);
/*
 * cxl_port_get_switch_dport_bandwidth - report the upstream switch DSP bandwidth
 * @port: port whose parent dport is queried
 * @c: output array, one entry per access class (bandwidth fields only)
 *
 * Return: 0 on success, -ENODEV if @port sits below a Root Port rather
 * than a switch downstream port, -EINVAL if the dport coordinates are
 * incomplete.
 */
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
					struct access_coordinate *c)
{
	struct cxl_dport *dport = port->parent_dport;

	/* Check this port is connected to a switch DSP and not an RP */
	if (parent_port_is_cxl_root(to_cxl_port(port->dev.parent)))
		return -ENODEV;

	if (!coordinates_valid(dport->coord))
		return -EINVAL;

	/* Only the bandwidth fields are copied; latency is left untouched */
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		c[i].read_bandwidth = dport->coord[i].read_bandwidth;
		c[i].write_bandwidth = dport->coord[i].write_bandwidth;
	}

	return 0;
}
  1886. /* for user tooling to ensure port disable work has completed */
  1887. static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
  1888. {
  1889. if (sysfs_streq(buf, "1")) {
  1890. flush_workqueue(cxl_bus_wq);
  1891. return count;
  1892. }
  1893. return -EINVAL;
  1894. }
  1895. static BUS_ATTR_WO(flush);
/* Bus-level sysfs attributes (/sys/bus/cxl/...) */
static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};
/* The CXL bus: binds ports, decoders, memdevs, and regions to drivers */
struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
/* Root of the "cxl" debugfs hierarchy, created at module init */
static struct dentry *cxl_debugfs;

/* Create @dir under the shared cxl debugfs root for sub-drivers */
struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
  1922. static __init int cxl_core_init(void)
  1923. {
  1924. int rc;
  1925. cxl_debugfs = debugfs_create_dir("cxl", NULL);
  1926. if (einj_cxl_is_initialized())
  1927. debugfs_create_file("einj_types", 0400, cxl_debugfs, NULL,
  1928. &einj_cxl_available_error_type_fops);
  1929. cxl_mbox_init();
  1930. rc = cxl_memdev_init();
  1931. if (rc)
  1932. return rc;
  1933. cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
  1934. if (!cxl_bus_wq) {
  1935. rc = -ENOMEM;
  1936. goto err_wq;
  1937. }
  1938. rc = bus_register(&cxl_bus_type);
  1939. if (rc)
  1940. goto err_bus;
  1941. rc = cxl_region_init();
  1942. if (rc)
  1943. goto err_region;
  1944. return 0;
  1945. err_region:
  1946. bus_unregister(&cxl_bus_type);
  1947. err_bus:
  1948. destroy_workqueue(cxl_bus_wq);
  1949. err_wq:
  1950. cxl_memdev_exit();
  1951. return rc;
  1952. }
/* Module exit: tear down in reverse order of cxl_core_init() */
static void cxl_core_exit(void)
{
	cxl_region_exit();
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	debugfs_remove_recursive(cxl_debugfs);
}
  1961. subsys_initcall(cxl_core_init);
  1962. module_exit(cxl_core_exit);
  1963. MODULE_DESCRIPTION("CXL: Core Compute Express Link support");
  1964. MODULE_LICENSE("GPL v2");
  1965. MODULE_IMPORT_NS(CXL);