pci-sysfs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2002-2004 IBM Corp.
 * (C) Copyright 2003 Matthew Wilcox
 * (C) Copyright 2003 Hewlett-Packard
 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
 *
 * File attributes for PCI devices
 *
 * Modeled after usb's driverfs.c
 */

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/aperture.h>
#include "pci.h"

#ifndef ARCH_PCI_DEV_GROUPS
#define ARCH_PCI_DEV_GROUPS
#endif

static int sysfs_initialized;	/* = 0 */

/* show configuration fields */
#define pci_config_attr(field, format_string) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
	struct pci_dev *pdev; \
 \
	pdev = to_pci_dev(dev); \
	return sysfs_emit(buf, format_string, pdev->field); \
} \
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");

static ssize_t irq_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

#ifdef CONFIG_PCI_MSI
	/*
	 * For MSI, show the first MSI IRQ; for all other cases including
	 * MSI-X, show the legacy INTx IRQ.
	 */
	if (pdev->msi_enabled)
		return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
#endif

	return sysfs_emit(buf, "%u\n", pdev->irq);
}
static DEVICE_ATTR_RO(irq);

static ssize_t broken_parity_status_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->broken_parity_status = !!val;

	return count;
}
static DEVICE_ATTR_RW(broken_parity_status);

static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	if (dev_to_node(dev) == NUMA_NO_NODE)
		mask = cpu_online_mask;
	else
		mask = cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);

/*
 * PCI Bus Class Devices
 */
static ssize_t cpuaffinity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);

static ssize_t cpulistaffinity_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
}
static DEVICE_ATTR_RO(power_state);

/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	int i;
	int max;
	resource_size_t start, end;
	size_t len = 0;
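
	/*
	 * Bridges also expose their bridge window resources; other devices
	 * stop before the bridge window entries.
	 */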
	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];

		pci_resource_to_user(pci_dev, i, res, &start, &end);
		len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
				     (unsigned long long)start,
				     (unsigned long long)end,
				     (unsigned long long)res->flags);
	}
	return len;
}
static DEVICE_ATTR_RO(resource);

static ssize_t max_link_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n",
			  pci_speed_string(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);

static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t ret;

	/* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
	pci_config_pm_runtime_get(pdev);
	ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
	pci_config_pm_runtime_put(pdev);

	return ret;
}
static DEVICE_ATTR_RO(max_link_width);

static ssize_t current_link_speed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;
	enum pci_bus_speed speed;

	pci_config_pm_runtime_get(pci_dev);
	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	pci_config_pm_runtime_put(pci_dev);
	if (err)
		return -EINVAL;

	speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];

	return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
}
static DEVICE_ATTR_RO(current_link_speed);

static ssize_t current_link_width_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;

	pci_config_pm_runtime_get(pci_dev);
	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	pci_config_pm_runtime_put(pci_dev);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
}
static DEVICE_ATTR_RO(current_link_width);

static ssize_t secondary_bus_number_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sec_bus;
	int err;

	pci_config_pm_runtime_get(pci_dev);
	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
	pci_config_pm_runtime_put(pci_dev);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);

static ssize_t subordinate_bus_number_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sub_bus;
	int err;

	pci_config_pm_runtime_get(pci_dev);
	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
	pci_config_pm_runtime_put(pci_dev);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);

static ssize_t ari_enabled_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
			  pci_dev->vendor, pci_dev->device,
			  pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			  (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
			  (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = 0;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;
	device_unlock(dev);

	return result < 0 ? result : count;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);

#ifdef CONFIG_NUMA
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoint(buf, 0, &node) < 0)
		return -EINVAL;

	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
		  node);

	dev->numa_node = node;

	return count;
}

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif

static ssize_t dma_mask_bits_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);

static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);

static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;

	return sysfs_emit(buf, "%u\n", subordinate ?
			  !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			    : !pdev->no_msi);
}

static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);

static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *b = NULL;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		while ((b = pci_find_next_bus(b)) != NULL)
			pci_rescan_bus(b);
		pci_unlock_rescan_remove();
	}
	return count;
}
static BUS_ATTR_WO(rescan);

static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};

static ssize_t dev_rescan_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		pci_rescan_bus(pdev->bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
							    dev_rescan_store);

static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
				  remove_store);

static ssize_t bus_rescan_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *bus = to_pci_bus(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
			pci_rescan_bus_bridge_resize(bus->self);
		else
			pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
							    bus_rescan_store);

static ssize_t reset_subordinate_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->subordinate;
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		int ret = __pci_reset_bus(bus);

		if (ret)
			return ret;
	}

	return count;
}
static DEVICE_ATTR_WO(reset_subordinate);

#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->d3cold_allowed = !!val;
	pci_bridge_d3_update(pdev);

	pm_runtime_resume(dev);

	return count;
}

static ssize_t d3cold_allowed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif

#ifdef CONFIG_OF
static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (np == NULL)
		return 0;
	return sysfs_emit(buf, "%pOF\n", np);
}
static DEVICE_ATTR_RO(devspec);
#endif

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = driver_set_override(dev, &pdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *pci_dev_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_driver_override.attr,
	&dev_attr_ari_enabled.attr,
	NULL,
};

static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	&dev_attr_reset_subordinate.attr,
	NULL,
};

static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

static struct attribute *pcibus_attrs[] = {
	&dev_attr_bus_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};

static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *vga_dev = vga_default_device();

	if (vga_dev)
		return sysfs_emit(buf, "%u\n", (pdev == vga_dev));

	return sysfs_emit(buf, "%u\n",
			  !!(pdev->resource[PCI_ROM_RESOURCE].flags &
			     IORESOURCE_ROM_SHADOW));
}
static DEVICE_ATTR_RO(boot_vga);

static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	if (off > size)
		return 0;

	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);
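
	/*
	 * Read a potentially unaligned head as byte/word accesses, the
	 * aligned middle as dwords, and the remaining tail as word/byte,
	 * so every config access stays naturally aligned.
	 */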
	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
		cond_resched();
	}

	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
				  count)) {
		pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
			      current->comm, off);
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	}

	if (off > dev->cfg_size)
		return 0;

	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);
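
	/* Mirror pci_read_config(): unaligned head, dword middle, short tail. */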
	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size)
		pci_user_write_config_byte(dev, off, data[off - init_off]);

	pci_config_pm_runtime_put(dev);

	return count;
}
static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);

static struct bin_attribute *pci_dev_config_attrs[] = {
	&bin_attr_config,
	NULL,
};

static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
					      struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
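
	/*
	 * Expose the 4K extended config space when the device has it,
	 * otherwise only the 256-byte legacy space.
	 */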
	a->size = PCI_CFG_SPACE_SIZE;
	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		a->size = PCI_CFG_SPACE_EXP_SIZE;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_config_attr_group = {
	.bin_attrs = pci_dev_config_attrs,
	.is_bin_visible = pci_dev_config_attr_is_visible,
};

/*
 * llseek operation for mmappable PCI resources.
 * May be left unused if the arch doesn't provide them.
 */
static __maybe_unused loff_t
pci_llseek_resource(struct file *filep,
		    struct kobject *kobj __always_unused,
		    struct bin_attribute *attr,
		    loff_t offset, int whence)
{
	return fixed_size_llseek(filep, offset, whence, attr->size);
}

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
 */
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr, char *buf,
				  loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to the file to write to
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer containing value to be written
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space. Returns -ENOSYS if the operation isn't supported
 */
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation. Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	if (!sysfs_initialized)
		return;

	b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
			       GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;
	b->legacy_io->attr.mode = 0600;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	/* See pci_create_attr() for motivation */
	b->legacy_io->llseek = pci_llseek_resource;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	b->legacy_io->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;
	b->legacy_mem->attr.mode = 0600;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	/* See pci_create_attr() for motivation */
	b->legacy_mem->llseek = pci_llseek_resource;
	b->legacy_mem->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#endif /* HAVE_PCI_LEGACY */

#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}

static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}

static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}

static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
#ifdef CONFIG_HAS_IOPORT
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
#else
	return -ENXIO;
#endif
}

static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct bin_attribute *res_attr;

		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}

		res_attr = pdev->res_attr_wc[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}

static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
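	/* "resourceN_wc" needs 13 bytes including the NUL, "resourceN" needs 10 */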
	int name_len = write_combine ? 13 : 10;
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	if (res_attr->mmap) {
		res_attr->f_mapping = iomem_get_mapping;
		/*
		 * generic_file_llseek() consults f_mapping->host to determine
		 * the file size. As iomem_inode knows nothing about the
		 * attribute, it's not going to work, so override it as well.
		 */
		res_attr->llseek = pci_llseek_resource;
	}
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = 0600;
	res_attr->size = pci_resource_len(pdev, num);
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval) {
		kfree(res_attr);
		return retval;
	}

	if (write_combine)
		pdev->res_attr_wc[num] = res_attr;
	else
		pdev->res_attr[num] = res_attr;

	return 0;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @pdev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a WC mappable file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
#else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif

/**
 * pci_write_rom - used to enable access to the PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of bytes in input
 *
 * Writing anything except 0 enables it.
 */
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if ((off == 0) && (*buf == '0') && (count == 2))
		pdev->rom_attr_enabled = 0;
	else
		pdev->rom_attr_enabled = 1;

	return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
 */
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
	if (!rom || !size)
		return -EIO;

	if (off >= size)
		count = 0;
	else {
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	pci_unmap_rom(pdev, rom);

	return count;
}
static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);

static struct bin_attribute *pci_dev_rom_attrs[] = {
	&bin_attr_rom,
	NULL,
};

static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
					   struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	size_t rom_size;

	/* If the device has a ROM, try to expose it in sysfs. */
	rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
	if (!rom_size)
		return 0;

	a->size = rom_size;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_rom_attr_group = {
	.bin_attrs = pci_dev_rom_attrs,
	.is_bin_visible = pci_dev_rom_attr_is_visible,
};

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val != 1)
		return -EINVAL;

	pm_runtime_get_sync(dev);
	result = pci_reset_function(pdev);
	pm_runtime_put(dev);
	if (result < 0)
		return result;

	return count;
}
static DEVICE_ATTR_WO(reset);

static struct attribute *pci_dev_reset_attrs[] = {
	&dev_attr_reset.attr,
	NULL,
};

static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
					     struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if (!pci_reset_supported(pdev))
		return 0;

	return a->mode;
}

static const struct attribute_group pci_dev_reset_attr_group = {
	.attrs = pci_dev_reset_attrs,
	.is_visible = pci_dev_reset_attr_is_visible,
};

static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t ret;

	pci_config_pm_runtime_get(pdev);

	ret = sysfs_emit(buf, "%016llx\n",
			 (u64)pci_rebar_get_possible_sizes(pdev, n));

	pci_config_pm_runtime_put(pdev);

	return ret;
}

static ssize_t __resource_resize_store(struct device *dev, int n,
				       const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long size, flags;
	int ret, i;
	u16 cmd;

	if (kstrtoul(buf, 0, &size) < 0)
		return -EINVAL;

	device_lock(dev);
	if (dev->driver || pci_num_vf(pdev)) {
		ret = -EBUSY;
		goto unlock;
	}

	pci_config_pm_runtime_get(pdev);

	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
		ret = aperture_remove_conflicting_pci_devices(pdev,
						"resourceN_resize");
		if (ret)
			goto pm_put;
	}
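
	/*
	 * Disable memory decoding while the BAR is released and resized;
	 * the saved command register value is restored below.
	 */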
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	flags = pci_resource_flags(pdev, n);

	pci_remove_resource_files(pdev);

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		if (pci_resource_len(pdev, i) &&
		    pci_resource_flags(pdev, i) == flags)
			pci_release_resource(pdev, i);
	}

	ret = pci_resize_resource(pdev, n, size);

	pci_assign_unassigned_bus_resources(pdev->bus);

	if (pci_create_resource_files(pdev))
		pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");

	pci_write_config_word(pdev, PCI_COMMAND, cmd);
pm_put:
	pci_config_pm_runtime_put(pdev);
unlock:
	device_unlock(dev);

	return ret ? ret : count;
}

#define pci_dev_resource_resize_attr(n)					\
static ssize_t resource##n##_resize_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	return __resource_resize_show(dev, n, buf);			\
}									\
static ssize_t resource##n##_resize_store(struct device *dev,		\
					  struct device_attribute *attr,\
					  const char *buf, size_t count)\
{									\
	return __resource_resize_store(dev, n, buf, count);		\
}									\
static DEVICE_ATTR_RW(resource##n##_resize)

pci_dev_resource_resize_attr(0);
pci_dev_resource_resize_attr(1);
pci_dev_resource_resize_attr(2);
pci_dev_resource_resize_attr(3);
pci_dev_resource_resize_attr(4);
pci_dev_resource_resize_attr(5);

static struct attribute *resource_resize_attrs[] = {
	&dev_attr_resource0_resize.attr,
	&dev_attr_resource1_resize.attr,
	&dev_attr_resource2_resize.attr,
	&dev_attr_resource3_resize.attr,
	&dev_attr_resource4_resize.attr,
	&dev_attr_resource5_resize.attr,
	NULL,
};

static umode_t resource_resize_is_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
}

static const struct attribute_group pci_dev_resource_resize_group = {
	.attrs = resource_resize_attrs,
	.is_visible = resource_resize_is_visible,
};

int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return -EACCES;

	return pci_create_resource_files(pdev);
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
 */
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return;

	pci_remove_resource_files(pdev);
}

static int __init pci_sysfs_init(void)
{
	struct pci_dev *pdev = NULL;
	struct pci_bus *pbus = NULL;
	int retval;

	sysfs_initialized = 1;
	for_each_pci_dev(pdev) {
		retval = pci_create_sysfs_dev_files(pdev);
		if (retval) {
			pci_dev_put(pdev);
			return retval;
		}
	}

	while ((pbus = pci_find_next_bus(pbus)))
		pci_create_legacy_files(pbus);

	return 0;
}
late_initcall(pci_sysfs_init);

static struct attribute *pci_dev_dev_attrs[] = {
	&dev_attr_boot_vga.attr,
	NULL,
};

static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (a == &dev_attr_boot_vga.attr && pci_is_vga(pdev))
		return a->mode;

	return 0;
}

static struct attribute *pci_dev_hp_attrs[] = {
	&dev_attr_remove.attr,
	&dev_attr_dev_rescan.attr,
	NULL,
};

static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pdev->is_virtfn)
		return 0;

	return a->mode;
}

static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_bridge(pdev))
		return a->mode;

	return 0;
}

static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_pcie(pdev))
		return a->mode;

	return 0;
}

static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	&pci_dev_config_attr_group,
	&pci_dev_rom_attr_group,
	&pci_dev_reset_attr_group,
	&pci_dev_reset_method_attr_group,
	&pci_dev_vpd_attr_group,
#ifdef CONFIG_DMI
	&pci_dev_smbios_attr_group,
#endif
#ifdef CONFIG_ACPI
	&pci_dev_acpi_attr_group,
#endif
	&pci_dev_resource_resize_group,
	ARCH_PCI_DEV_GROUPS
	NULL,
};

static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};

const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_pf_dev_attr_group,
	&sriov_vf_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
	&aer_stats_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
	&aspm_ctrl_attr_group,
#endif
	NULL,
};