dfl-fme-main.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/units.h>
#include <linux/fpga-dfl.h>

#include "dfl.h"
#include "dfl-fme.h"

static ssize_t ports_num_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
}
static DEVICE_ATTR_RO(ports_num);

/*
 * Bitstream (static FPGA region) identifier number. It contains the
 * detailed version and other information of this static FPGA region.
 */
static ssize_t bitstream_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_BITSTREAM_ID);

        return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
}
static DEVICE_ATTR_RO(bitstream_id);

/*
 * Bitstream (static FPGA region) meta data. It contains the synthesis
 * date, seed and other information of this static FPGA region.
 */
static ssize_t bitstream_metadata_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_BITSTREAM_MD);

        return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
}
static DEVICE_ATTR_RO(bitstream_metadata);

static ssize_t cache_size_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
}
static DEVICE_ATTR_RO(cache_size);

static ssize_t fabric_version_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
}
static DEVICE_ATTR_RO(fabric_version);

static ssize_t socket_id_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
}
static DEVICE_ATTR_RO(socket_id);

static struct attribute *fme_hdr_attrs[] = {
        &dev_attr_ports_num.attr,
        &dev_attr_bitstream_id.attr,
        &dev_attr_bitstream_metadata.attr,
        &dev_attr_cache_size.attr,
        &dev_attr_fabric_version.attr,
        &dev_attr_socket_id.attr,
        NULL,
};

static const struct attribute_group fme_hdr_group = {
        .attrs = fme_hdr_attrs,
};
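
/*
 * Illustrative usage (example paths, not part of the driver): the header
 * attributes above are attached to the FME platform device, so assuming the
 * usual DFL device name "dfl-fme.N" (instance number is system dependent)
 * they can be read from sysfs, e.g.:
 *
 *   cat /sys/bus/platform/devices/dfl-fme.0/bitstream_id
 *   cat /sys/bus/platform/devices/dfl-fme.0/ports_num
 */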

static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
                                       unsigned long arg)
{
        struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
        int port_id;

        if (get_user(port_id, (int __user *)arg))
                return -EFAULT;

        return dfl_fpga_cdev_release_port(cdev, port_id);
}

static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
                                      unsigned long arg)
{
        struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
        int port_id;

        if (get_user(port_id, (int __user *)arg))
                return -EFAULT;

        return dfl_fpga_cdev_assign_port(cdev, port_id);
}

static long fme_hdr_ioctl(struct platform_device *pdev,
                          struct dfl_feature *feature,
                          unsigned int cmd, unsigned long arg)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

        switch (cmd) {
        case DFL_FPGA_FME_PORT_RELEASE:
                return fme_hdr_ioctl_release_port(pdata, arg);
        case DFL_FPGA_FME_PORT_ASSIGN:
                return fme_hdr_ioctl_assign_port(pdata, arg);
        }

        return -ENODEV;
}

static const struct dfl_feature_id fme_hdr_id_table[] = {
        {.id = FME_FEATURE_ID_HEADER,},
        {0,}
};

static const struct dfl_feature_ops fme_hdr_ops = {
        .ioctl = fme_hdr_ioctl,
};
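
/*
 * Minimal userspace sketch of the two ioctls above (illustration only; the
 * device path is a placeholder and the uapi definitions live in
 * <linux/fpga-dfl.h>). As the get_user() calls imply, the argument is a
 * pointer to an int holding the port id:
 *
 *   int port_id = 0;
 *   int fd = open("/dev/dfl-fme.0", O_RDWR);
 *
 *   ioctl(fd, DFL_FPGA_FME_PORT_RELEASE, &port_id);
 *   ioctl(fd, DFL_FPGA_FME_PORT_ASSIGN, &port_id);
 */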

#define FME_THERM_THRESHOLD     0x8
#define TEMP_THRESHOLD1         GENMASK_ULL(6, 0)
#define TEMP_THRESHOLD1_EN      BIT_ULL(7)
#define TEMP_THRESHOLD2         GENMASK_ULL(14, 8)
#define TEMP_THRESHOLD2_EN      BIT_ULL(15)
#define TRIP_THRESHOLD          GENMASK_ULL(30, 24)
#define TEMP_THRESHOLD1_STATUS  BIT_ULL(32)     /* threshold1 reached */
#define TEMP_THRESHOLD2_STATUS  BIT_ULL(33)     /* threshold2 reached */
/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
#define TEMP_THRESHOLD1_POLICY  BIT_ULL(44)

#define FME_THERM_RDSENSOR_FMT1 0x10
#define FPGA_TEMPERATURE        GENMASK_ULL(6, 0)

#define FME_THERM_CAP           0x20
#define THERM_NO_THROTTLE       BIT_ULL(0)

#define MD_PRE_DEG

static bool fme_thermal_throttle_support(void __iomem *base)
{
        u64 v = readq(base + FME_THERM_CAP);

        return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
}

static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
                                           enum hwmon_sensor_types type,
                                           u32 attr, int channel)
{
        const struct dfl_feature *feature = drvdata;

        /* temperature is always supported, and check hardware cap for others */
        if (attr == hwmon_temp_input)
                return 0444;

        return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
}
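
/*
 * Note: the hwmon sysfs ABI reports temperatures in millidegrees Celsius,
 * while the registers above hold whole degrees, hence the scaling by MILLI
 * (1000, from <linux/units.h>) in the read path below.
 */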

static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                              u32 attr, int channel, long *val)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        switch (attr) {
        case hwmon_temp_input:
                v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
                *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * MILLI);
                break;
        case hwmon_temp_max:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * MILLI);
                break;
        case hwmon_temp_crit:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * MILLI);
                break;
        case hwmon_temp_emergency:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * MILLI);
                break;
        case hwmon_temp_max_alarm:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
                break;
        case hwmon_temp_crit_alarm:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static const struct hwmon_ops thermal_hwmon_ops = {
        .is_visible = thermal_hwmon_attrs_visible,
        .read = thermal_hwmon_read,
};

static const struct hwmon_channel_info * const thermal_hwmon_info[] = {
        HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
                                 HWMON_T_MAX | HWMON_T_MAX_ALARM |
                                 HWMON_T_CRIT | HWMON_T_CRIT_ALARM),
        NULL
};

static const struct hwmon_chip_info thermal_hwmon_chip_info = {
        .ops = &thermal_hwmon_ops,
        .info = thermal_hwmon_info,
};

static ssize_t temp1_max_policy_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        v = readq(feature->ioaddr + FME_THERM_THRESHOLD);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
}

static DEVICE_ATTR_RO(temp1_max_policy);

static struct attribute *thermal_extra_attrs[] = {
        &dev_attr_temp1_max_policy.attr,
        NULL,
};

static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
                                           struct attribute *attr, int index)
{
        struct device *dev = kobj_to_dev(kobj);
        struct dfl_feature *feature = dev_get_drvdata(dev);

        return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
}

static const struct attribute_group thermal_extra_group = {
        .attrs = thermal_extra_attrs,
        .is_visible = thermal_extra_attrs_visible,
};
__ATTRIBUTE_GROUPS(thermal_extra);

static int fme_thermal_mgmt_init(struct platform_device *pdev,
                                 struct dfl_feature *feature)
{
        struct device *hwmon;

        /*
         * create hwmon to allow userspace monitoring temperature and other
         * threshold information.
         *
         * temp1_input      -> FPGA device temperature
         * temp1_max        -> hardware threshold 1 -> 50% or 90% throttling
         * temp1_crit       -> hardware threshold 2 -> 100% throttling
         * temp1_emergency  -> hardware trip_threshold to shutdown FPGA
         * temp1_max_alarm  -> hardware threshold 1 alarm
         * temp1_crit_alarm -> hardware threshold 2 alarm
         *
         * create device specific sysfs interfaces, e.g. read temp1_max_policy
         * to understand the actual hardware throttling action (50% vs 90%).
         *
         * If hardware doesn't support automatic throttling per thresholds,
         * then all above sysfs interfaces are not visible except temp1_input
         * for temperature.
         */
        hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
                                                     "dfl_fme_thermal", feature,
                                                     &thermal_hwmon_chip_info,
                                                     thermal_extra_groups);
        if (IS_ERR(hwmon)) {
                dev_err(&pdev->dev, "Fail to register thermal hwmon\n");
                return PTR_ERR(hwmon);
        }

        return 0;
}
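
/*
 * Illustrative usage (the hwmon index is system dependent): once the
 * "dfl_fme_thermal" hwmon device is registered, the FPGA temperature can
 * be read with e.g.
 *
 *   cat /sys/class/hwmon/hwmonX/temp1_input    # millidegrees Celsius
 */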

static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
        {.id = FME_FEATURE_ID_THERMAL_MGMT,},
        {0,}
};

static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
        .init = fme_thermal_mgmt_init,
};

#define FME_PWR_STATUS          0x8
#define FME_LATENCY_TOLERANCE   BIT_ULL(18)
#define PWR_CONSUMED            GENMASK_ULL(17, 0)

#define FME_PWR_THRESHOLD       0x10
#define PWR_THRESHOLD1          GENMASK_ULL(6, 0)       /* in Watts */
#define PWR_THRESHOLD2          GENMASK_ULL(14, 8)      /* in Watts */
#define PWR_THRESHOLD_MAX       0x7f                    /* in Watts */
#define PWR_THRESHOLD1_STATUS   BIT_ULL(16)
#define PWR_THRESHOLD2_STATUS   BIT_ULL(17)

#define FME_PWR_XEON_LIMIT      0x18
#define XEON_PWR_LIMIT          GENMASK_ULL(14, 0)      /* in 0.1 Watts */
#define XEON_PWR_EN             BIT_ULL(15)

#define FME_PWR_FPGA_LIMIT      0x20
#define FPGA_PWR_LIMIT          GENMASK_ULL(14, 0)      /* in 0.1 Watts */
#define FPGA_PWR_EN             BIT_ULL(15)
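
/*
 * Note: the hwmon sysfs ABI reports power in microwatts, while the threshold
 * fields above are in whole watts and the Xeon/FPGA limits in 0.1 W, hence
 * the scaling by MICRO (1000000, from <linux/units.h>) and by 100000 in the
 * read and write paths below.
 */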

static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                            u32 attr, int channel, long *val)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        switch (attr) {
        case hwmon_power_input:
                v = readq(feature->ioaddr + FME_PWR_STATUS);
                *val = (long)(FIELD_GET(PWR_CONSUMED, v) * MICRO);
                break;
        case hwmon_power_max:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * MICRO);
                break;
        case hwmon_power_crit:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * MICRO);
                break;
        case hwmon_power_max_alarm:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
                break;
        case hwmon_power_crit_alarm:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
                             u32 attr, int channel, long val)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
        struct dfl_feature *feature = dev_get_drvdata(dev);
        int ret = 0;
        u64 v;

        val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);

        mutex_lock(&pdata->lock);

        switch (attr) {
        case hwmon_power_max:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                v &= ~PWR_THRESHOLD1;
                v |= FIELD_PREP(PWR_THRESHOLD1, val);
                writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
                break;
        case hwmon_power_crit:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                v &= ~PWR_THRESHOLD2;
                v |= FIELD_PREP(PWR_THRESHOLD2, val);
                writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        mutex_unlock(&pdata->lock);

        return ret;
}

static umode_t power_hwmon_attrs_visible(const void *drvdata,
                                         enum hwmon_sensor_types type,
                                         u32 attr, int channel)
{
        switch (attr) {
        case hwmon_power_input:
        case hwmon_power_max_alarm:
        case hwmon_power_crit_alarm:
                return 0444;
        case hwmon_power_max:
        case hwmon_power_crit:
                return 0644;
        }

        return 0;
}

static const struct hwmon_ops power_hwmon_ops = {
        .is_visible = power_hwmon_attrs_visible,
        .read = power_hwmon_read,
        .write = power_hwmon_write,
};

static const struct hwmon_channel_info * const power_hwmon_info[] = {
        HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
                                  HWMON_P_MAX | HWMON_P_MAX_ALARM |
                                  HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
        NULL
};

static const struct hwmon_chip_info power_hwmon_chip_info = {
        .ops = &power_hwmon_ops,
        .info = power_hwmon_info,
};

static ssize_t power1_xeon_limit_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u16 xeon_limit = 0;
        u64 v;

        v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);

        if (FIELD_GET(XEON_PWR_EN, v))
                xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);

        return sprintf(buf, "%u\n", xeon_limit * 100000);
}

static ssize_t power1_fpga_limit_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u16 fpga_limit = 0;
        u64 v;

        v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);

        if (FIELD_GET(FPGA_PWR_EN, v))
                fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);

        return sprintf(buf, "%u\n", fpga_limit * 100000);
}

static ssize_t power1_ltr_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        v = readq(feature->ioaddr + FME_PWR_STATUS);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
}

static DEVICE_ATTR_RO(power1_xeon_limit);
static DEVICE_ATTR_RO(power1_fpga_limit);
static DEVICE_ATTR_RO(power1_ltr);

static struct attribute *power_extra_attrs[] = {
        &dev_attr_power1_xeon_limit.attr,
        &dev_attr_power1_fpga_limit.attr,
        &dev_attr_power1_ltr.attr,
        NULL
};

ATTRIBUTE_GROUPS(power_extra);

static int fme_power_mgmt_init(struct platform_device *pdev,
                               struct dfl_feature *feature)
{
        struct device *hwmon;

        hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
                                                     "dfl_fme_power", feature,
                                                     &power_hwmon_chip_info,
                                                     power_extra_groups);
        if (IS_ERR(hwmon)) {
                dev_err(&pdev->dev, "Fail to register power hwmon\n");
                return PTR_ERR(hwmon);
        }

        return 0;
}
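
/*
 * Illustrative usage (the hwmon index is system dependent): power1_max and
 * power1_crit accept values in microwatts and are clamped to
 * PWR_THRESHOLD_MAX (127 W), e.g.
 *
 *   echo 30000000 > /sys/class/hwmon/hwmonX/power1_max    # 30 W threshold
 */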

static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
        {.id = FME_FEATURE_ID_POWER_MGMT,},
        {0,}
};

static const struct dfl_feature_ops fme_power_mgmt_ops = {
        .init = fme_power_mgmt_init,
};

static struct dfl_feature_driver fme_feature_drvs[] = {
        {
                .id_table = fme_hdr_id_table,
                .ops = &fme_hdr_ops,
        },
        {
                .id_table = fme_pr_mgmt_id_table,
                .ops = &fme_pr_mgmt_ops,
        },
        {
                .id_table = fme_global_err_id_table,
                .ops = &fme_global_err_ops,
        },
        {
                .id_table = fme_thermal_mgmt_id_table,
                .ops = &fme_thermal_mgmt_ops,
        },
        {
                .id_table = fme_power_mgmt_id_table,
                .ops = &fme_power_mgmt_ops,
        },
        {
                .id_table = fme_perf_id_table,
                .ops = &fme_perf_ops,
        },
        {
                .ops = NULL,
        },
};
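
/*
 * Note: this table is walked by dfl_fpga_dev_feature_init() in fme_probe();
 * each entry is matched by feature id against the sub-features enumerated
 * from the Device Feature List, and the entry with .ops = NULL terminates
 * the walk.
 */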

static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
                                      unsigned long arg)
{
        /* No extension support for now */
        return 0;
}

static int fme_open(struct inode *inode, struct file *filp)
{
        struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
        int ret;

        if (WARN_ON(!pdata))
                return -ENODEV;

        mutex_lock(&pdata->lock);
        ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
        if (!ret) {
                dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
                        dfl_feature_dev_use_count(pdata));
                filp->private_data = pdata;
        }
        mutex_unlock(&pdata->lock);

        return ret;
}

static int fme_release(struct inode *inode, struct file *filp)
{
        struct dfl_feature_platform_data *pdata = filp->private_data;
        struct platform_device *pdev = pdata->dev;
        struct dfl_feature *feature;

        dev_dbg(&pdev->dev, "Device File Release\n");

        mutex_lock(&pdata->lock);
        dfl_feature_dev_use_end(pdata);

        if (!dfl_feature_dev_use_count(pdata))
                dfl_fpga_dev_for_each_feature(pdata, feature)
                        dfl_fpga_set_irq_triggers(feature, 0,
                                                  feature->nr_irqs, NULL);
        mutex_unlock(&pdata->lock);

        return 0;
}

static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct dfl_feature_platform_data *pdata = filp->private_data;
        struct platform_device *pdev = pdata->dev;
        struct dfl_feature *f;
        long ret;

        dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

        switch (cmd) {
        case DFL_FPGA_GET_API_VERSION:
                return DFL_FPGA_API_VERSION;
        case DFL_FPGA_CHECK_EXTENSION:
                return fme_ioctl_check_extension(pdata, arg);
        default:
                /*
                 * Let sub-feature's ioctl function to handle the cmd.
                 * Sub-feature's ioctl returns -ENODEV when cmd is not
                 * handled in this sub feature, and returns 0 or other
                 * error code if cmd is handled.
                 */
                dfl_fpga_dev_for_each_feature(pdata, f) {
                        if (f->ops && f->ops->ioctl) {
                                ret = f->ops->ioctl(pdev, f, cmd, arg);
                                if (ret != -ENODEV)
                                        return ret;
                        }
                }
        }

        return -EINVAL;
}

static int fme_dev_init(struct platform_device *pdev)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct dfl_fme *fme;

        fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
        if (!fme)
                return -ENOMEM;

        mutex_lock(&pdata->lock);
        dfl_fpga_pdata_set_private(pdata, fme);
        mutex_unlock(&pdata->lock);

        return 0;
}

static void fme_dev_destroy(struct platform_device *pdev)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

        mutex_lock(&pdata->lock);
        dfl_fpga_pdata_set_private(pdata, NULL);
        mutex_unlock(&pdata->lock);
}

static const struct file_operations fme_fops = {
        .owner = THIS_MODULE,
        .open = fme_open,
        .release = fme_release,
        .unlocked_ioctl = fme_ioctl,
};

static int fme_probe(struct platform_device *pdev)
{
        int ret;

        ret = fme_dev_init(pdev);
        if (ret)
                goto exit;

        ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
        if (ret)
                goto dev_destroy;

        ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
        if (ret)
                goto feature_uinit;

        return 0;

feature_uinit:
        dfl_fpga_dev_feature_uinit(pdev);
dev_destroy:
        fme_dev_destroy(pdev);
exit:
        return ret;
}

static void fme_remove(struct platform_device *pdev)
{
        dfl_fpga_dev_ops_unregister(pdev);
        dfl_fpga_dev_feature_uinit(pdev);
        fme_dev_destroy(pdev);
}

static const struct attribute_group *fme_dev_groups[] = {
        &fme_hdr_group,
        &fme_global_err_group,
        NULL
};

static struct platform_driver fme_driver = {
        .driver = {
                .name = DFL_FPGA_FEATURE_DEV_FME,
                .dev_groups = fme_dev_groups,
        },
        .probe = fme_probe,
        .remove_new = fme_remove,
};

module_platform_driver(fme_driver);

MODULE_DESCRIPTION("FPGA Management Engine driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme");