kcs_bmc_cdev_ipmi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2018, Intel Corporation.
 */

#define pr_fmt(fmt) "kcs-bmc: " fmt

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ipmi_bmc.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "kcs_bmc_client.h"

/* Different phases of the KCS BMC module.
 *  KCS_PHASE_IDLE:
 *            BMC should be neither expecting nor sending any data.
 *  KCS_PHASE_WRITE_START:
 *            BMC is receiving a WRITE_START command from system software.
 *  KCS_PHASE_WRITE_DATA:
 *            BMC is receiving a data byte from system software.
 *  KCS_PHASE_WRITE_END_CMD:
 *            BMC is waiting for the last data byte from system software.
 *  KCS_PHASE_WRITE_DONE:
 *            BMC has received the whole request from system software.
 *  KCS_PHASE_WAIT_READ:
 *            BMC is waiting for the response from the upper IPMI service.
 *  KCS_PHASE_READ:
 *            BMC is transferring the response to system software.
 *  KCS_PHASE_ABORT_ERROR1:
 *            BMC is waiting for an error status request from system software.
 *  KCS_PHASE_ABORT_ERROR2:
 *            BMC is waiting for the idle state after an error from system software.
 *  KCS_PHASE_ERROR:
 *            BMC has detected a protocol violation at the interface level.
 */
enum kcs_ipmi_phases {
        KCS_PHASE_IDLE,
        KCS_PHASE_WRITE_START,
        KCS_PHASE_WRITE_DATA,
        KCS_PHASE_WRITE_END_CMD,
        KCS_PHASE_WRITE_DONE,
        KCS_PHASE_WAIT_READ,
        KCS_PHASE_READ,
        KCS_PHASE_ABORT_ERROR1,
        KCS_PHASE_ABORT_ERROR2,
        KCS_PHASE_ERROR
};

/* IPMI 2.0 - Table 9-4, KCS Interface Status Codes */
enum kcs_ipmi_errors {
        KCS_NO_ERROR             = 0x00,
        KCS_ABORTED_BY_COMMAND   = 0x01,
        KCS_ILLEGAL_CONTROL_CODE = 0x02,
        KCS_LENGTH_ERROR         = 0x06,
        KCS_UNSPECIFIED_ERROR    = 0xFF
};

struct kcs_bmc_ipmi {
        struct list_head entry;

        struct kcs_bmc_client client;

        spinlock_t lock;

        enum kcs_ipmi_phases phase;
        enum kcs_ipmi_errors error;

        wait_queue_head_t queue;
        bool data_in_avail;
        int data_in_idx;
        u8 *data_in;

        int data_out_idx;
        int data_out_len;
        u8 *data_out;

        struct mutex mutex;
        u8 *kbuffer;

        struct miscdevice miscdev;
};

#define DEVICE_NAME     "ipmi-kcs"

#define KCS_MSG_BUFSIZ  1000

#define KCS_ZERO_DATA   0

/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
#define KCS_STATUS_STATE(state)  (state << 6)
#define KCS_STATUS_STATE_MASK    GENMASK(7, 6)
#define KCS_STATUS_CMD_DAT       BIT(3)
#define KCS_STATUS_SMS_ATN       BIT(2)
#define KCS_STATUS_IBF           BIT(1)
#define KCS_STATUS_OBF           BIT(0)

/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
enum kcs_states {
        IDLE_STATE  = 0,
        READ_STATE  = 1,
        WRITE_STATE = 2,
        ERROR_STATE = 3,
};

/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
#define KCS_CMD_GET_STATUS_ABORT 0x60
#define KCS_CMD_WRITE_START      0x61
#define KCS_CMD_WRITE_END        0x62
#define KCS_CMD_READ_BYTE        0x68

static inline void set_state(struct kcs_bmc_ipmi *priv, u8 state)
{
        kcs_bmc_update_status(priv->client.dev, KCS_STATUS_STATE_MASK, KCS_STATUS_STATE(state));
}
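
/*
 * Drive the interface into the error state: latch ERROR_STATE in the status
 * register, consume any byte left in the input data register, place a dummy
 * byte in the output data register, and discard the partially received
 * request. The host is expected to recover with a GET_STATUS/ABORT sequence
 * (see kcs_bmc_ipmi_handle_cmd()).
 */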
static void kcs_bmc_ipmi_force_abort(struct kcs_bmc_ipmi *priv)
{
        set_state(priv, ERROR_STATE);
        kcs_bmc_read_data(priv->client.dev);
        kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);

        priv->phase = KCS_PHASE_ERROR;
        priv->data_in_avail = false;
        priv->data_in_idx = 0;
}
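
/*
 * Handle a host write to the data register (IBF set, CMD_DAT clear): advance
 * the KCS transfer phase, buffering request bytes during the write phases and
 * clocking response bytes back out on READ_BYTE during KCS_PHASE_READ.
 * Requests larger than KCS_MSG_BUFSIZ are aborted with KCS_LENGTH_ERROR.
 */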
static void kcs_bmc_ipmi_handle_data(struct kcs_bmc_ipmi *priv)
{
        struct kcs_bmc_device *dev;
        u8 data;

        dev = priv->client.dev;

        switch (priv->phase) {
        case KCS_PHASE_WRITE_START:
                priv->phase = KCS_PHASE_WRITE_DATA;
                fallthrough;

        case KCS_PHASE_WRITE_DATA:
                if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
                        set_state(priv, WRITE_STATE);
                        kcs_bmc_write_data(dev, KCS_ZERO_DATA);
                        priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
                } else {
                        kcs_bmc_ipmi_force_abort(priv);
                        priv->error = KCS_LENGTH_ERROR;
                }
                break;

        case KCS_PHASE_WRITE_END_CMD:
                if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
                        set_state(priv, READ_STATE);
                        priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
                        priv->phase = KCS_PHASE_WRITE_DONE;
                        priv->data_in_avail = true;
                        wake_up_interruptible(&priv->queue);
                } else {
                        kcs_bmc_ipmi_force_abort(priv);
                        priv->error = KCS_LENGTH_ERROR;
                }
                break;

        case KCS_PHASE_READ:
                if (priv->data_out_idx == priv->data_out_len)
                        set_state(priv, IDLE_STATE);

                data = kcs_bmc_read_data(dev);
                if (data != KCS_CMD_READ_BYTE) {
                        set_state(priv, ERROR_STATE);
                        kcs_bmc_write_data(dev, KCS_ZERO_DATA);
                        break;
                }

                if (priv->data_out_idx == priv->data_out_len) {
                        kcs_bmc_write_data(dev, KCS_ZERO_DATA);
                        priv->phase = KCS_PHASE_IDLE;
                        break;
                }

                kcs_bmc_write_data(dev, priv->data_out[priv->data_out_idx++]);
                break;

        case KCS_PHASE_ABORT_ERROR1:
                set_state(priv, READ_STATE);
                kcs_bmc_read_data(dev);
                kcs_bmc_write_data(dev, priv->error);
                priv->phase = KCS_PHASE_ABORT_ERROR2;
                break;

        case KCS_PHASE_ABORT_ERROR2:
                set_state(priv, IDLE_STATE);
                kcs_bmc_read_data(dev);
                kcs_bmc_write_data(dev, KCS_ZERO_DATA);
                priv->phase = KCS_PHASE_IDLE;
                break;

        default:
                kcs_bmc_ipmi_force_abort(priv);
                break;
        }
}
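
/*
 * Handle a host write to the command register (IBF and CMD_DAT set). Only the
 * WRITE_START, WRITE_END and GET_STATUS/ABORT control codes are accepted; any
 * other value aborts the transaction with KCS_ILLEGAL_CONTROL_CODE.
 */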
static void kcs_bmc_ipmi_handle_cmd(struct kcs_bmc_ipmi *priv)
{
        u8 cmd;

        set_state(priv, WRITE_STATE);
        kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);

        cmd = kcs_bmc_read_data(priv->client.dev);
        switch (cmd) {
        case KCS_CMD_WRITE_START:
                priv->phase = KCS_PHASE_WRITE_START;
                priv->error = KCS_NO_ERROR;
                priv->data_in_avail = false;
                priv->data_in_idx = 0;
                break;

        case KCS_CMD_WRITE_END:
                if (priv->phase != KCS_PHASE_WRITE_DATA) {
                        kcs_bmc_ipmi_force_abort(priv);
                        break;
                }

                priv->phase = KCS_PHASE_WRITE_END_CMD;
                break;

        case KCS_CMD_GET_STATUS_ABORT:
                if (priv->error == KCS_NO_ERROR)
                        priv->error = KCS_ABORTED_BY_COMMAND;

                priv->phase = KCS_PHASE_ABORT_ERROR1;
                priv->data_in_avail = false;
                priv->data_in_idx = 0;
                break;

        default:
                kcs_bmc_ipmi_force_abort(priv);
                priv->error = KCS_ILLEGAL_CONTROL_CODE;
                break;
        }
}

static inline struct kcs_bmc_ipmi *client_to_kcs_bmc_ipmi(struct kcs_bmc_client *client)
{
        return container_of(client, struct kcs_bmc_ipmi, client);
}
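
/*
 * Event callback invoked by the kcs_bmc core from interrupt context. Dispatch
 * on the status register: command writes go to kcs_bmc_ipmi_handle_cmd(),
 * data writes to kcs_bmc_ipmi_handle_data(). Returns IRQ_HANDLED only when
 * IBF was actually set.
 */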
static irqreturn_t kcs_bmc_ipmi_event(struct kcs_bmc_client *client)
{
        struct kcs_bmc_ipmi *priv;
        u8 status;
        int ret;

        priv = client_to_kcs_bmc_ipmi(client);
        if (!priv)
                return IRQ_NONE;

        spin_lock(&priv->lock);

        status = kcs_bmc_read_status(client->dev);
        if (status & KCS_STATUS_IBF) {
                if (status & KCS_STATUS_CMD_DAT)
                        kcs_bmc_ipmi_handle_cmd(priv);
                else
                        kcs_bmc_ipmi_handle_data(priv);

                ret = IRQ_HANDLED;
        } else {
                ret = IRQ_NONE;
        }

        spin_unlock(&priv->lock);

        return ret;
}

static const struct kcs_bmc_client_ops kcs_bmc_ipmi_client_ops = {
        .event = kcs_bmc_ipmi_event,
};

static inline struct kcs_bmc_ipmi *to_kcs_bmc(struct file *filp)
{
        return container_of(filp->private_data, struct kcs_bmc_ipmi, miscdev);
}
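
/* Opening the character device attaches this client to the KCS device via the core. */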
static int kcs_bmc_ipmi_open(struct inode *inode, struct file *filp)
{
        struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);

        return kcs_bmc_enable_device(priv->client.dev, &priv->client);
}
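
/* EPOLLIN is reported once a complete request has been buffered by the event handler. */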
static __poll_t kcs_bmc_ipmi_poll(struct file *filp, poll_table *wait)
{
        struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
        __poll_t mask = 0;

        poll_wait(filp, &priv->queue, wait);

        spin_lock_irq(&priv->lock);
        if (priv->data_in_avail)
                mask |= EPOLLIN;
        spin_unlock_irq(&priv->lock);

        return mask;
}
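
/*
 * Hand a completed request to userspace. Unless O_NONBLOCK is set, block
 * until a full message is available; return -EAGAIN if none is pending and
 * -EOVERFLOW (after forcing an abort) if the caller's buffer is too small
 * for the message.
 */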
static ssize_t kcs_bmc_ipmi_read(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
        bool data_avail;
        size_t data_len;
        ssize_t ret;

        if (!(filp->f_flags & O_NONBLOCK))
                wait_event_interruptible(priv->queue,
                                         priv->data_in_avail);

        mutex_lock(&priv->mutex);

        spin_lock_irq(&priv->lock);
        data_avail = priv->data_in_avail;
        if (data_avail) {
                data_len = priv->data_in_idx;
                memcpy(priv->kbuffer, priv->data_in, data_len);
        }
        spin_unlock_irq(&priv->lock);

        if (!data_avail) {
                ret = -EAGAIN;
                goto out_unlock;
        }

        if (count < data_len) {
                pr_err("channel=%u with too large data : %zu\n",
                       priv->client.dev->channel, data_len);
                spin_lock_irq(&priv->lock);
                kcs_bmc_ipmi_force_abort(priv);
                spin_unlock_irq(&priv->lock);
                ret = -EOVERFLOW;
                goto out_unlock;
        }

        if (copy_to_user(buf, priv->kbuffer, data_len)) {
                ret = -EFAULT;
                goto out_unlock;
        }

        ret = data_len;

        spin_lock_irq(&priv->lock);
        if (priv->phase == KCS_PHASE_WRITE_DONE) {
                priv->phase = KCS_PHASE_WAIT_READ;
                priv->data_in_avail = false;
                priv->data_in_idx = 0;
        } else {
                ret = -EAGAIN;
        }
        spin_unlock_irq(&priv->lock);

out_unlock:
        mutex_unlock(&priv->mutex);

        return ret;
}
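
/*
 * Queue the IPMI response supplied by userspace. The first byte is written to
 * the output data register immediately; the remaining bytes are clocked out
 * by the host's READ_BYTE commands in kcs_bmc_ipmi_handle_data(). Only valid
 * while a request is awaiting its response (KCS_PHASE_WAIT_READ).
 */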
static ssize_t kcs_bmc_ipmi_write(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
        ssize_t ret;

        /* the minimum response size is 3: netfn + cmd + ccode */
        if (count < 3 || count > KCS_MSG_BUFSIZ)
                return -EINVAL;

        mutex_lock(&priv->mutex);

        if (copy_from_user(priv->kbuffer, buf, count)) {
                ret = -EFAULT;
                goto out_unlock;
        }

        spin_lock_irq(&priv->lock);
        if (priv->phase == KCS_PHASE_WAIT_READ) {
                priv->phase = KCS_PHASE_READ;
                priv->data_out_idx = 1;
                priv->data_out_len = count;
                memcpy(priv->data_out, priv->kbuffer, count);
                kcs_bmc_write_data(priv->client.dev, priv->data_out[0]);
                ret = count;
        } else {
                ret = -EINVAL;
        }
        spin_unlock_irq(&priv->lock);

out_unlock:
        mutex_unlock(&priv->mutex);

        return ret;
}
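
/* Set or clear the SMS_ATN status bit, or force-abort the current transaction. */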
static long kcs_bmc_ipmi_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
        long ret = 0;

        spin_lock_irq(&priv->lock);

        switch (cmd) {
        case IPMI_BMC_IOCTL_SET_SMS_ATN:
                kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, KCS_STATUS_SMS_ATN);
                break;

        case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
                kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, 0);
                break;

        case IPMI_BMC_IOCTL_FORCE_ABORT:
                kcs_bmc_ipmi_force_abort(priv);
                break;

        default:
                ret = -EINVAL;
                break;
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}
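
/* On close, abort any in-flight transaction and detach the client from the KCS device. */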
static int kcs_bmc_ipmi_release(struct inode *inode, struct file *filp)
{
        struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);

        kcs_bmc_ipmi_force_abort(priv);
        kcs_bmc_disable_device(priv->client.dev, &priv->client);

        return 0;
}

static const struct file_operations kcs_bmc_ipmi_fops = {
        .owner          = THIS_MODULE,
        .open           = kcs_bmc_ipmi_open,
        .read           = kcs_bmc_ipmi_read,
        .write          = kcs_bmc_ipmi_write,
        .release        = kcs_bmc_ipmi_release,
        .poll           = kcs_bmc_ipmi_poll,
        .unlocked_ioctl = kcs_bmc_ipmi_ioctl,
};

static DEFINE_SPINLOCK(kcs_bmc_ipmi_instances_lock);
static LIST_HEAD(kcs_bmc_ipmi_instances);
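
/*
 * Invoked through kcs_bmc_ipmi_driver_ops when a KCS device is registered
 * with the core: allocate the per-channel state and buffers, register the
 * /dev/ipmi-kcs<channel> miscdevice, and track the instance so it can be
 * found again on removal.
 */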
static int kcs_bmc_ipmi_add_device(struct kcs_bmc_device *kcs_bmc)
{
        struct kcs_bmc_ipmi *priv;
        int rc;

        priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        spin_lock_init(&priv->lock);
        mutex_init(&priv->mutex);

        init_waitqueue_head(&priv->queue);

        priv->client.dev = kcs_bmc;
        priv->client.ops = &kcs_bmc_ipmi_client_ops;

        priv->data_in = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
        priv->data_out = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
        priv->kbuffer = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);

        priv->miscdev.minor = MISC_DYNAMIC_MINOR;
        priv->miscdev.name = devm_kasprintf(kcs_bmc->dev, GFP_KERNEL, "%s%u", DEVICE_NAME,
                                            kcs_bmc->channel);
        if (!priv->data_in || !priv->data_out || !priv->kbuffer || !priv->miscdev.name)
                return -EINVAL;

        priv->miscdev.fops = &kcs_bmc_ipmi_fops;

        rc = misc_register(&priv->miscdev);
        if (rc) {
                dev_err(kcs_bmc->dev, "Unable to register device: %d\n", rc);
                return rc;
        }

        spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
        list_add(&priv->entry, &kcs_bmc_ipmi_instances);
        spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);

        dev_info(kcs_bmc->dev, "Initialised IPMI client for channel %d", kcs_bmc->channel);

        return 0;
}
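
/*
 * Counterpart to kcs_bmc_ipmi_add_device(): look up the instance bound to
 * this KCS device, deregister its miscdevice, disable the client and release
 * the devm-allocated buffers early.
 */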
static int kcs_bmc_ipmi_remove_device(struct kcs_bmc_device *kcs_bmc)
{
        struct kcs_bmc_ipmi *priv = NULL, *pos;

        spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
        list_for_each_entry(pos, &kcs_bmc_ipmi_instances, entry) {
                if (pos->client.dev == kcs_bmc) {
                        priv = pos;
                        list_del(&pos->entry);
                        break;
                }
        }
        spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);

        if (!priv)
                return -ENODEV;

        misc_deregister(&priv->miscdev);
        kcs_bmc_disable_device(priv->client.dev, &priv->client);
        devm_kfree(kcs_bmc->dev, priv->kbuffer);
        devm_kfree(kcs_bmc->dev, priv->data_out);
        devm_kfree(kcs_bmc->dev, priv->data_in);
        devm_kfree(kcs_bmc->dev, priv);

        return 0;
}

static const struct kcs_bmc_driver_ops kcs_bmc_ipmi_driver_ops = {
        .add_device = kcs_bmc_ipmi_add_device,
        .remove_device = kcs_bmc_ipmi_remove_device,
};

static struct kcs_bmc_driver kcs_bmc_ipmi_driver = {
        .ops = &kcs_bmc_ipmi_driver_ops,
};

static int __init kcs_bmc_ipmi_init(void)
{
        kcs_bmc_register_driver(&kcs_bmc_ipmi_driver);

        return 0;
}
module_init(kcs_bmc_ipmi_init);

static void __exit kcs_bmc_ipmi_exit(void)
{
        kcs_bmc_unregister_driver(&kcs_bmc_ipmi_driver);
}
module_exit(kcs_bmc_ipmi_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");