// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 */

#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);
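
/*
 * MRPC commands move through a simple state machine: IDLE (no command
 * outstanding on this file handle), QUEUED (waiting on mrpc_queue for the
 * hardware to become free), RUNNING (written to the MRPC registers) and
 * DONE (status and output data captured, waiting to be read back).
 */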
enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};

struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;

	struct completion comp;
	struct kref kref;
	struct list_head list;

	u32 cmd;
	u32 status;
	u32 return_code;
	size_t data_len;
	size_t read_len;
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;
};
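
/*
 * One switchtec_user is allocated per open file handle. It is reference
 * counted: the open file holds one kref, and mrpc_queue_cmd() takes an
 * additional reference for as long as the command sits on mrpc_queue, so
 * the structure survives a close() that races with command completion.
 */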
static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_completion(&stuser->comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}

static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
		stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);
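
/*
 * Submit the command at the head of mrpc_queue to the hardware. Only one
 * MRPC command may be in flight at a time (mrpc_busy); the input payload
 * is copied into the MRPC input window and writing the command number to
 * the cmd register starts execution. A delayed work item then polls for
 * completion every 500 ms as a fallback in case the completion interrupt
 * does not arrive.
 */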
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}

static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	reinit_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
		      stuser->read_len);

out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
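
/*
 * Completion is driven from two directions: mrpc_event_work runs when the
 * MRPC completion interrupt fires, and mrpc_timeout_work runs if the
 * 500 ms poll timer expires first. Both funnel into mrpc_complete_cmd()
 * under mrpc_mutex, which then kicks off the next queued command.
 */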
static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}

static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);

out:
	mutex_unlock(&stdev->mrpc_mutex);
}

static ssize_t device_version_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);
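
/*
 * The string fields in sys_info are fixed-width and padded with trailing
 * spaces rather than NUL-terminated. io_string_show() copies such a field
 * out of MMIO space, replaces the padding with a single trailing newline
 * and terminates the result; buf is a sysfs PAGE_SIZE buffer, so the
 * len + 2 bytes written always fit for these short fields.
 */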
static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
	int i;

	memcpy_fromio(buf, attr, len);
	buf[len] = '\n';
	buf[len + 1] = 0;

	for (i = len - 1; i > 0; i--) {
		if (buf[i] != ' ')
			break;
		buf[i] = '\n';
		buf[i + 1] = 0;
	}

	return strlen(buf);
}

#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			      sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);

static ssize_t component_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->component_id);

	return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->component_revision);

	return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);

static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	nonseekable_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}
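
/*
 * User-space protocol, as implemented by write()/read() below: a write of
 * a 32-bit command number followed by the input payload queues an MRPC
 * command; a subsequent read returns the 32-bit return code followed by
 * the output payload. An illustrative sketch (not part of this driver;
 * the struct layouts are assumed for the example only):
 *
 *	struct { __u32 cmd; __u8 data[inlen]; } req = { ... };
 *	write(fd, &req, sizeof(req));
 *	// optionally poll(fd, POLLIN) to wait without blocking in read()
 *	struct { __u32 return_code; __u8 data[outlen]; } rsp;
 *	read(fd, &rsp, sizeof(rsp));
 */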
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}

static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!try_wait_for_completion(&stuser->comp))
			return -EAGAIN;
	} else {
		rc = wait_for_completion_interruptible(&stuser->comp);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}
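
/*
 * poll() reports EPOLLIN | EPOLLRDNORM once the queued MRPC command has
 * completed (i.e. a read() will not block), and EPOLLPRI | EPOLLRDBAND
 * when the device-wide event counter has advanced past the value this
 * handle last saw, signalling that an event summary ioctl is worthwhile.
 */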
static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= EPOLLIN | EPOLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}

static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	info.flash_length = ioread32(&fi->flash_length);
	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}

static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
	u32 active_addr = -1;

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info.address == active_addr)
		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum)
{
	struct switchtec_ioctl_event_summary s = {0};
	int i;
	u32 reg;

	s.global = ioread32(&stdev->mmio_sw_event->global_summary);
	s.part_bitmap = readq(&stdev->mmio_sw_event->part_event_bitmap);
	s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s.part[i] = reg;
	}

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;

		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s.pff[i] = reg;
	}

	if (copy_to_user(usum, &s, sizeof(s)))
		return -EFAULT;

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	return 0;
}

static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}
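
/*
 * Each event the driver knows about lives in one of three register scopes:
 * global (sw_event), per-partition (part_cfg) and per-PFF (pff_csr). The
 * event_regs table maps an ioctl event ID to the offset of its header
 * register within the relevant scope, plus the helper that resolves an
 * index within that scope to an MMIO address.
 */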
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};

static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}
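
/*
 * An event header register packs several fields into one 32-bit word: an
 * "occurred" flag, an 8-bit occurrence count in bits 12:5, a write-one
 * clear bit, and the EN_IRQ/EN_LOG/EN_CLI/FATAL enable bits. event_ctl()
 * reads the header and event data, applies the enable/disable/clear flags
 * requested by the ioctl, and reports the resulting enable state back.
 */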
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	if (ctl->flags)
		iowrite32(hdr, reg);

	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}

static int ioctl_event_ctl(struct switchtec_dev *stdev,
	struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	unsigned int event_flags;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		event_flags = ctl.flags;
		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ctl.flags = event_flags;
			ret = event_ctl(stdev, &ctl);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}

static int ioctl_pff_to_port(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port *up)
{
	int i, part;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg;
	struct switchtec_ioctl_pff_port p;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	p.port = -1;
	for (part = 0; part < stdev->partition_count; part++) {
		pcfg = &stdev->mmio_part_cfg_all[part];
		p.partition = part;

		reg = ioread32(&pcfg->usp_pff_inst_id);
		if (reg == p.pff) {
			p.port = 0;
			break;
		}

		reg = ioread32(&pcfg->vep_pff_inst_id);
		if (reg == p.pff) {
			p.port = SWITCHTEC_IOCTL_PFF_VEP;
			break;
		}

		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
			if (reg != p.pff)
				continue;

			p.port = i + 1;
			break;
		}

		if (p.port != -1)
			break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static int ioctl_port_to_pff(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs __iomem *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id);
		break;
	default:
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		p.port = array_index_nospec(p.port,
					    ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;
	void __user *argp = (void __user *)arg;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
		rc = ioctl_flash_part_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
		rc = ioctl_event_summary(stdev, stuser, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_CTL:
		rc = ioctl_event_ctl(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PFF_TO_PORT:
		rc = ioctl_pff_to_port(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PORT_TO_PFF:
		rc = ioctl_port_to_pff(stdev, argp);
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);

	return rc;
}

static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = switchtec_dev_ioctl,
};

static void link_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, link_event_work);

	if (stdev->link_notifier)
		stdev->link_notifier(stdev);
}

static void check_link_state_events(struct switchtec_dev *stdev)
{
	int idx;
	u32 reg;
	int count;
	int occurred = 0;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
		count = (reg >> 5) & 0xFF;

		if (count != stdev->link_event_count[idx]) {
			occurred = 1;
			stdev->link_event_count[idx] = count;
		}
	}

	if (occurred)
		schedule_work(&stdev->link_event_work);
}

static void enable_link_state_events(struct switchtec_dev *stdev)
{
	int idx;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		iowrite32(SWITCHTEC_EVENT_CLEAR |
			  SWITCHTEC_EVENT_EN_IRQ,
			  &stdev->mmio_pff_csr[idx].link_state_hdr);
	}
}

static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	kfree(stdev);
}
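
/*
 * Tear down a switchtec device: stop DMA, cancel the timeout work, then,
 * with the hardware marked dead under mrpc_mutex, complete and drop every
 * command still sitting on mrpc_queue so blocked readers see -ENODEV, and
 * finally wake any pollers waiting on event_wq.
 */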
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}

static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}

static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
	size_t off = event_regs[eid].offset;
	u32 __iomem *hdr_reg;
	u32 hdr;

	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
	hdr = ioread32(hdr_reg);

	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
		return 0;

	if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
	    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
		return 0;

	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
	iowrite32(hdr, hdr_reg);

	return 1;
}

static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
	int idx;
	int count = 0;

	if (event_regs[eid].map_reg == part_ev_reg) {
		for (idx = 0; idx < stdev->partition_count; idx++)
			count += mask_event(stdev, eid, idx);
	} else if (event_regs[eid].map_reg == pff_ev_reg) {
		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
			if (!stdev->pff_local[idx])
				continue;

			count += mask_event(stdev, eid, idx);
		}
	} else {
		count += mask_event(stdev, eid, 0);
	}

	return count;
}
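
/*
 * The shared event interrupt handler: acknowledge an MRPC completion (the
 * real work happens in mrpc_work), check for link state changes, then walk
 * every known event and mask any that fired so the IRQ does not storm;
 * user space is expected to re-enable the events it cares about via the
 * event-control ioctl after consuming them.
 */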
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}

static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	return devm_request_irq(&stdev->pdev->dev, event_irq,
				switchtec_event_isr, 0,
				KBUILD_MODNAME, stdev);
}

static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < SWITCHTEC_MAX_PFF_CSR)
			stdev->pff_local[reg] = 1;
	}
}
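
/*
 * The management registers live in BAR 0 (the Switchtec "GAS", per the
 * SWITCHTEC_GAS_* offsets), which is carved into fixed-offset regions:
 * MRPC, switch events, system info, flash info, NTB info, per-partition
 * configuration and the per-PFF CSRs. switchtec_init_pci() maps the BAR
 * once and sets up typed pointers to each region.
 */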
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
	if (rc)
		return rc;

	pci_set_master(pdev);

	stdev->mmio = pcim_iomap_table(pdev)[0];
	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	return 0;
}

static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}

static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}
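
/*
 * Each supported switch is matched under two class codes: the "other
 * memory controller" class it presents as a plain management endpoint,
 * and the "other bridge" class it presents when configured for NTB use
 * (in which case switchtec_pci_probe() also requests ntb_hw_switchtec).
 */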
#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8541),  //PSX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8542),  //PSX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};

static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);

static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);