snic_main.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. // Copyright 2014 Cisco Systems, Inc. All rights reserved.
  3. #include <linux/module.h>
  4. #include <linux/mempool.h>
  5. #include <linux/string.h>
  6. #include <linux/slab.h>
  7. #include <linux/errno.h>
  8. #include <linux/init.h>
  9. #include <linux/pci.h>
  10. #include <linux/skbuff.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/workqueue.h>
  14. #include <scsi/scsi_host.h>
  15. #include <scsi/scsi_tcq.h>
  16. #include "snic.h"
  17. #include "snic_fwint.h"
  18. #define PCI_DEVICE_ID_CISCO_SNIC 0x0046
  19. /* Supported devices by snic module */
  20. static struct pci_device_id snic_id_table[] = {
  21. {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
  22. { 0, } /* end of table */
  23. };
/* Bitmask of enabled log levels; runtime-writable via module sysfs. */
unsigned int snic_log_level = 0x0;
module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
/* Number of pages reserved for the debugfs trace buffer. */
unsigned int snic_trace_max_pages = 16;
module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_trace_max_pages,
		"Total allocated memory pages for snic trace buffer");
#endif

/* Per-LUN queue depth reported to the SCSI midlayer (clamped in slave_configure). */
unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
  36. /*
  37. * snic_slave_alloc : callback function to SCSI Mid Layer, called on
  38. * scsi device initialization.
  39. */
  40. static int
  41. snic_slave_alloc(struct scsi_device *sdev)
  42. {
  43. struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
  44. if (!tgt || snic_tgt_chkready(tgt))
  45. return -ENXIO;
  46. return 0;
  47. }
  48. /*
  49. * snic_slave_configure : callback function to SCSI Mid Layer, called on
  50. * scsi device initialization.
  51. */
  52. static int
  53. snic_slave_configure(struct scsi_device *sdev)
  54. {
  55. struct snic *snic = shost_priv(sdev->host);
  56. u32 qdepth = 0, max_ios = 0;
  57. int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;
  58. /* Set Queue Depth */
  59. max_ios = snic_max_qdepth;
  60. qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
  61. scsi_change_queue_depth(sdev, qdepth);
  62. if (snic->fwinfo.io_tmo > 1)
  63. tmo = snic->fwinfo.io_tmo * HZ;
  64. /* FW requires extended timeouts */
  65. blk_queue_rq_timeout(sdev->request_queue, tmo);
  66. return 0;
  67. }
  68. static int
  69. snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
  70. {
  71. struct snic *snic = shost_priv(sdev->host);
  72. int qsz = 0;
  73. qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
  74. if (qsz < sdev->queue_depth)
  75. atomic64_inc(&snic->s_stats.misc.qsz_rampdown);
  76. else if (qsz > sdev->queue_depth)
  77. atomic64_inc(&snic->s_stats.misc.qsz_rampup);
  78. atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);
  79. scsi_change_queue_depth(sdev, qsz);
  80. return sdev->queue_depth;
  81. }
/* SCSI host template: midlayer entry points and per-host limits. */
static const struct scsi_host_template snic_host_template = {
	.module = THIS_MODULE,
	.name = SNIC_DRV_NAME,
	.queuecommand = snic_queuecommand,
	.eh_abort_handler = snic_abort_cmd,
	.eh_device_reset_handler = snic_device_reset,
	.eh_host_reset_handler = snic_host_reset,
	.slave_alloc = snic_slave_alloc,
	.slave_configure = snic_slave_configure,
	.change_queue_depth = snic_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
	.can_queue = SNIC_MAX_IO_REQ,	/* may be lowered in snic_probe() from FW config */
	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0x800,
	.shost_groups = snic_host_groups,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct snic_internal_io_state),
	.proc_name = "snic_scsi",
};
  102. /*
  103. * snic_handle_link_event : Handles link events such as link up/down/error
  104. */
  105. void
  106. snic_handle_link_event(struct snic *snic)
  107. {
  108. unsigned long flags;
  109. spin_lock_irqsave(&snic->snic_lock, flags);
  110. if (snic->stop_link_events) {
  111. spin_unlock_irqrestore(&snic->snic_lock, flags);
  112. return;
  113. }
  114. spin_unlock_irqrestore(&snic->snic_lock, flags);
  115. queue_work(snic_glob->event_q, &snic->link_work);
  116. } /* end of snic_handle_link_event */
  117. /*
  118. * snic_notify_set : sets notification area
  119. * This notification area is to receive events from fw
  120. * Note: snic supports only MSIX interrupts, in which we can just call
  121. * svnic_dev_notify_set directly
  122. */
  123. static int
  124. snic_notify_set(struct snic *snic)
  125. {
  126. int ret = 0;
  127. enum vnic_dev_intr_mode intr_mode;
  128. intr_mode = svnic_dev_get_intr_mode(snic->vdev);
  129. if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
  130. ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
  131. } else {
  132. SNIC_HOST_ERR(snic->shost,
  133. "Interrupt mode should be setup before devcmd notify set %d\n",
  134. intr_mode);
  135. ret = -1;
  136. }
  137. return ret;
  138. } /* end of snic_notify_set */
/*
 * snic_dev_wait : polls vnic open status.
 *
 * Kicks off @start(vdev, arg) and then polls @finished(vdev, &done)
 * until it reports completion, returns an error, or the wait is
 * exhausted.  The loop exits only when BOTH the 2 second deadline has
 * passed AND at least 3 polls have been made (see comment below).
 *
 * Returns 0 when done, the error from @start/@finished, or -ETIMEDOUT.
 */
static int
snic_dev_wait(struct vnic_dev *vdev,
	      int (*start)(struct vnic_dev *, int),
	      int (*finished)(struct vnic_dev *, int *),
	      int arg)
{
	unsigned long time;
	int ret, done;
	int retry_cnt = 0;

	ret = start(vdev, arg);
	if (ret)
		return ret;

	/*
	 * Wait for func to complete...2 seconds max.
	 *
	 * Sometimes schedule_timeout_uninterruptible take long time
	 * to wakeup, which results skipping retry. The retry counter
	 * ensures to retry at least two times.
	 */
	time = jiffies + (HZ * 2);
	do {
		ret = finished(vdev, &done);
		if (ret)
			return ret;

		if (done)
			return 0;

		/* Sleep ~100ms between polls; not interruptible by signals. */
		schedule_timeout_uninterruptible(HZ/10);
		++retry_cnt;
	} while (time_after(time, jiffies) || (retry_cnt < 3));

	return -ETIMEDOUT;
} /* end of snic_dev_wait */
/*
 * snic_cleanup: called by snic_remove
 * Stops the snic device, masks all interrupts, Completed CQ entries are
 * drained. Posted WQ/RQ/Copy-WQ entries are cleanup
 *
 * Order matters: disable device and mask interrupts first so no new
 * completions arrive, drain completions, then reclaim posted entries
 * and per-snic request pools.  Returns 0 or the svnic_wq_disable error.
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

	/* Quiesce: stop the device and silence all interrupt sources. */
	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Clean up completed IOs */
	snic_fwcq_cmpl_handler(snic, -1);
	snic_wq_cmpl_handler(snic, -1);

	/* Clean up the IOs that have not completed */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Cleanup snic specific requests */
	snic_free_all_untagged_reqs(snic);

	/* Cleanup Pending SCSI commands */
	snic_shutdown_scsi_cleanup(snic);

	/* Release the request mempools created in snic_probe(). */
	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
} /* end of snic_cleanup */
  209. static void
  210. snic_iounmap(struct snic *snic)
  211. {
  212. if (snic->bar0.vaddr)
  213. iounmap(snic->bar0.vaddr);
  214. }
  215. /*
  216. * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
  217. */
  218. static int
  219. snic_vdev_open_done(struct vnic_dev *vdev, int *done)
  220. {
  221. struct snic *snic = svnic_dev_priv(vdev);
  222. int ret;
  223. int nretries = 5;
  224. do {
  225. ret = svnic_dev_open_done(vdev, done);
  226. if (ret == 0)
  227. break;
  228. SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
  229. } while (nretries--);
  230. return ret;
  231. } /* end of snic_vdev_open_done */
  232. /*
  233. * snic_add_host : registers scsi host with ML
  234. */
  235. static int
  236. snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
  237. {
  238. int ret = 0;
  239. ret = scsi_add_host(shost, &pdev->dev);
  240. if (ret) {
  241. SNIC_HOST_ERR(shost,
  242. "snic: scsi_add_host failed. %d\n",
  243. ret);
  244. return ret;
  245. }
  246. SNIC_BUG_ON(shost->work_q != NULL);
  247. shost->work_q = alloc_ordered_workqueue("scsi_wq_%d", WQ_MEM_RECLAIM,
  248. shost->host_no);
  249. if (!shost->work_q) {
  250. SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
  251. ret = -ENOMEM;
  252. }
  253. return ret;
  254. } /* end of snic_add_host */
/*
 * snic_del_host : undoes snic_add_host.
 * The !work_q check makes this a no-op when the host was never fully
 * added (snic_probe error paths rely on that), and destroying the
 * workqueue first ensures no host work runs during scsi_remove_host().
 */
static void
snic_del_host(struct Scsi_Host *shost)
{
	if (!shost->work_q)
		return;

	destroy_workqueue(shost->work_q);
	shost->work_q = NULL;
	scsi_remove_host(shost);
}
  264. int
  265. snic_get_state(struct snic *snic)
  266. {
  267. return atomic_read(&snic->state);
  268. }
  269. void
  270. snic_set_state(struct snic *snic, enum snic_state state)
  271. {
  272. SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
  273. snic_state_to_str(snic_get_state(snic)),
  274. snic_state_to_str(state));
  275. atomic_set(&snic->state, state);
  276. }
/*
 * snic_probe : Initialize the snic interface.
 *
 * PCI probe callback.  Allocates the Scsi_Host + snic pair (one
 * contiguous allocation owned by the host), maps BAR0, discovers and
 * opens the vNIC device, sizes the host from firmware config, sets up
 * locks/mempools/interrupts, registers the SCSI host and starts target
 * discovery.  On failure the goto ladder at the bottom unwinds in
 * strict reverse order of acquisition.
 */
static int
snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *shost;
	struct snic *snic;
	mempool_t *pool;
	unsigned long flags;
	u32 max_ios = 0;
	int ret, i;

	/* Device Information */
	SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);

	SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	/*
	 * Allocate SCSI Host and setup association between host, and snic
	 * (snic lives in the host's private data; freed by scsi_host_put)
	 */
	shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
	if (!shost) {
		SNIC_ERR("Unable to alloc scsi_host\n");
		ret = -ENOMEM;

		goto prob_end;
	}

	snic = shost_priv(shost);
	snic->shost = shost;

	snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
		 shost->host_no);

	SNIC_HOST_INFO(shost,
		       "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
		       shost->host_no, snic, shost, pdev->bus->number,
		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Per snic debugfs init */
	snic_stats_debugfs_init(snic);
#endif

	/* Setup PCI Resources */
	pci_set_drvdata(pdev, snic);
	snic->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot enable PCI Resources, aborting : %d\n",
			      ret);

		goto err_free_snic;
	}

	ret = pci_request_regions(pdev, SNIC_DRV_NAME);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot obtain PCI Resources, aborting : %d\n",
			      ret);

		goto err_pci_disable;
	}

	pci_set_master(pdev);

	/*
	 * Query PCI Controller on system for DMA addressing
	 * limitation for the device. Try 43-bit first, and
	 * fail to 32-bit.
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "No Usable DMA Configuration, aborting %d\n",
				      ret);

			goto err_rel_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	if (!snic->bar0.vaddr) {
		SNIC_HOST_ERR(shost,
			      "Cannot memory map BAR0 res hdr aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.bus_addr = pci_resource_start(pdev, 0);
	snic->bar0.len = pci_resource_len(pdev, 0);
	SNIC_BUG_ON(snic->bar0.bus_addr == 0);

	/* Devcmd2 Resource Allocation and Initialization */
	snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
	if (!snic->vdev) {
		SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");

		ret = -ENODEV;
		goto err_iounmap;
	}

	ret = svnic_dev_cmd_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);

		goto err_vnic_unreg;
	}

	/* Open the device; polled to completion via snic_vdev_open_done. */
	ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev open failed, aborting. %d\n",
			      ret);

		goto err_vnic_unreg;
	}

	ret = svnic_dev_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev init failed. aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Get vNIC information */
	ret = snic_get_vnic_config(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Get vNIC configuration failed, aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs */
	max_ios = snic->config.io_throttle_count;
	if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
					 max_t(u32, SNIC_MIN_IO_REQ, max_ios));

	snic->max_tag_id = shost->can_queue;

	shost->max_lun = snic->config.luns_per_tgt;
	shost->max_id = SNIC_MAX_TARGET;

	shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/

	snic_get_res_counts(snic);

	/*
	 * Assumption: Only MSIx is supported
	 */
	ret = snic_set_intr_mode(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to set intr mode aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	ret = snic_alloc_vnic_res(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc vNIC resources aborting. %d\n",
			      ret);

		goto err_clear_intr;
	}

	/* Initialize specific lists */
	INIT_LIST_HEAD(&snic->list);

	/*
	 * spl_cmd_list for maintaining snic specific cmds
	 * such as EXCH_VER_REQ, REPORT_TARGETS etc
	 */
	INIT_LIST_HEAD(&snic->spl_cmd_list);
	spin_lock_init(&snic->spl_cmd_lock);

	/* initialize all snic locks */
	spin_lock_init(&snic->snic_lock);

	for (i = 0; i < SNIC_WQ_MAX; i++)
		spin_lock_init(&snic->wq_lock[i]);

	for (i = 0; i < SNIC_IO_LOCKS; i++)
		spin_lock_init(&snic->io_req_lock[i]);

	/* Request mempools, backed by the slab caches in snic_glob. */
	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_res;
	}

	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_dflt_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");

		ret = -ENOMEM;
		goto err_free_max_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;

	/* Initialize snic state */
	atomic_set(&snic->state, SNIC_INIT);
	atomic_set(&snic->ios_inflight, 0);

	/* Setup notification buffer area */
	ret = snic_notify_set(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc notify buffer aborting. %d\n",
			      ret);

		goto err_free_tmreq_pool;
	}

	/* Publish this snic on the driver-wide list. */
	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_add_tail(&snic->list, &snic_glob->snic_list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_disc_init(&snic->disc);
	INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
	INIT_WORK(&snic->disc_work, snic_handle_disc);
	INIT_WORK(&snic->link_work, snic_handle_link);

	/* Enable all queues */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_enable(&snic->wq[i]);

	ret = svnic_dev_enable_wait(snic->vdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev enable failed w/ error %d\n",
			      ret);

		goto err_vdev_enable;
	}

	ret = snic_request_intr(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);

		goto err_req_intr;
	}

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_unmask(&snic->intr[i]);

	/* Get snic params */
	ret = snic_get_conf(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to get snic io config from FW w err %d\n",
			      ret);

		goto err_get_conf;
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add shost to SCSI
	 */
	ret = snic_add_host(shost, pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Adding scsi host Failed ... exiting. %d\n",
			      ret);

		goto err_get_conf;
	}

	snic_set_state(snic, SNIC_ONLINE);

	ret = snic_disc_start(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
			      ret);

		goto err_get_conf;
	}

	SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");

	return 0;

	/* Error unwind: each label releases exactly what was acquired above it. */
err_get_conf:
	snic_free_all_untagged_reqs(snic);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	snic_free_intr(snic);

err_req_intr:
	svnic_dev_disable(snic->vdev);

err_vdev_enable:
	svnic_dev_notify_unset(snic->vdev);

	for (i = 0; i < snic->wq_count; i++) {
		int rc = 0;

		rc = svnic_wq_disable(&snic->wq[i]);
		if (rc) {
			SNIC_HOST_ERR(shost,
				      "WQ Disable Failed w/ err = %d\n", rc);

			break;
		}
	}
	/*
	 * NOTE(review): reachable before snic_add_host() has run (e.g. via
	 * err_vdev_enable); snic_del_host() is then a no-op because
	 * shost->work_q is still NULL.
	 */
	snic_del_host(snic->shost);

err_free_tmreq_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);

err_free_max_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);

err_free_dflt_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);

err_free_res:
	snic_free_vnic_res(snic);

err_clear_intr:
	snic_clear_intr_mode(snic);

err_dev_close:
	svnic_dev_close(snic->vdev);

err_vnic_unreg:
	svnic_dev_unregister(snic->vdev);

err_iounmap:
	snic_iounmap(snic);

err_rel_regions:
	pci_release_regions(pdev);

err_pci_disable:
	pci_disable_device(pdev);

err_free_snic:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	scsi_host_put(shost);
	pci_set_drvdata(pdev, NULL);

prob_end:
	SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	return ret;
} /* end of snic_probe */
/*
 * snic_remove : invoked on unbinding the interface to cleanup the
 * resources allocated in snic_probe on initialization.
 *
 * Sequence: stop new work (state + stop_link_events), flush pending
 * work, terminate discovery, quiesce the hardware via snic_cleanup(),
 * then tear everything down in reverse of snic_probe().
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events. ISR and other threads
	 * that can queue work items will also stop creating work
	 * items on the snic workqueue
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Drain anything already queued before terminating discovery. */
	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * This stops the snic device, masks all interrupts, Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleanup
	 */
	snic_cleanup(snic);

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	/* Reverse of snic_probe() resource acquisition. */
	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* this frees Scsi_Host and snic memory (continuous chunk) */
	scsi_host_put(snic->shost);
} /* end of snic_remove */
/* Single driver-wide context; see snic_global_data_init()/cleanup(). */
struct snic_global *snic_glob;
/*
 * snic_global_data_init: Initialize SNIC Global Data
 * Notes: All the global lists, variables should be part of global data
 * this helps in debugging.
 *
 * Creates the snic_glob context, optional debugfs/trace resources, the
 * three request slab caches, and the ordered event workqueue.  The goto
 * ladder unwinds partially-created resources on failure.
 * Returns 0 on success or -ENOMEM.
 */
static int
snic_global_data_init(void)
{
	int ret = 0;
	struct kmem_cache *cachep;
	ssize_t len = 0;

	snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);
	if (!snic_glob) {
		SNIC_ERR("Failed to allocate Global Context.\n");

		ret = -ENOMEM;
		goto gdi_end;
	}

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Debugfs related Initialization */
	/* Create debugfs entries for snic */
	snic_debugfs_init();

	/* Trace related Initialization */
	/* Allocate memory for trace buffer */
	ret = snic_trc_init();
	if (ret < 0) {
		SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
		snic_trc_free();
		/* continue even if it fails */
	}
#endif

	INIT_LIST_HEAD(&snic_glob->snic_list);
	spin_lock_init(&snic_glob->snic_list_lock);

	/* Create a cache for allocation of snic_host_req+default size ESGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
	cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic default sgl slab\n");
		ret = -ENOMEM;

		goto err_dflt_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;

	/* Create a cache for allocation of max size Extended SGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
	cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic max sgl slab\n");
		ret = -ENOMEM;

		goto err_max_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;

	/* Cache for task-management requests (no SGL payload). */
	len = sizeof(struct snic_host_req);
	cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic tm req slab\n");
		ret = -ENOMEM;

		goto err_tmreq_slab;
	}
	snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;

	/* snic_event queue */
	snic_glob->event_q =
		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "snic_event_wq");
	if (!snic_glob->event_q) {
		SNIC_ERR("snic event queue create failed\n");
		ret = -ENOMEM;

		goto err_eventq;
	}

	return ret;

err_eventq:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);

err_tmreq_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);

err_max_req_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

err_dflt_req_slab:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_trc_free();
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;

gdi_end:
	return ret;
} /* end of snic_glob_init */
/*
 * snic_global_data_cleanup : Frees SNIC Global Data
 * Mirror of snic_global_data_init(); must only run after every snic
 * instance has been removed (called from module exit / init-failure).
 */
static void
snic_global_data_cleanup(void)
{
	SNIC_BUG_ON(snic_glob == NULL);

	destroy_workqueue(snic_glob->event_q);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Freeing Trace Resources */
	snic_trc_free();

	/* Freeing Debugfs Resources */
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;
} /* end of snic_glob_cleanup */
/* PCI driver glue: binds snic_probe/snic_remove to the device table. */
static struct pci_driver snic_driver = {
	.name = SNIC_DRV_NAME,
	.id_table = snic_id_table,
	.probe = snic_probe,
	.remove = snic_remove,
};
  751. static int __init
  752. snic_init_module(void)
  753. {
  754. int ret = 0;
  755. #ifndef __x86_64__
  756. SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
  757. add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
  758. #endif
  759. SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);
  760. ret = snic_global_data_init();
  761. if (ret) {
  762. SNIC_ERR("Failed to Initialize Global Data.\n");
  763. return ret;
  764. }
  765. ret = pci_register_driver(&snic_driver);
  766. if (ret < 0) {
  767. SNIC_ERR("PCI driver register error\n");
  768. goto err_pci_reg;
  769. }
  770. return ret;
  771. err_pci_reg:
  772. snic_global_data_cleanup();
  773. return ret;
  774. }
/*
 * snic_cleanup_module : module exit; unregister triggers snic_remove()
 * per device before the global data is freed.
 */
static void __exit
snic_cleanup_module(void)
{
	pci_unregister_driver(&snic_driver);
	snic_global_data_cleanup();
}
/* Module boilerplate: entry points, license, and PCI alias table. */
module_init(snic_init_module);
module_exit(snic_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
MODULE_VERSION(SNIC_DRV_VERSION);
MODULE_DEVICE_TABLE(pci, snic_id_table);
MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
	      "Sesidhar Baddela <sebaddel@cisco.com>");