qla_mid.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}
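
/*
 * qla24xx_allocate_vp_id
 *	Reserve a free vp_idx from the adapter-wide bitmap, link the vport
 *	onto ha->vp_list and publish it in the target-mode vp map.
 */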
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}
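
/*
 * qla24xx_deallocate_vp_id
 *	Undo qla24xx_allocate_vp_id(): wait for outstanding references to
 *	drop, unlink the vport from ha->vp_list and release its vp_idx bit.
 */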
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
	    10*HZ);

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}
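
/*
 * qla24xx_find_vhost_by_name
 *	Look up a vport on ha->vp_list by WWPN; returns NULL if none matches.
 */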
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual host adapter block pointer.
 *
 * Return:
 *	None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable or delete, please make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}
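
/*
 * qla24xx_disable_vp
 *	Log out and disable a virtual port: take the vport loop down, mark
 *	its fcports lost and report the new state to the FC transport.
 */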
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret = QLA_SUCCESS;
	fc_port_t *fcport;

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	qla2x00_mark_all_devices_lost(vha, 0);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}
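
/*
 * qla24xx_enable_vp
 *	Bring a virtual port back up once the physical port and fabric are
 *	available.  Returns 0 on success, 1 if the enable was skipped or
 *	failed.
 */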
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}
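
/*
 * qla24xx_configure_vp
 *	Complete vport bring-up after VP_IDX_ACQUIRED: enable RSCN delivery
 *	(change request #3), log the vport in to the fabric and mark it
 *	active.
 */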
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
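
/*
 * qla2x00_alert_all_vps
 *	Fan an asynchronous event out to every virtual port.  The per-vport
 *	reference count keeps each vha alive while vport_slock is dropped
 *	around the call to qla2x00_async_event().
 */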
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first. Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}
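
/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC work: configure a newly acquired VP index, update
 *	fcports, post relogin work and run loop resync as flagged.
 */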
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}
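
/*
 * qla2x00_do_dpc_all_vps
 *	Called from the base port's DPC thread to run qla2x00_do_dpc_vp()
 *	for every virtual port, dropping vport_slock around each call.
 */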
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
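
/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate a vport create request from the FC transport: initiator
 *	role, NPIV support in F/W and fabric, unique WWPN and the max-vport
 *	limit.  Returns 0 on success or a VPCERR_* code.
 */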
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max-npiv-vports limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}
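
/*
 * qla24xx_create_vhost
 *	Allocate and initialise a new scsi_qla_host for an NPIV vport:
 *	assign a vp_idx, copy WWNN/WWPN from the FC transport, start the
 *	vport timer and set up the SCSI host limits.
 */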
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}
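
/*
 * qla25xx_free_req_que / qla25xx_free_rsp_que
 *	Release the DMA ring, the queue-id bit and the memory of an
 *	additional request/response queue (and the MSI-X vector for
 *	response queues).
 */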
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}
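
/*
 * qla25xx_create_req_que
 *	Allocate and (optionally) initialise an additional request queue for
 *	multiqueue operation.  Returns the new queue id, or 0 on failure.
 */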
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);

	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);

	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}
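
/* Work-queue handler: drain a qpair's response queue under its qp_lock. */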
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = qpair->hw;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for uncapable adapters */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}

static void qla_ctrlvp_sp_done(void *s, int res)
{
	struct srb *sp = s;

	complete(&sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable a virtual port for given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent for enable virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		return rval;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->done = qla_ctrlvp_sp_done;
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&sp->comp);
	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}
done:
	sp->free(sp);
	return rval;
}