qedi_fw.c 68 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248
  1. /*
  2. * QLogic iSCSI Offload Driver
  3. * Copyright (c) 2016 Cavium Inc.
  4. *
  5. * This software is available under the terms of the GNU General Public License
  6. * (GPL) Version 2, available from the file COPYING in the main directory of
  7. * this source tree.
  8. */
  9. #include <linux/blkdev.h>
  10. #include <scsi/scsi_tcq.h>
  11. #include <linux/delay.h>
  12. #include "qedi.h"
  13. #include "qedi_iscsi.h"
  14. #include "qedi_gbl.h"
  15. #include "qedi_fw_iscsi.h"
  16. #include "qedi_fw_scsi.h"
  17. static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
  18. struct iscsi_task *mtask);
  19. void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
  20. {
  21. struct scsi_cmnd *sc = cmd->scsi_cmd;
  22. if (cmd->io_tbl.sge_valid && sc) {
  23. cmd->io_tbl.sge_valid = 0;
  24. scsi_dma_unmap(sc);
  25. }
  26. }
/*
 * qedi_process_logout_resp - translate a firmware logout-response CQE
 * into a struct iscsi_logout_rsp and complete the PDU via libiscsi.
 *
 * Holds the session back_lock across header construction and
 * __iscsi_complete_pdu(); the connection list_lock is nested inside it
 * to unlink the command from the active-command list.
 */
static void qedi_process_logout_resp(struct qedi_ctx *qedi,
				     union iscsi_cqe *cqe,
				     struct iscsi_task *task,
				     struct qedi_conn *qedi_conn)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_logout_rsp *resp_hdr;
	struct iscsi_session *session = conn->session;
	struct iscsi_logout_response_hdr *cqe_logout_response;
	struct qedi_cmd *cmd;

	cmd = (struct qedi_cmd *)task->dd_data;
	cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
	spin_lock(&session->back_lock);
	/* The response header is built in the per-connection gen_pdu area. */
	resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = cqe_logout_response->opcode;
	resp_hdr->flags = cqe_logout_response->flags;
	resp_hdr->hlength = 0;

	/* Rebuild the initiator task tag from the firmware task id and the
	 * session age so libiscsi can match the response to its task.
	 */
	resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
	resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
	resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
	resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
	resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
		  "Freeing tid=0x%x for cid=0x%x\n",
		  cmd->task_id, qedi_conn->iscsi_conn_id);

	/* Unlink from the active-command list; the else branch logs the
	 * case where the node was already removed (presumably by an
	 * earlier cleanup path).
	 */
	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	} else {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
			  cmd->task_id, qedi_conn->iscsi_conn_id,
			  &cmd->io_cmd);
	}
	spin_unlock(&qedi_conn->list_lock);

	cmd->state = RESPONSE_RECEIVED;
	/* Release the firmware task index before handing the PDU up. */
	qedi_clear_task_idx(qedi, cmd->task_id);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
	spin_unlock(&session->back_lock);
}
/*
 * qedi_process_text_resp - translate a firmware text-response CQE into
 * a struct iscsi_text_rsp and complete the PDU via libiscsi.
 *
 * The payload is expected to already be in gen_pdu.resp_buf (filled
 * elsewhere — not visible here); this function only derives its length
 * and sets resp_wr_ptr one past the end. Holds the session back_lock
 * across header construction and completion, nesting the connection
 * list_lock inside it.
 */
static void qedi_process_text_resp(struct qedi_ctx *qedi,
				   union iscsi_cqe *cqe,
				   struct iscsi_task *task,
				   struct qedi_conn *qedi_conn)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct e4_iscsi_task_context *task_ctx;
	struct iscsi_text_rsp *resp_hdr_ptr;
	struct iscsi_text_response_hdr *cqe_text_response;
	struct qedi_cmd *cmd;
	int pld_len;

	cmd = (struct qedi_cmd *)task->dd_data;
	task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
	cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
	spin_lock(&session->back_lock);
	resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
	memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
	resp_hdr_ptr->opcode = cqe_text_response->opcode;
	resp_hdr_ptr->flags = cqe_text_response->flags;
	resp_hdr_ptr->hlength = 0;

	/* Data segment length lives in the masked bits of hdr_second_dword. */
	hton24(resp_hdr_ptr->dlength,
	       (cqe_text_response->hdr_second_dword &
		ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
	/* Rebuild the ITT from the firmware task id and the session age. */
	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
				      conn->session->age);
	resp_hdr_ptr->ttt = cqe_text_response->ttt;
	resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
	resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
	resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);

	pld_len = cqe_text_response->hdr_second_dword &
		  ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
	qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;

	/* Scrub the firmware task context now that the task is finished. */
	memset(task_ctx, '\0', sizeof(*task_ctx));

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
		  "Freeing tid=0x%x for cid=0x%x\n",
		  cmd->task_id, qedi_conn->iscsi_conn_id);

	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	} else {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
			  cmd->task_id, qedi_conn->iscsi_conn_id,
			  &cmd->io_cmd);
	}
	spin_unlock(&qedi_conn->list_lock);

	cmd->state = RESPONSE_RECEIVED;
	qedi_clear_task_idx(qedi, cmd->task_id);
	/* Pass the payload span [resp_buf, resp_wr_ptr) up to libiscsi. */
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
			     qedi_conn->gen_pdu.resp_buf,
			     (qedi_conn->gen_pdu.resp_wr_ptr -
			      qedi_conn->gen_pdu.resp_buf));
	spin_unlock(&session->back_lock);
}
/*
 * qedi_tmf_resp_work - deferred completion for reset-class TMF responses.
 *
 * Queued by qedi_process_tmf_resp() for LUN reset / target warm or cold
 * reset. Blocks the session while all outstanding firmware I/O for the
 * connection is cleaned up, then completes the TMF response PDU that was
 * stashed in tmf_resp_buf. The response buffer is freed on every exit
 * path of this worker.
 */
static void qedi_tmf_resp_work(struct work_struct *work)
{
	struct qedi_cmd *qedi_cmd =
		container_of(work, struct qedi_cmd, tmf_work);
	struct qedi_conn *qedi_conn = qedi_cmd->conn;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tm_rsp *resp_hdr_ptr;
	struct iscsi_cls_session *cls_sess;
	int rval = 0;

	/* Advertise that a firmware cleanup is in flight on this conn. */
	set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
	resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
	/* NOTE(review): cls_sess is assigned but never used in this
	 * function — candidate for removal; confirm no other purpose.
	 */
	cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);

	iscsi_block_session(session->cls_session);
	rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
	if (rval) {
		/* Cleanup failed: drop the task index and skip the PDU
		 * completion, but still free the response buffer below.
		 */
		qedi_clear_task_idx(qedi, qedi_cmd->task_id);
		iscsi_unblock_session(session->cls_session);
		goto exit_tmf_resp;
	}

	iscsi_unblock_session(session->cls_session);
	qedi_clear_task_idx(qedi, qedi_cmd->task_id);

	spin_lock(&session->back_lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
	spin_unlock(&session->back_lock);

exit_tmf_resp:
	kfree(resp_hdr_ptr);
	clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
}
/*
 * qedi_process_tmf_resp - handle a firmware TMF response CQE.
 *
 * Builds a struct iscsi_tm_rsp in a freshly allocated tmf_resp_buf.
 * For LUN reset and target warm/cold reset the completion is deferred
 * to qedi_tmf_resp_work(), which also frees the buffer; every other
 * TMF function is completed and freed inline under the session
 * back_lock.
 */
static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
				  union iscsi_cqe *cqe,
				  struct iscsi_task *task,
				  struct qedi_conn *qedi_conn)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tmf_response_hdr *cqe_tmp_response;
	struct iscsi_tm_rsp *resp_hdr_ptr;
	struct iscsi_tm *tmf_hdr;
	struct qedi_cmd *qedi_cmd = NULL;

	cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;

	qedi_cmd = task->dd_data;
	/* GFP_ATOMIC: called from CQE-processing context. */
	qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC);
	if (!qedi_cmd->tmf_resp_buf) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Failed to allocate resp buf, cid=0x%x\n",
			 qedi_conn->iscsi_conn_id);
		return;
	}

	spin_lock(&session->back_lock);
	resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
	memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));

	/* Fill up the header */
	resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
	resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
	resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
	resp_hdr_ptr->hlength = 0;

	hton24(resp_hdr_ptr->dlength,
	       (cqe_tmp_response->hdr_second_dword &
		ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
	/* Rebuild the ITT from the firmware task id and the session age. */
	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
				      conn->session->age);
	resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
	resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
	resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);

	tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;

	/* Unlink from the active-command list (no log on the miss here,
	 * unlike the logout/text paths).
	 */
	spin_lock(&qedi_conn->list_lock);
	if (likely(qedi_cmd->io_cmd_in_list)) {
		qedi_cmd->io_cmd_in_list = false;
		list_del_init(&qedi_cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);

	/* Reset-class TMFs are deferred to the tmf_thread workqueue
	 * (qedi_tmf_resp_work frees tmf_resp_buf and clears the task
	 * index); only the lock release happens below for them.
	 */
	if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
	      ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
	    ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
	      ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
	    ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
	      ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
		INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
		queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
		goto unblock_sess;
	}

	qedi_clear_task_idx(qedi, qedi_cmd->task_id);

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
	kfree(resp_hdr_ptr);

unblock_sess:
	spin_unlock(&session->back_lock);
}
/*
 * qedi_process_login_resp - translate a firmware login-response CQE
 * into a struct iscsi_login_rsp and complete the PDU via libiscsi.
 *
 * The payload is expected to already be in gen_pdu.resp_buf (filled
 * elsewhere — not visible here). Header construction and completion
 * run under the session back_lock; the trailing state update and task
 * index release happen after the lock is dropped.
 */
static void qedi_process_login_resp(struct qedi_ctx *qedi,
				    union iscsi_cqe *cqe,
				    struct iscsi_task *task,
				    struct qedi_conn *qedi_conn)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct e4_iscsi_task_context *task_ctx;
	struct iscsi_login_rsp *resp_hdr_ptr;
	struct iscsi_login_response_hdr *cqe_login_response;
	struct qedi_cmd *cmd;
	int pld_len;

	cmd = (struct qedi_cmd *)task->dd_data;
	cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
	task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);

	spin_lock(&session->back_lock);
	resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
	memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
	resp_hdr_ptr->opcode = cqe_login_response->opcode;
	resp_hdr_ptr->flags = cqe_login_response->flags_attr;
	resp_hdr_ptr->hlength = 0;

	/* Data segment length lives in the masked bits of hdr_second_dword. */
	hton24(resp_hdr_ptr->dlength,
	       (cqe_login_response->hdr_second_dword &
		ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
	/* Rebuild the ITT from the firmware task id and the session age. */
	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
				      conn->session->age);
	resp_hdr_ptr->tsih = cqe_login_response->tsih;
	resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
	resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
	resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
	resp_hdr_ptr->status_class = cqe_login_response->status_class;
	resp_hdr_ptr->status_detail = cqe_login_response->status_detail;

	pld_len = cqe_login_response->hdr_second_dword &
		  ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
	qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;

	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);

	/* Scrub the firmware task context now that the task is finished. */
	memset(task_ctx, '\0', sizeof(*task_ctx));

	/* Pass the payload span [resp_buf, resp_wr_ptr) up to libiscsi. */
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
			     qedi_conn->gen_pdu.resp_buf,
			     (qedi_conn->gen_pdu.resp_wr_ptr -
			      qedi_conn->gen_pdu.resp_buf));
	spin_unlock(&session->back_lock);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
		  "Freeing tid=0x%x for cid=0x%x\n",
		  cmd->task_id, qedi_conn->iscsi_conn_id);
	cmd->state = RESPONSE_RECEIVED;
	qedi_clear_task_idx(qedi, cmd->task_id);
}
  272. static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
  273. struct iscsi_cqe_unsolicited *cqe,
  274. char *ptr, int len)
  275. {
  276. u16 idx = 0;
  277. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  278. "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
  279. len, qedi->bdq_prod_idx,
  280. (qedi->bdq_prod_idx % qedi->rq_num_entries));
  281. /* Obtain buffer address from rqe_opaque */
  282. idx = cqe->rqe_opaque;
  283. if (idx > (QEDI_BDQ_NUM - 1)) {
  284. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  285. "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
  286. idx);
  287. return;
  288. }
  289. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  290. "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
  291. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  292. "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
  293. switch (cqe->unsol_cqe_type) {
  294. case ISCSI_CQE_UNSOLICITED_SINGLE:
  295. case ISCSI_CQE_UNSOLICITED_FIRST:
  296. if (len)
  297. memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
  298. break;
  299. case ISCSI_CQE_UNSOLICITED_MIDDLE:
  300. case ISCSI_CQE_UNSOLICITED_LAST:
  301. break;
  302. default:
  303. break;
  304. }
  305. }
/*
 * qedi_put_rq_bdq_buf - recycle BDQ buffer(s) back to the firmware.
 * @qedi: adapter context.
 * @cqe: unsolicited CQE whose rqe_opaque holds the buffer index.
 * @count: number of buffer-queue entries consumed by the PDU.
 *
 * Re-posts the buffer's DMA address into the BDQ PBL at the current
 * producer slot, then advances the producer index on both the primary
 * and secondary producer doorbells. Statement order matters here: the
 * PBL entry must be fully written before the producer is advanced.
 */
static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
				struct iscsi_cqe_unsolicited *cqe,
				int count)
{
	u16 tmp;
	u16 idx = 0;
	struct scsi_bd *pbl;

	/* Obtain buffer address from rqe_opaque */
	idx = cqe->rqe_opaque;
	if (idx > (QEDI_BDQ_NUM - 1)) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
			  idx);
		return;
	}

	/* Point at the PBL slot for the current producer position and
	 * repost this buffer's DMA address there.
	 */
	pbl = (struct scsi_bd *)qedi->bdq_pbl;
	pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
	pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
	pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
		  pbl, pbl->address.hi, pbl->address.lo, idx);
	pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
	pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
	pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
	pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);

	/* Increment producer to let f/w know we've handled the frame */
	qedi->bdq_prod_idx += count;

	/* Each readw() after a writew() flushes the posted MMIO write;
	 * the value read into tmp is intentionally unused.
	 */
	writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
	tmp = readw(qedi->bdq_primary_prod);

	writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
	tmp = readw(qedi->bdq_secondary_prod);
}
/*
 * qedi_unsol_pdu_adjust_bdq - consume an unsolicited PDU's BDQ buffers.
 *
 * Copies pdu_len payload bytes into bdq_data, then returns num_bdqs + 1
 * buffer-queue entries to the firmware (the +1 accounts for the buffer
 * holding the first/only segment). Callers hold qedi->hba_lock around
 * this (see the spin_lock_irqsave at each call site).
 */
static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
				      struct iscsi_cqe_unsolicited *cqe,
				      u32 pdu_len, u32 num_bdqs,
				      char *bdq_data)
{
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "num_bdqs [%d]\n", num_bdqs);

	qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
	qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
}
/*
 * qedi_process_nopin_mesg - handle a firmware nop-in CQE.
 *
 * Two cases: an unsolicited nop-in from the target (completed with
 * RESERVED_ITT, returns 1 so the caller knows it was target-initiated),
 * or the response to one of our nop-outs (task != NULL; rebuilds the
 * ITT and retires the command). Runs under the session back_lock
 * (BH-disabled variant).
 *
 * Return: 1 for a target-initiated (unsolicited) nop-in, 0 otherwise.
 */
static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
				   union iscsi_cqe *cqe,
				   struct iscsi_task *task,
				   struct qedi_conn *qedi_conn, u16 que_idx)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_nop_in_hdr *cqe_nop_in;
	struct iscsi_nopin *hdr;
	struct qedi_cmd *cmd;
	int tgt_async_nop = 0;
	u32 lun[2];
	u32 pdu_len, num_bdqs;
	char bdq_data[QEDI_BDQ_BUF_SIZE];
	unsigned long flags;

	spin_lock_bh(&session->back_lock);
	cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;

	pdu_len = cqe_nop_in->hdr_second_dword &
		  ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
	num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;

	hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = cqe_nop_in->opcode;
	hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
	hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
	hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);

	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
		/* Target-initiated nop-in: pull the payload out of the BDQ
		 * and recycle the buffers under hba_lock.
		 * NOTE(review): qedi_get_rq_bdq_buf() copies pdu_len bytes
		 * into the QEDI_BDQ_BUF_SIZE stack buffer bdq_data —
		 * confirm the firmware bounds single/first segments to one
		 * buffer's worth.
		 */
		spin_lock_irqsave(&qedi->hba_lock, flags);
		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
					  pdu_len, num_bdqs, bdq_data);
		hdr->itt = RESERVED_ITT;
		tgt_async_nop = 1;
		spin_unlock_irqrestore(&qedi->hba_lock, flags);
		goto done;
	}

	/* Response to one of our nop-outs */
	if (task) {
		cmd = task->dd_data;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
		hdr->itt = build_itt(cqe->cqe_solicited.itid,
				     conn->session->age);
		/* All-ones LUN, matching what the nop-out carried. */
		lun[0] = 0xffffffff;
		lun[1] = 0xffffffff;
		memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
			  "Freeing tid=0x%x for cid=0x%x\n",
			  cmd->task_id, qedi_conn->iscsi_conn_id);
		cmd->state = RESPONSE_RECEIVED;
		spin_lock(&qedi_conn->list_lock);
		if (likely(cmd->io_cmd_in_list)) {
			cmd->io_cmd_in_list = false;
			list_del_init(&cmd->io_cmd);
			qedi_conn->active_cmd_count--;
		}
		spin_unlock(&qedi_conn->list_lock);
		qedi_clear_task_idx(qedi, cmd->task_id);
	}

done:
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);

	spin_unlock_bh(&session->back_lock);
	return tgt_async_nop;
}
/*
 * qedi_process_async_mesg - handle a firmware async-message CQE.
 *
 * Builds a struct iscsi_async from the CQE and completes it via
 * libiscsi. For unsolicited CQEs the payload is first copied out of
 * the BDQ (buffers recycled under hba_lock); otherwise bdq_data is
 * passed up uninitialized but pdu_len only reflects copied data when
 * the unsolicited path ran. Runs under the session back_lock.
 */
static void qedi_process_async_mesg(struct qedi_ctx *qedi,
				    union iscsi_cqe *cqe,
				    struct iscsi_task *task,
				    struct qedi_conn *qedi_conn,
				    u16 que_idx)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_async_msg_hdr *cqe_async_msg;
	struct iscsi_async *resp_hdr;
	u32 lun[2];
	u32 pdu_len, num_bdqs;
	char bdq_data[QEDI_BDQ_BUF_SIZE];
	unsigned long flags;

	spin_lock_bh(&session->back_lock);

	cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
	pdu_len = cqe_async_msg->hdr_second_dword &
		  ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
	num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;

	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
		spin_lock_irqsave(&qedi->hba_lock, flags);
		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
					  pdu_len, num_bdqs, bdq_data);
		spin_unlock_irqrestore(&qedi->hba_lock, flags);
	}

	resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = cqe_async_msg->opcode;
	/* 0x80 — presumably the F (final) bit of the BHS; confirm. */
	resp_hdr->flags = 0x80;

	lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
	lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
	memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
	resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
	resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
	resp_hdr->async_event = cqe_async_msg->async_event;
	resp_hdr->async_vcode = cqe_async_msg->async_vcode;
	resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
	resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
	resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
			     pdu_len);

	spin_unlock_bh(&session->back_lock);
}
/*
 * qedi_process_reject_mesg - handle a firmware reject CQE.
 *
 * Builds a struct iscsi_reject from the CQE and completes it via
 * libiscsi. Unlike the nop-in/async paths, the rejected PDU's payload
 * is copied into conn->data (not a stack buffer). Runs under the
 * session back_lock.
 */
static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
				     union iscsi_cqe *cqe,
				     struct iscsi_task *task,
				     struct qedi_conn *qedi_conn,
				     uint16_t que_idx)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_reject_hdr *cqe_reject;
	struct iscsi_reject *hdr;
	u32 pld_len, num_bdqs;
	unsigned long flags;

	spin_lock_bh(&session->back_lock);
	cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
	pld_len = cqe_reject->hdr_second_dword &
		  ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
	num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;

	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
		spin_lock_irqsave(&qedi->hba_lock, flags);
		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
					  pld_len, num_bdqs, conn->data);
		spin_unlock_irqrestore(&qedi->hba_lock, flags);
	}

	hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = cqe_reject->opcode;
	hdr->reason = cqe_reject->hdr_reason;
	hdr->flags = cqe_reject->hdr_flags;
	hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
			      ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
	hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
	hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
	/* Reject PDUs carry an all-ones ITT per the iSCSI spec field. */
	hdr->ffffffff = cpu_to_be32(0xffffffff);

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
			     conn->data, pld_len);
	spin_unlock_bh(&session->back_lock);
}
/*
 * qedi_scsi_completion - complete a SCSI command from a response /
 * final data-in CQE.
 *
 * Validates that the midlayer command is still live (scsi_cmd,
 * SCp.ptr, and the block request can already be gone if the command
 * was returned in another context), unmaps the SG list, rebuilds the
 * iSCSI SCSI-response header from the CQE, copies sense data on
 * CHECK CONDITION, fixes up the residual on a firmware-reported
 * underrun, and completes the PDU. Runs under the session back_lock;
 * any validation failure just drops the lock and returns.
 */
static void qedi_scsi_completion(struct qedi_ctx *qedi,
				 union iscsi_cqe *cqe,
				 struct iscsi_task *task,
				 struct iscsi_conn *conn)
{
	struct scsi_cmnd *sc_cmd;
	struct qedi_cmd *cmd = task->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_scsi_rsp *hdr;
	struct iscsi_data_in_hdr *cqe_data_in;
	int datalen = 0;
	struct qedi_conn *qedi_conn;
	u32 iscsi_cid;
	u8 cqe_err_bits = 0;

	/* Resolve our per-connection context from the CQE's conn id. */
	iscsi_cid = cqe->cqe_common.conn_id;
	qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];

	cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
	cqe_err_bits =
		cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;

	spin_lock_bh(&session->back_lock);
	/* get the scsi command */
	sc_cmd = cmd->scsi_cmd;

	if (!sc_cmd) {
		QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
		goto error;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "SCp.ptr is NULL, returned in another context.\n");
		goto error;
	}

	if (!sc_cmd->request) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "sc_cmd->request is NULL, sc_cmd=%p.\n",
			  sc_cmd);
		goto error;
	}

	if (!sc_cmd->request->special) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "request->special is NULL so request not valid, sc_cmd=%p.\n",
			  sc_cmd);
		goto error;
	}

	if (!sc_cmd->request->q) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "request->q is NULL so request is not valid, sc_cmd=%p.\n",
			  sc_cmd);
		goto error;
	}

	qedi_iscsi_unmap_sg_list(cmd);

	/* Rewrite the task's header in place as the SCSI response. */
	hdr = (struct iscsi_scsi_rsp *)task->hdr;
	hdr->opcode = cqe_data_in->opcode;
	hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
	hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
	hdr->response = cqe_data_in->reserved1;
	hdr->cmd_status = cqe_data_in->status_rsvd;
	hdr->flags = cqe_data_in->flags;
	hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);

	if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
		/* Sense length comes from the masked reserved2 field.
		 * NOTE(review): datalen is a 24-bit masked value copied
		 * into conn->data without an explicit clamp — confirm the
		 * firmware bounds it to the sense/conn buffer size.
		 */
		datalen = cqe_data_in->reserved2 &
			  ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
		memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
	}

	/* If f/w reports data underrun err then set residual to IO transfer
	 * length, set Underrun flag and clear Overrun flag explicitly
	 */
	if (unlikely(cqe_err_bits &&
		     GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
			  hdr->itt, cqe_data_in->flags, cmd->task_id,
			  qedi_conn->iscsi_conn_id, hdr->residual_count,
			  scsi_bufflen(sc_cmd));
		hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
		hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
	}

	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
		  "Freeing tid=0x%x for cid=0x%x\n",
		  cmd->task_id, qedi_conn->iscsi_conn_id);
	cmd->state = RESPONSE_RECEIVED;
	if (qedi_io_tracing)
		qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);

	qedi_clear_task_idx(qedi, cmd->task_id);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
			     conn->data, datalen);
error:
	spin_unlock_bh(&session->back_lock);
}
  591. static void qedi_mtask_completion(struct qedi_ctx *qedi,
  592. union iscsi_cqe *cqe,
  593. struct iscsi_task *task,
  594. struct qedi_conn *conn, uint16_t que_idx)
  595. {
  596. struct iscsi_conn *iscsi_conn;
  597. u32 hdr_opcode;
  598. hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
  599. iscsi_conn = conn->cls_conn->dd_data;
  600. switch (hdr_opcode) {
  601. case ISCSI_OPCODE_SCSI_RESPONSE:
  602. case ISCSI_OPCODE_DATA_IN:
  603. qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
  604. break;
  605. case ISCSI_OPCODE_LOGIN_RESPONSE:
  606. qedi_process_login_resp(qedi, cqe, task, conn);
  607. break;
  608. case ISCSI_OPCODE_TMF_RESPONSE:
  609. qedi_process_tmf_resp(qedi, cqe, task, conn);
  610. break;
  611. case ISCSI_OPCODE_TEXT_RESPONSE:
  612. qedi_process_text_resp(qedi, cqe, task, conn);
  613. break;
  614. case ISCSI_OPCODE_LOGOUT_RESPONSE:
  615. qedi_process_logout_resp(qedi, cqe, task, conn);
  616. break;
  617. case ISCSI_OPCODE_NOP_IN:
  618. qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
  619. break;
  620. default:
  621. QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
  622. }
  623. }
  624. static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
  625. struct iscsi_cqe_solicited *cqe,
  626. struct iscsi_task *task,
  627. struct qedi_conn *qedi_conn)
  628. {
  629. struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
  630. struct iscsi_session *session = conn->session;
  631. struct qedi_cmd *cmd = task->dd_data;
  632. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
  633. "itid=0x%x, cmd task id=0x%x\n",
  634. cqe->itid, cmd->task_id);
  635. cmd->state = RESPONSE_RECEIVED;
  636. qedi_clear_task_idx(qedi, cmd->task_id);
  637. spin_lock_bh(&session->back_lock);
  638. __iscsi_put_task(task);
  639. spin_unlock_bh(&session->back_lock);
  640. }
/* Handle a TASK_CLEANUP CQE from firmware.
 *
 * Three cases, decided in order:
 *  1. The cleaned-up tid matches a pending abort on tmf_work_list:
 *     finish the abort (wake qedi_tmf_work waiter).
 *  2. A bulk cleanup is in flight (cmd_cleanup_req > 0): count this
 *     completion and wake qedi_cleanup_all_io.
 *  3. Neither: log as a delayed/untracked cleanup response.
 */
static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
					  struct iscsi_cqe_solicited *cqe,
					  struct iscsi_task *task,
					  struct iscsi_conn *conn)
{
	struct qedi_work_map *work, *work_tmp;
	u32 proto_itt = cqe->itid;
	u32 ptmp_itt = 0;
	itt_t protoitt = 0;
	int found = 0;
	struct qedi_cmd *qedi_cmd = NULL;
	u32 rtid = 0;
	u32 iscsi_cid;
	struct qedi_conn *qedi_conn;
	struct qedi_cmd *dbg_cmd;
	struct iscsi_task *mtask;
	struct iscsi_tm *tmf_hdr = NULL;

	iscsi_cid = cqe->conn_id;
	qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	if (!qedi_conn) {
		/* Connection torn down before this CQE arrived. */
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "icid not found 0x%x\n", cqe->conn_id);
		return;
	}

	/* Based on this itt get the corresponding qedi_cmd */
	spin_lock_bh(&qedi_conn->tmf_work_lock);
	list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
				 list) {
		if (work->rtid == proto_itt) {
			/* We found the command */
			qedi_cmd = work->qedi_cmd;
			if (!qedi_cmd->list_tmf_work) {
				/* Bookkeeping mismatch: the work node is on
				 * the list but the command no longer points
				 * back at it — should never happen.
				 */
				QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
					  "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
					  proto_itt, qedi_conn->iscsi_conn_id);
				WARN_ON(1);
			}
			found = 1;
			mtask = qedi_cmd->task;
			tmf_hdr = (struct iscsi_tm *)mtask->hdr;
			rtid = work->rtid;

			/* Consume the work node under tmf_work_lock so
			 * qedi_tmf_work's ldel_exit path cannot race us.
			 */
			list_del_init(&work->list);
			kfree(work);
			qedi_cmd->list_tmf_work = NULL;
		}
	}
	spin_unlock_bh(&qedi_conn->tmf_work_lock);

	if (found) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
			  "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
			  proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);

		if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
		    ISCSI_TM_FUNC_ABORT_TASK) {
			/* Map the referenced task tag back to the libiscsi
			 * task; back_lock protects the itt lookup only.
			 */
			spin_lock_bh(&conn->session->back_lock);
			protoitt = build_itt(get_itt(tmf_hdr->rtt),
					     conn->session->age);
			task = iscsi_itt_to_task(conn, protoitt);
			spin_unlock_bh(&conn->session->back_lock);
			if (!task) {
				/* The aborted I/O already completed. */
				QEDI_NOTICE(&qedi->dbg_ctx,
					    "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
					    get_itt(tmf_hdr->rtt),
					    qedi_conn->iscsi_conn_id);
				return;
			}

			dbg_cmd = task->dd_data;

			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
				  "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
				  get_itt(tmf_hdr->rtt), get_itt(task->itt),
				  dbg_cmd->task_id, qedi_conn->iscsi_conn_id);

			if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
				qedi_cmd->state = CLEANUP_RECV;

			qedi_clear_task_idx(qedi_conn->qedi, rtid);

			/* Retire the aborted I/O from the active list. */
			spin_lock(&qedi_conn->list_lock);
			if (likely(dbg_cmd->io_cmd_in_list)) {
				dbg_cmd->io_cmd_in_list = false;
				list_del_init(&dbg_cmd->io_cmd);
				qedi_conn->active_cmd_count--;
			}
			spin_unlock(&qedi_conn->list_lock);

			qedi_cmd->state = CLEANUP_RECV;
			/* Waiter: qedi_wait_for_cleanup_request(). */
			wake_up_interruptible(&qedi_conn->wait_queue);
		}
	} else if (qedi_conn->cmd_cleanup_req > 0) {
		/* Part of a bulk cleanup started by qedi_cleanup_all_io(). */
		spin_lock_bh(&conn->session->back_lock);
		qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
		protoitt = build_itt(ptmp_itt, conn->session->age);
		task = iscsi_itt_to_task(conn, protoitt);
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
			  "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
			  cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
			  qedi_conn->iscsi_conn_id);
		spin_unlock_bh(&conn->session->back_lock);
		if (!task) {
			QEDI_NOTICE(&qedi->dbg_ctx,
				    "task is null, itid=0x%x, cid=0x%x\n",
				    cqe->itid, qedi_conn->iscsi_conn_id);
			return;
		}
		qedi_conn->cmd_cleanup_cmpl++;
		/* Waiter: qedi_cleanup_all_io(). */
		wake_up(&qedi_conn->wait_queue);

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
			  "Freeing tid=0x%x for cid=0x%x\n",
			  cqe->itid, qedi_conn->iscsi_conn_id);
		qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);

	} else {
		/* No abort pending and no bulk cleanup in flight. */
		qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
		protoitt = build_itt(ptmp_itt, conn->session->age);
		task = iscsi_itt_to_task(conn, protoitt);
		QEDI_ERR(&qedi->dbg_ctx,
			 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
			 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
	}
}
/* Fast-path CQE dispatcher: classify one completion queue entry and
 * route it to the appropriate handler.  Runs per qedi_work item queued
 * from the interrupt path.
 */
void qedi_fp_process_cqes(struct qedi_work *work)
{
	struct qedi_ctx *qedi = work->qedi;
	union iscsi_cqe *cqe = &work->cqe;
	struct iscsi_task *task = NULL;
	struct iscsi_nopout *nopout_hdr;
	struct qedi_conn *q_conn;
	struct iscsi_conn *conn;
	struct qedi_cmd *qedi_cmd;
	u32 comp_type;
	u32 iscsi_cid;
	u32 hdr_opcode;
	u16 que_idx = work->que_idx;
	u8 cqe_err_bits = 0;

	comp_type = cqe->cqe_common.cqe_type;
	hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
	cqe_err_bits =
		cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
		  cqe->cqe_common.conn_id, comp_type, hdr_opcode);

	if (comp_type >= MAX_ISCSI_CQES_TYPE) {
		QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
		return;
	}

	iscsi_cid  = cqe->cqe_common.conn_id;
	q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	if (!q_conn) {
		/* CQE can outlive the connection it belongs to. */
		QEDI_WARN(&qedi->dbg_ctx,
			  "Session no longer exists for cid=0x%x!!\n",
			  iscsi_cid);
		return;
	}

	conn = q_conn->cls_conn->dd_data;

	/* A data-digest error escalates straight to connection recovery. */
	if (unlikely(cqe_err_bits &&
		     GET_FIELD(cqe_err_bits,
			       CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return;
	}

	switch (comp_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
	case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
		/* Solicited work items are embedded in the owning command. */
		qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
		task = qedi_cmd->task;
		if (!task) {
			QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
			return;
		}

		/* Process NOPIN local completion */
		nopout_hdr = (struct iscsi_nopout *)task->hdr;
		if ((nopout_hdr->itt == RESERVED_ITT) &&
		    (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
			qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
						      task, q_conn);
		} else {
			/* Translate the firmware tid to the protocol itt
			 * before handing off.
			 */
			cqe->cqe_solicited.itid =
					    qedi_get_itt(cqe->cqe_solicited);
			/* Process other solicited responses */
			qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
		}
		break;
	case ISCSI_CQE_TYPE_UNSOLICITED:
		switch (hdr_opcode) {
		case ISCSI_OPCODE_NOP_IN:
			qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
						que_idx);
			break;
		case ISCSI_OPCODE_ASYNC_MSG:
			qedi_process_async_mesg(qedi, cqe, task, q_conn,
						que_idx);
			break;
		case ISCSI_OPCODE_REJECT:
			qedi_process_reject_mesg(qedi, cqe, task, q_conn,
						 que_idx);
			break;
		}
		goto exit_fp_process;
	case ISCSI_CQE_TYPE_DUMMY:
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
		goto exit_fp_process;
	case ISCSI_CQE_TYPE_TASK_CLEANUP:
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
		qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
					      conn);
		goto exit_fp_process;
	default:
		QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
		break;
	}

exit_fp_process:
	return;
}
  848. static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
  849. {
  850. struct iscsi_db_data dbell = { 0 };
  851. dbell.agg_flags = 0;
  852. dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
  853. dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
  854. dbell.params |=
  855. DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
  856. dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
  857. writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
  858. /* Make sure fw write idx is coherent, and include both memory barriers
  859. * as a failsafe as for some architectures the call is the same but on
  860. * others they are two different assembly operations.
  861. */
  862. wmb();
  863. mmiowb();
  864. QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
  865. "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
  866. qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
  867. qedi_conn->iscsi_conn_id);
  868. }
  869. static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
  870. {
  871. struct qedi_endpoint *ep;
  872. u16 rval;
  873. ep = qedi_conn->ep;
  874. rval = ep->sq_prod_idx;
  875. /* Increament SQ index */
  876. ep->sq_prod_idx++;
  877. ep->fw_sq_prod_idx++;
  878. if (ep->sq_prod_idx == QEDI_SQ_SIZE)
  879. ep->sq_prod_idx = 0;
  880. return rval;
  881. }
  882. int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
  883. struct iscsi_task *task)
  884. {
  885. struct iscsi_login_req_hdr login_req_pdu_header;
  886. struct scsi_sgl_task_params tx_sgl_task_params;
  887. struct scsi_sgl_task_params rx_sgl_task_params;
  888. struct iscsi_task_params task_params;
  889. struct e4_iscsi_task_context *fw_task_ctx;
  890. struct qedi_ctx *qedi = qedi_conn->qedi;
  891. struct iscsi_login_req *login_hdr;
  892. struct scsi_sge *resp_sge = NULL;
  893. struct qedi_cmd *qedi_cmd;
  894. struct qedi_endpoint *ep;
  895. s16 tid = 0;
  896. u16 sq_idx = 0;
  897. int rval = 0;
  898. resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
  899. qedi_cmd = (struct qedi_cmd *)task->dd_data;
  900. ep = qedi_conn->ep;
  901. login_hdr = (struct iscsi_login_req *)task->hdr;
  902. tid = qedi_get_task_idx(qedi);
  903. if (tid == -1)
  904. return -ENOMEM;
  905. fw_task_ctx =
  906. (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
  907. tid);
  908. memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
  909. qedi_cmd->task_id = tid;
  910. memset(&task_params, 0, sizeof(task_params));
  911. memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
  912. memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
  913. memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
  914. /* Update header info */
  915. login_req_pdu_header.opcode = login_hdr->opcode;
  916. login_req_pdu_header.version_min = login_hdr->min_version;
  917. login_req_pdu_header.version_max = login_hdr->max_version;
  918. login_req_pdu_header.flags_attr = login_hdr->flags;
  919. login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
  920. login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);
  921. login_req_pdu_header.tsih = login_hdr->tsih;
  922. login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);
  923. qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
  924. login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
  925. login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
  926. login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
  927. login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
  928. login_req_pdu_header.exp_stat_sn = 0;
  929. /* Fill tx AHS and rx buffer */
  930. tx_sgl_task_params.sgl =
  931. (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
  932. tx_sgl_task_params.sgl_phys_addr.lo =
  933. (u32)(qedi_conn->gen_pdu.req_dma_addr);
  934. tx_sgl_task_params.sgl_phys_addr.hi =
  935. (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
  936. tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
  937. tx_sgl_task_params.num_sges = 1;
  938. rx_sgl_task_params.sgl =
  939. (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
  940. rx_sgl_task_params.sgl_phys_addr.lo =
  941. (u32)(qedi_conn->gen_pdu.resp_dma_addr);
  942. rx_sgl_task_params.sgl_phys_addr.hi =
  943. (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
  944. rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
  945. rx_sgl_task_params.num_sges = 1;
  946. /* Fill fw input params */
  947. task_params.context = fw_task_ctx;
  948. task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
  949. task_params.itid = tid;
  950. task_params.cq_rss_number = 0;
  951. task_params.tx_io_size = ntoh24(login_hdr->dlength);
  952. task_params.rx_io_size = resp_sge->sge_len;
  953. sq_idx = qedi_get_wqe_idx(qedi_conn);
  954. task_params.sqe = &ep->sq[sq_idx];
  955. memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
  956. rval = init_initiator_login_request_task(&task_params,
  957. &login_req_pdu_header,
  958. &tx_sgl_task_params,
  959. &rx_sgl_task_params);
  960. if (rval)
  961. return -1;
  962. spin_lock(&qedi_conn->list_lock);
  963. list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
  964. qedi_cmd->io_cmd_in_list = true;
  965. qedi_conn->active_cmd_count++;
  966. spin_unlock(&qedi_conn->list_lock);
  967. qedi_ring_doorbell(qedi_conn);
  968. return 0;
  969. }
  970. int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
  971. struct iscsi_task *task)
  972. {
  973. struct iscsi_logout_req_hdr logout_pdu_header;
  974. struct scsi_sgl_task_params tx_sgl_task_params;
  975. struct scsi_sgl_task_params rx_sgl_task_params;
  976. struct iscsi_task_params task_params;
  977. struct e4_iscsi_task_context *fw_task_ctx;
  978. struct iscsi_logout *logout_hdr = NULL;
  979. struct qedi_ctx *qedi = qedi_conn->qedi;
  980. struct qedi_cmd *qedi_cmd;
  981. struct qedi_endpoint *ep;
  982. s16 tid = 0;
  983. u16 sq_idx = 0;
  984. int rval = 0;
  985. qedi_cmd = (struct qedi_cmd *)task->dd_data;
  986. logout_hdr = (struct iscsi_logout *)task->hdr;
  987. ep = qedi_conn->ep;
  988. tid = qedi_get_task_idx(qedi);
  989. if (tid == -1)
  990. return -ENOMEM;
  991. fw_task_ctx =
  992. (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
  993. tid);
  994. memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
  995. qedi_cmd->task_id = tid;
  996. memset(&task_params, 0, sizeof(task_params));
  997. memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
  998. memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
  999. memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
  1000. /* Update header info */
  1001. logout_pdu_header.opcode = logout_hdr->opcode;
  1002. logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
  1003. qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
  1004. logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
  1005. logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
  1006. logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
  1007. logout_pdu_header.cid = qedi_conn->iscsi_conn_id;
  1008. /* Fill fw input params */
  1009. task_params.context = fw_task_ctx;
  1010. task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
  1011. task_params.itid = tid;
  1012. task_params.cq_rss_number = 0;
  1013. task_params.tx_io_size = 0;
  1014. task_params.rx_io_size = 0;
  1015. sq_idx = qedi_get_wqe_idx(qedi_conn);
  1016. task_params.sqe = &ep->sq[sq_idx];
  1017. memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
  1018. rval = init_initiator_logout_request_task(&task_params,
  1019. &logout_pdu_header,
  1020. NULL, NULL);
  1021. if (rval)
  1022. return -1;
  1023. spin_lock(&qedi_conn->list_lock);
  1024. list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
  1025. qedi_cmd->io_cmd_in_list = true;
  1026. qedi_conn->active_cmd_count++;
  1027. spin_unlock(&qedi_conn->list_lock);
  1028. qedi_ring_doorbell(qedi_conn);
  1029. return 0;
  1030. }
/* Issue firmware cleanup for every active command on the connection
 * (optionally restricted to one LUN for a LUN reset), then wait for the
 * matching TASK_CLEANUP completions counted by
 * qedi_process_cmd_cleanup_resp().
 *
 * @task: the TMF task that triggered the cleanup (skipped in the scan),
 *        or NULL when called from connection recovery.
 * @in_recovery: log-only flag indicating the recovery path.
 *
 * Returns 0 when all cleanups completed, -1 if they did not complete
 * even after draining the device.
 */
int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
			struct iscsi_task *task, bool in_recovery)
{
	int rval;
	struct iscsi_task *ctask;
	struct qedi_cmd *cmd, *cmd_tmp;
	struct iscsi_tm *tmf_hdr;
	unsigned int lun = 0;
	bool lun_reset = false;
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;

	/* From recovery, task is NULL or from tmf resp valid task */
	if (task) {
		tmf_hdr = (struct iscsi_tm *)task->hdr;

		if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
		    ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
			lun_reset = true;
			lun = scsilun_to_int(&tmf_hdr->lun);
		}
	}

	/* Reset the request/completion counters the CQE handler uses. */
	qedi_conn->cmd_cleanup_req = 0;
	qedi_conn->cmd_cleanup_cmpl = 0;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
		  qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
		  in_recovery, lun_reset);

	/* back_lock is only needed when we dereference scsi_cmd->device
	 * for the per-LUN filter below.
	 */
	if (lun_reset)
		spin_lock_bh(&session->back_lock);

	spin_lock(&qedi_conn->list_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
				 io_cmd) {
		ctask = cmd->task;
		/* Never clean up the TMF task that asked for this. */
		if (ctask == task)
			continue;

		if (lun_reset) {
			if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
				QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
					  "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
					  cmd->task_id, get_itt(ctask->itt),
					  cmd->scsi_cmd, cmd->scsi_cmd->device,
					  ctask->state, cmd->state,
					  qedi_conn->iscsi_conn_id);

				/* LUN reset only touches that LUN's I/O. */
				if (cmd->scsi_cmd->device->lun != lun)
					continue;
			}
		}
		qedi_conn->cmd_cleanup_req++;
		qedi_iscsi_cleanup_task(ctask, true);

		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
		QEDI_WARN(&qedi->dbg_ctx,
			  "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
			  &cmd->io_cmd, qedi_conn->iscsi_conn_id);
	}

	spin_unlock(&qedi_conn->list_lock);

	if (lun_reset)
		spin_unlock_bh(&session->back_lock);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "cmd_cleanup_req=%d, cid=0x%x\n",
		  qedi_conn->cmd_cleanup_req,
		  qedi_conn->iscsi_conn_id);

	/* Wait for all cleanup CQEs; also returns if the ep pointer is set
	 * (connection still up) — woken by qedi_process_cmd_cleanup_resp().
	 */
	rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
						((qedi_conn->cmd_cleanup_req ==
						 qedi_conn->cmd_cleanup_cmpl) ||
						qedi_conn->ep),
						5 * HZ);
	if (rval) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
			  "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
			  qedi_conn->cmd_cleanup_req,
			  qedi_conn->cmd_cleanup_cmpl,
			  qedi_conn->iscsi_conn_id);

		return 0;
	}

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
		  qedi_conn->cmd_cleanup_req,
		  qedi_conn->cmd_cleanup_cmpl,
		  qedi_conn->iscsi_conn_id);

	/* Timed out: quiesce all sessions, drain the device, retry once. */
	iscsi_host_for_each_session(qedi->shost,
				    qedi_mark_device_missing);
	qedi_ops->common->drain(qedi->cdev);

	/* Enable IOs for all other sessions except current.*/
	if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
					      (qedi_conn->cmd_cleanup_req ==
					       qedi_conn->cmd_cleanup_cmpl),
					      5 * HZ)) {
		iscsi_host_for_each_session(qedi->shost,
					    qedi_mark_device_available);
		return -1;
	}

	iscsi_host_for_each_session(qedi->shost,
				    qedi_mark_device_available);

	return 0;
}
  1127. void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
  1128. struct iscsi_task *task)
  1129. {
  1130. struct qedi_endpoint *qedi_ep;
  1131. int rval;
  1132. qedi_ep = qedi_conn->ep;
  1133. qedi_conn->cmd_cleanup_req = 0;
  1134. qedi_conn->cmd_cleanup_cmpl = 0;
  1135. if (!qedi_ep) {
  1136. QEDI_WARN(&qedi->dbg_ctx,
  1137. "Cannot proceed, ep already disconnected, cid=0x%x\n",
  1138. qedi_conn->iscsi_conn_id);
  1139. return;
  1140. }
  1141. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
  1142. "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
  1143. qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
  1144. qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
  1145. rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
  1146. if (rval) {
  1147. QEDI_ERR(&qedi->dbg_ctx,
  1148. "fatal error, need hard reset, cid=0x%x\n",
  1149. qedi_conn->iscsi_conn_id);
  1150. WARN_ON(1);
  1151. }
  1152. }
  1153. static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
  1154. struct qedi_conn *qedi_conn,
  1155. struct iscsi_task *task,
  1156. struct qedi_cmd *qedi_cmd,
  1157. struct qedi_work_map *list_work)
  1158. {
  1159. struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
  1160. int wait;
  1161. wait = wait_event_interruptible_timeout(qedi_conn->wait_queue,
  1162. ((qedi_cmd->state ==
  1163. CLEANUP_RECV) ||
  1164. ((qedi_cmd->type == TYPEIO) &&
  1165. (cmd->state ==
  1166. RESPONSE_RECEIVED))),
  1167. 5 * HZ);
  1168. if (!wait) {
  1169. qedi_cmd->state = CLEANUP_WAIT_FAILED;
  1170. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
  1171. "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n",
  1172. cmd->task_id, qedi_conn->iscsi_conn_id);
  1173. return -1;
  1174. }
  1175. return 0;
  1176. }
  1177. static void qedi_tmf_work(struct work_struct *work)
  1178. {
  1179. struct qedi_cmd *qedi_cmd =
  1180. container_of(work, struct qedi_cmd, tmf_work);
  1181. struct qedi_conn *qedi_conn = qedi_cmd->conn;
  1182. struct qedi_ctx *qedi = qedi_conn->qedi;
  1183. struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
  1184. struct iscsi_cls_session *cls_sess;
  1185. struct qedi_work_map *list_work = NULL;
  1186. struct iscsi_task *mtask;
  1187. struct qedi_cmd *cmd;
  1188. struct iscsi_task *ctask;
  1189. struct iscsi_tm *tmf_hdr;
  1190. s16 rval = 0;
  1191. s16 tid = 0;
  1192. mtask = qedi_cmd->task;
  1193. tmf_hdr = (struct iscsi_tm *)mtask->hdr;
  1194. cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
  1195. set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
  1196. ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
  1197. if (!ctask || !ctask->sc) {
  1198. QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
  1199. goto abort_ret;
  1200. }
  1201. cmd = (struct qedi_cmd *)ctask->dd_data;
  1202. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
  1203. "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
  1204. get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
  1205. qedi_conn->iscsi_conn_id);
  1206. if (qedi_do_not_recover) {
  1207. QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
  1208. qedi_do_not_recover);
  1209. goto abort_ret;
  1210. }
  1211. list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
  1212. if (!list_work) {
  1213. QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
  1214. goto abort_ret;
  1215. }
  1216. qedi_cmd->type = TYPEIO;
  1217. list_work->qedi_cmd = qedi_cmd;
  1218. list_work->rtid = cmd->task_id;
  1219. list_work->state = QEDI_WORK_SCHEDULED;
  1220. qedi_cmd->list_tmf_work = list_work;
  1221. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
  1222. "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
  1223. list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
  1224. tmf_hdr->flags);
  1225. spin_lock_bh(&qedi_conn->tmf_work_lock);
  1226. list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
  1227. spin_unlock_bh(&qedi_conn->tmf_work_lock);
  1228. qedi_iscsi_cleanup_task(ctask, false);
  1229. rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
  1230. list_work);
  1231. if (rval == -1) {
  1232. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
  1233. "FW cleanup got escalated, cid=0x%x\n",
  1234. qedi_conn->iscsi_conn_id);
  1235. goto ldel_exit;
  1236. }
  1237. tid = qedi_get_task_idx(qedi);
  1238. if (tid == -1) {
  1239. QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
  1240. qedi_conn->iscsi_conn_id);
  1241. goto ldel_exit;
  1242. }
  1243. qedi_cmd->task_id = tid;
  1244. qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
  1245. abort_ret:
  1246. clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
  1247. return;
  1248. ldel_exit:
  1249. spin_lock_bh(&qedi_conn->tmf_work_lock);
  1250. if (!qedi_cmd->list_tmf_work) {
  1251. list_del_init(&list_work->list);
  1252. qedi_cmd->list_tmf_work = NULL;
  1253. kfree(list_work);
  1254. }
  1255. spin_unlock_bh(&qedi_conn->tmf_work_lock);
  1256. spin_lock(&qedi_conn->list_lock);
  1257. if (likely(cmd->io_cmd_in_list)) {
  1258. cmd->io_cmd_in_list = false;
  1259. list_del_init(&cmd->io_cmd);
  1260. qedi_conn->active_cmd_count--;
  1261. }
  1262. spin_unlock(&qedi_conn->list_lock);
  1263. clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
  1264. }
/* Build and post an iSCSI Task Management Function request PDU.
 *
 * For ABORT_TASK the referenced task tag (rtt) is rewritten from the
 * libiscsi itt to the firmware tid form; for other TMFs rtt is the
 * reserved tag.
 *
 * Returns 0 on success (also when the referenced task has already
 * completed), -ENODEV if the endpoint is gone, -ENOMEM if no task index
 * is available, -1 if firmware task initialization fails.
 */
static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
			       struct iscsi_task *mtask)
{
	struct iscsi_tmf_request_hdr tmf_pdu_header;
	struct iscsi_task_params task_params;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct e4_iscsi_task_context *fw_task_ctx;
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_task *ctask;
	struct iscsi_tm *tmf_hdr;
	struct qedi_cmd *qedi_cmd;
	struct qedi_cmd *cmd;
	struct qedi_endpoint *ep;
	u32 scsi_lun[2];
	s16 tid = 0;
	u16 sq_idx = 0;
	int rval = 0;

	tmf_hdr = (struct iscsi_tm *)mtask->hdr;
	qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
	ep = qedi_conn->ep;
	if (!ep)
		return -ENODEV;

	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	fw_task_ctx =
	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
							       tid);
	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));

	qedi_cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));

	/* Update header info */
	qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
	tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
	tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);

	/* LUN is copied through a u32[2] to byte-swap each half. */
	memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
	tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
	tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);

	if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
	    ISCSI_TM_FUNC_ABORT_TASK) {
		/* Firmware needs the tid of the task being aborted. */
		ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
		if (!ctask || !ctask->sc) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Could not get reference task\n");
			return 0;
		}
		cmd = (struct qedi_cmd *)ctask->dd_data;
		tmf_pdu_header.rtt =
				qedi_set_itt(cmd->task_id,
					     get_itt(tmf_hdr->rtt));
	} else {
		tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
	}

	tmf_pdu_header.opcode = tmf_hdr->opcode;
	tmf_pdu_header.function = tmf_hdr->flags;
	tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
	tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);

	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = 0;
	task_params.tx_io_size = 0;
	task_params.rx_io_size = 0;

	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];

	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
	rval = init_initiator_tmf_request_task(&task_params,
					       &tmf_pdu_header);
	if (rval)
		return -1;

	spin_lock(&qedi_conn->list_lock);
	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
	qedi_cmd->io_cmd_in_list = true;
	qedi_conn->active_cmd_count++;
	spin_unlock(&qedi_conn->list_lock);

	qedi_ring_doorbell(qedi_conn);
	return 0;
}
  1345. int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
  1346. struct iscsi_task *mtask)
  1347. {
  1348. struct qedi_ctx *qedi = qedi_conn->qedi;
  1349. struct iscsi_tm *tmf_hdr;
  1350. struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
  1351. s16 tid = 0;
  1352. tmf_hdr = (struct iscsi_tm *)mtask->hdr;
  1353. qedi_cmd->task = mtask;
  1354. /* If abort task then schedule the work and return */
  1355. if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
  1356. ISCSI_TM_FUNC_ABORT_TASK) {
  1357. qedi_cmd->state = CLEANUP_WAIT;
  1358. INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
  1359. queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
  1360. } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
  1361. ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
  1362. ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
  1363. ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
  1364. ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
  1365. ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
  1366. tid = qedi_get_task_idx(qedi);
  1367. if (tid == -1) {
  1368. QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
  1369. qedi_conn->iscsi_conn_id);
  1370. return -1;
  1371. }
  1372. qedi_cmd->task_id = tid;
  1373. qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
  1374. } else {
  1375. QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
  1376. qedi_conn->iscsi_conn_id);
  1377. return -1;
  1378. }
  1379. return 0;
  1380. }
/*
 * qedi_send_iscsi_text - post an iSCSI TEXT request PDU to the firmware.
 * @qedi_conn:	connection the PDU is sent on
 * @task:	libiscsi task carrying the TEXT request header
 *
 * Reserves a firmware task context, translates the libiscsi TEXT header
 * into the FW PDU header, points the TX/RX single-entry SGLs at the
 * connection's generic-PDU request/response buffers, queues an SQE and
 * rings the doorbell.  The command is placed on the connection's active
 * list before the doorbell so the completion path can find it.
 *
 * Return: 0 on success, -ENOMEM if no firmware task index is free,
 * -1 if FW task initialization fails.
 */
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
			 struct iscsi_task *task)
{
	struct iscsi_text_request_hdr text_request_pdu_header;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;
	struct iscsi_task_params task_params;
	struct e4_iscsi_task_context *fw_task_ctx;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct iscsi_text *text_hdr;
	struct scsi_sge *req_sge = NULL;
	struct scsi_sge *resp_sge = NULL;
	struct qedi_cmd *qedi_cmd;
	struct qedi_endpoint *ep;
	s16 tid = 0;
	u16 sq_idx = 0;
	int rval = 0;

	req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
	qedi_cmd = (struct qedi_cmd *)task->dd_data;
	text_hdr = (struct iscsi_text *)task->hdr;
	ep = qedi_conn->ep;

	/* Reserve a firmware task slot; -1 means the pool is exhausted. */
	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	/* Zero the FW task context before filling any of it. */
	fw_task_ctx =
	    (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
							      tid);
	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));

	qedi_cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));

	/* Update header info: convert libiscsi (big-endian) header fields
	 * to the CPU-order layout the firmware expects.
	 */
	text_request_pdu_header.opcode = text_hdr->opcode;
	text_request_pdu_header.flags_attr = text_hdr->flags;

	/* Map the driver tid to the libiscsi itt for completion lookup. */
	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
	text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
	text_request_pdu_header.ttt = text_hdr->ttt;
	text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
	text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
	text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);

	/* Fill tx AHS and rx buffer: single-SGE lists over the connection's
	 * preallocated generic-PDU request/response DMA buffers.
	 */
	tx_sgl_task_params.sgl =
			      (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
	tx_sgl_task_params.sgl_phys_addr.lo =
					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
	tx_sgl_task_params.sgl_phys_addr.hi =
			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
	tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
	tx_sgl_task_params.num_sges = 1;

	rx_sgl_task_params.sgl =
			     (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
	rx_sgl_task_params.sgl_phys_addr.lo =
					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
	rx_sgl_task_params.sgl_phys_addr.hi =
			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
	rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
	rx_sgl_task_params.num_sges = 1;

	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = 0;
	task_params.tx_io_size = ntoh24(text_hdr->dlength);
	task_params.rx_io_size = resp_sge->sge_len;

	/* Claim an SQ slot and clear it before the init helper fills it. */
	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];

	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
	rval = init_initiator_text_request_task(&task_params,
						&text_request_pdu_header,
						&tx_sgl_task_params,
						&rx_sgl_task_params);
	if (rval)
		return -1;

	/* Track the command before ringing the doorbell so the completion
	 * handler can always find it on the active list.
	 */
	spin_lock(&qedi_conn->list_lock);
	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
	qedi_cmd->io_cmd_in_list = true;
	qedi_conn->active_cmd_count++;
	spin_unlock(&qedi_conn->list_lock);

	qedi_ring_doorbell(qedi_conn);
	return 0;
}
/*
 * qedi_send_iscsi_nopout - post an iSCSI NOP-Out PDU to the firmware.
 * @qedi_conn:	connection the PDU is sent on
 * @task:	libiscsi task carrying the NOP-Out header
 * @datap:	optional ping payload (unused here; data_len drives the SGL)
 * @data_len:	length of the ping payload, 0 for none
 * @unsol:	unsolicited flag from the caller (not referenced in this body)
 *
 * Two cases are distinguished by the target transfer tag:
 *  - ttt != ISCSI_TTT_ALL_ONES: this is a reply to a target NOP-IN, so
 *    the target's itt/ttt are echoed back and no response is expected
 *    (the command is NOT added to the active list).
 *  - ttt == ISCSI_TTT_ALL_ONES: initiator-originated ping; a driver itt
 *    is assigned and the command is tracked on the active list so the
 *    NOP-In response can complete it.
 *
 * Return: 0 on success, -ENOMEM if no firmware task index is free,
 * -1 if FW task initialization fails.
 */
int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
			   struct iscsi_task *task,
			   char *datap, int data_len, int unsol)
{
	struct iscsi_nop_out_hdr nop_out_pdu_header;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;
	struct iscsi_task_params task_params;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct e4_iscsi_task_context *fw_task_ctx;
	struct iscsi_nopout *nopout_hdr;
	struct scsi_sge *resp_sge = NULL;
	struct qedi_cmd *qedi_cmd;
	struct qedi_endpoint *ep;
	u32 scsi_lun[2];
	s16 tid = 0;
	u16 sq_idx = 0;
	int rval = 0;

	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
	qedi_cmd = (struct qedi_cmd *)task->dd_data;
	nopout_hdr = (struct iscsi_nopout *)task->hdr;
	ep = qedi_conn->ep;

	/* Reserve a firmware task slot; -1 means the pool is exhausted. */
	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	fw_task_ctx =
	    (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
							      tid);
	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));

	qedi_cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));

	/* Update header info */
	nop_out_pdu_header.opcode = nopout_hdr->opcode;
	SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
	SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);

	/* LUN arrives as an 8-byte big-endian field; split into lo/hi. */
	memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
	nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
	nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
	nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
	nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);

	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
	if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
		/* Reply to a target NOP-IN: echo its itt/ttt verbatim. */
		nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
		nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
	} else {
		/* Initiator ping: assign our itt and track the command so
		 * the NOP-In response can complete it.
		 */
		nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
		nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;

		spin_lock(&qedi_conn->list_lock);
		list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
		qedi_cmd->io_cmd_in_list = true;
		qedi_conn->active_cmd_count++;
		spin_unlock(&qedi_conn->list_lock);
	}

	/* Fill tx AHS and rx buffer — only when a payload accompanies
	 * the ping; a zero-length NOP-Out leaves both SGLs zeroed.
	 */
	if (data_len) {
		tx_sgl_task_params.sgl =
			       (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
		tx_sgl_task_params.sgl_phys_addr.lo =
					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
		tx_sgl_task_params.sgl_phys_addr.hi =
			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
		tx_sgl_task_params.total_buffer_size = data_len;
		tx_sgl_task_params.num_sges = 1;

		rx_sgl_task_params.sgl =
			      (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
		rx_sgl_task_params.sgl_phys_addr.lo =
					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
		rx_sgl_task_params.sgl_phys_addr.hi =
			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
		rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
		rx_sgl_task_params.num_sges = 1;
	}

	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = 0;
	task_params.tx_io_size = data_len;
	task_params.rx_io_size = resp_sge->sge_len;

	/* Claim an SQ slot and clear it before the init helper fills it. */
	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];

	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
	rval = init_initiator_nop_out_task(&task_params,
					   &nop_out_pdu_header,
					   &tx_sgl_task_params,
					   &rx_sgl_task_params);
	if (rval)
		return -1;

	qedi_ring_doorbell(qedi_conn);
	return 0;
}
  1559. static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
  1560. int bd_index)
  1561. {
  1562. struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
  1563. int frag_size, sg_frags;
  1564. sg_frags = 0;
  1565. while (sg_len) {
  1566. if (addr % QEDI_PAGE_SIZE)
  1567. frag_size =
  1568. (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
  1569. else
  1570. frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
  1571. (sg_len % QEDI_BD_SPLIT_SZ);
  1572. if (frag_size == 0)
  1573. frag_size = QEDI_BD_SPLIT_SZ;
  1574. bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
  1575. bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
  1576. bd[bd_index + sg_frags].sge_len = (u16)frag_size;
  1577. QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
  1578. "split sge %d: addr=%llx, len=%x",
  1579. (bd_index + sg_frags), addr, frag_size);
  1580. addr += (u64)frag_size;
  1581. sg_frags++;
  1582. sg_len -= frag_size;
  1583. }
  1584. return sg_frags;
  1585. }
/*
 * qedi_map_scsi_sg - DMA-map a command's scatterlist and build its SGE table.
 * @qedi:	adapter context (provides the PCI device for mapping)
 * @cmd:	command whose io_tbl.sge_tbl is filled
 *
 * A single segment no larger than MAX_SGLEN_FOR_CACHESGL is emitted as
 * one "cached" SGE.  Otherwise every mapped segment becomes one SGE
 * (split via qedi_split_bd() when it exceeds QEDI_BD_SPLIT_SZ), and
 * cmd->use_slowpath is set when any segment boundary is not
 * QEDI_PAGE_SIZE aligned in a way the fast path requires.
 *
 * Return: number of SGEs written (0 on mapping failure).
 *
 * NOTE(review): the dma_map_sg() return value is used unchecked; a
 * failure (0) skips the loop and trips the byte_count WARN below —
 * confirm whether an explicit early return is wanted.
 */
static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;
	struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int bd_count = 0;
	int sg_count;
	int sg_len;
	int sg_frags;
	u64 addr, end_addr;
	int i;

	WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);

	sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);

	/*
	 * New condition to send single SGE as cached-SGL.
	 * Single SGE with length less than 64K.
	 */
	sg = scsi_sglist(sc);
	if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);

		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
		bd[bd_count].sge_addr.hi = (addr >> 32);
		bd[bd_count].sge_len = (u16)sg_len;

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
			  "single-cached-sgl: bd_count:%d addr=%llx, len=%x",
			  sg_count, addr, sg_len);

		return ++bd_count;
	}

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (addr + sg_len);

		/*
		 * first sg elem in the 'list',
		 * check if end addr is page-aligned.
		 */
		if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
			cmd->use_slowpath = true;

		/*
		 * last sg elem in the 'list',
		 * check if start addr is page-aligned.
		 */
		else if ((i == (sg_count - 1)) &&
			 (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
			cmd->use_slowpath = true;

		/*
		 * middle sg elements in list,
		 * check if start and end addr is page-aligned
		 */
		else if ((i != 0) && (i != (sg_count - 1)) &&
			 ((addr % QEDI_PAGE_SIZE) ||
			  (end_addr % QEDI_PAGE_SIZE)))
			cmd->use_slowpath = true;

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
			  i, sg_len);

		/* Oversized segments are split into multiple SGEs. */
		if (sg_len > QEDI_BD_SPLIT_SZ) {
			sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
		} else {
			sg_frags = 1;
			bd[bd_count].sge_addr.lo = addr & 0xffffffff;
			bd[bd_count].sge_addr.hi = addr >> 32;
			bd[bd_count].sge_len = sg_len;
		}
		byte_count += sg_len;
		bd_count += sg_frags;
	}

	/* Sanity check: mapped bytes must equal the request's buffer size. */
	if (byte_count != scsi_bufflen(sc))
		QEDI_ERR(&qedi->dbg_ctx,
			 "byte_count = %d != scsi_bufflen = %d\n", byte_count,
			 scsi_bufflen(sc));
	else
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
			  byte_count);

	WARN_ON(byte_count != scsi_bufflen(sc));

	return bd_count;
}
  1665. static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
  1666. {
  1667. int bd_count;
  1668. struct scsi_cmnd *sc = cmd->scsi_cmd;
  1669. if (scsi_sg_count(sc)) {
  1670. bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
  1671. if (bd_count == 0)
  1672. return;
  1673. } else {
  1674. struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
  1675. bd[0].sge_addr.lo = 0;
  1676. bd[0].sge_addr.hi = 0;
  1677. bd[0].sge_len = 0;
  1678. bd_count = 0;
  1679. }
  1680. cmd->io_tbl.sge_valid = bd_count;
  1681. }
  1682. static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
  1683. {
  1684. u32 dword;
  1685. int lpcnt;
  1686. u8 *srcp;
  1687. lpcnt = sc->cmd_len / sizeof(dword);
  1688. srcp = (u8 *)sc->cmnd;
  1689. while (lpcnt--) {
  1690. memcpy(&dword, (const void *)srcp, 4);
  1691. *dstp = cpu_to_be32(dword);
  1692. srcp += 4;
  1693. dstp++;
  1694. }
  1695. if (sc->cmd_len & 0x3) {
  1696. dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
  1697. *dstp = cpu_to_be32(dword);
  1698. }
  1699. }
/*
 * qedi_trace_io - record one I/O event in the adapter's trace ring.
 * @qedi:	adapter context owning the trace buffer
 * @task:	libiscsi task being traced
 * @tid:	firmware task id of the I/O
 * @direction:	QEDI_IO_TRACE_REQ or QEDI_IO_TRACE_RSP
 *
 * Snapshots command identity (lun/opcode/lba), buffer stats, the
 * adapter's SG-path counters and CPU placement into the next slot of
 * the circular io_trace_buf, then resets the per-I/O SG-path flags.
 * The whole update runs under io_trace_lock with IRQs disabled.
 */
void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
		   u16 tid, int8_t direction)
{
	struct qedi_io_log *io_log;
	struct iscsi_conn *conn = task->conn;
	struct qedi_conn *qedi_conn = conn->dd_data;
	struct scsi_cmnd *sc_cmd = task->sc;
	unsigned long flags;

	spin_lock_irqsave(&qedi->io_trace_lock, flags);

	io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
	io_log->direction = direction;
	io_log->task_id = tid;
	io_log->cid = qedi_conn->iscsi_conn_id;
	io_log->lun = sc_cmd->device->lun;
	io_log->op = sc_cmd->cmnd[0];
	/* CDB bytes 2-5 (the LBA field of common 10-byte commands). */
	io_log->lba[0] = sc_cmd->cmnd[2];
	io_log->lba[1] = sc_cmd->cmnd[3];
	io_log->lba[2] = sc_cmd->cmnd[4];
	io_log->lba[3] = sc_cmd->cmnd[5];
	io_log->bufflen = scsi_bufflen(sc_cmd);
	io_log->sg_count = scsi_sg_count(sc_cmd);
	io_log->fast_sgs = qedi->fast_sgls;
	io_log->cached_sgs = qedi->cached_sgls;
	io_log->slow_sgs = qedi->slow_sgls;
	io_log->cached_sge = qedi->use_cached_sge;
	io_log->slow_sge = qedi->use_slow_sge;
	io_log->fast_sge = qedi->use_fast_sge;
	io_log->result = sc_cmd->result;
	io_log->jiffies = jiffies;
	io_log->blk_req_cpu = smp_processor_id();

	if (direction == QEDI_IO_TRACE_REQ) {
		/* For requests we only care about the submission CPU */
		io_log->req_cpu = smp_processor_id() % qedi->num_queues;
		io_log->intr_cpu = 0;
		io_log->blk_rsp_cpu = 0;
	} else if (direction == QEDI_IO_TRACE_RSP) {
		io_log->req_cpu = smp_processor_id() % qedi->num_queues;
		io_log->intr_cpu = qedi->intr_cpu;
		io_log->blk_rsp_cpu = smp_processor_id();
	}

	/* Advance the ring index, wrapping at QEDI_IO_TRACE_SIZE. */
	qedi->io_trace_idx++;
	if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
		qedi->io_trace_idx = 0;

	/* Reset per-I/O SG-path flags for the next command. */
	qedi->use_cached_sge = false;
	qedi->use_slow_sge = false;
	qedi->use_fast_sge = false;

	spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
}
/*
 * qedi_iscsi_send_ioreq - issue a SCSI command PDU to the firmware.
 * @task: libiscsi task carrying the SCSI request
 *
 * Maps the data buffer into an SGE table, reserves a firmware task
 * context, fills the iSCSI command PDU header (direction flags, LUN,
 * itt, lengths, CDB), attaches the TX or RX SGL according to the data
 * direction, queues an SQE and rings the doorbell.  The command is put
 * on the connection's active list before the doorbell.
 *
 * Return: 0 on success, -ENOMEM if no firmware task index is free,
 * -1 if FW task initialization fails.
 */
int qedi_iscsi_send_ioreq(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
	struct qedi_ctx *qedi = iscsi_host_priv(shost);
	struct qedi_conn *qedi_conn = conn->dd_data;
	struct qedi_cmd *cmd = task->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_cmd_hdr cmd_pdu_header;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;
	struct scsi_sgl_task_params *prx_sgl = NULL;
	struct scsi_sgl_task_params *ptx_sgl = NULL;
	struct iscsi_task_params task_params;
	struct iscsi_conn_params conn_params;
	struct scsi_initiator_cmd_params cmd_params;
	struct e4_iscsi_task_context *fw_task_ctx;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
	struct qedi_endpoint *ep;
	u32 scsi_lun[2];
	s16 tid = 0;
	u16 sq_idx = 0;
	u16 cq_idx;
	int rval = 0;

	ep = qedi_conn->ep;
	cls_conn = qedi_conn->cls_conn;
	conn = cls_conn->dd_data;

	/* Build the SGE table for the command's data buffer (if any). */
	qedi_iscsi_map_sg_list(cmd);
	int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);

	/* Reserve a firmware task slot; -1 means the pool is exhausted. */
	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	fw_task_ctx =
	    (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
							      tid);
	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));

	cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
	memset(&conn_params, 0, sizeof(conn_params));
	memset(&cmd_params, 0, sizeof(cmd_params));

	/* Spread completion queues across CPUs by submitting CPU. */
	cq_idx = smp_processor_id() % qedi->num_queues;
	/* Update header info */
	SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
		  ISCSI_ATTR_SIMPLE);
	/* TEST UNIT READY carries no data; leave both direction bits clear
	 * and task_type at MAX_ISCSI_TASK_TYPE so no SGL is attached below.
	 */
	if (hdr->cdb[0] != TEST_UNIT_READY) {
		if (sc->sc_data_direction == DMA_TO_DEVICE) {
			SET_FIELD(cmd_pdu_header.flags_attr,
				  ISCSI_CMD_HDR_WRITE, 1);
			task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
		} else {
			SET_FIELD(cmd_pdu_header.flags_attr,
				  ISCSI_CMD_HDR_READ, 1);
			task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
		}
	}

	cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
	cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);

	/* Map the driver tid to the libiscsi itt for completion lookup. */
	qedi_update_itt_map(qedi, tid, task->itt, cmd);
	cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
	cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
	cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
	cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
	cmd_pdu_header.hdr_first_byte = hdr->opcode;
	qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);

	/* Fill tx AHS and rx buffer: attach the mapped SGE table on the
	 * side matching the data direction.
	 */
	if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
		tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
		tx_sgl_task_params.sgl_phys_addr.lo =
						 (u32)(cmd->io_tbl.sge_tbl_dma);
		tx_sgl_task_params.sgl_phys_addr.hi =
				      (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
		tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
		tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
		if (cmd->use_slowpath)
			tx_sgl_task_params.small_mid_sge = true;
	} else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
		rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
		rx_sgl_task_params.sgl_phys_addr.lo =
						 (u32)(cmd->io_tbl.sge_tbl_dma);
		rx_sgl_task_params.sgl_phys_addr.hi =
				      (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
		rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
		rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
	}

	/* Add conn param */
	conn_params.first_burst_length = conn->session->first_burst;
	conn_params.max_send_pdu_length = conn->max_xmit_dlength;
	conn_params.max_burst_length = conn->session->max_burst;
	if (conn->session->initial_r2t_en)
		conn_params.initial_r2t = true;
	if (conn->session->imm_data_en)
		conn_params.immediate_data = true;

	/* Add cmd params */
	cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
	cmd_params.sense_data_buffer_phys_addr.hi =
					(u32)((u64)cmd->sense_buffer_dma >> 32);
	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = cq_idx;
	if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
		task_params.tx_io_size = scsi_bufflen(sc);
	else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
		task_params.rx_io_size = scsi_bufflen(sc);

	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];

	/*
	 * NOTE(review): the "sg_len=" label below prints sge_valid and
	 * "num_sges=" prints scsi_bufflen() — the arguments look swapped
	 * relative to the format labels. Debug output only; confirm intent.
	 */
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
		  "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
		  (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
		  "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
		  "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
		  (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc),
		  (u32)(cmd->io_tbl.sge_tbl_dma),
		  (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));

	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));

	/* Pass only the SGL(s) that actually carry data. */
	if (task_params.tx_io_size != 0)
		ptx_sgl = &tx_sgl_task_params;
	if (task_params.rx_io_size != 0)
		prx_sgl = &rx_sgl_task_params;

	rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
					    &cmd_params, &cmd_pdu_header,
					    ptx_sgl, prx_sgl,
					    NULL);
	if (rval)
		return -1;

	/* Track the command before ringing the doorbell so the completion
	 * handler can always find it on the active list.
	 */
	spin_lock(&qedi_conn->list_lock);
	list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
	cmd->io_cmd_in_list = true;
	qedi_conn->active_cmd_count++;
	spin_unlock(&qedi_conn->list_lock);

	qedi_ring_doorbell(qedi_conn);
	return 0;
}
  1888. int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
  1889. {
  1890. struct iscsi_task_params task_params;
  1891. struct qedi_endpoint *ep;
  1892. struct iscsi_conn *conn = task->conn;
  1893. struct qedi_conn *qedi_conn = conn->dd_data;
  1894. struct qedi_cmd *cmd = task->dd_data;
  1895. u16 sq_idx = 0;
  1896. int rval = 0;
  1897. QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
  1898. "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
  1899. cmd->task_id, get_itt(task->itt), task->state,
  1900. cmd->state, qedi_conn->iscsi_conn_id);
  1901. memset(&task_params, 0, sizeof(task_params));
  1902. ep = qedi_conn->ep;
  1903. sq_idx = qedi_get_wqe_idx(qedi_conn);
  1904. task_params.sqe = &ep->sq[sq_idx];
  1905. memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
  1906. task_params.itid = cmd->task_id;
  1907. rval = init_cleanup_task(&task_params);
  1908. if (rval)
  1909. return rval;
  1910. qedi_ring_doorbell(qedi_conn);
  1911. return 0;
  1912. }