/* drivers/target/target_core_spc.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * SCSI Primary Commands (SPC) parsing and emulation.
  4. *
  5. * (c) Copyright 2002-2013 Datera, Inc.
  6. *
  7. * Nicholas A. Bellinger <nab@kernel.org>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/unaligned.h>
  12. #include <scsi/scsi_proto.h>
  13. #include <scsi/scsi_common.h>
  14. #include <scsi/scsi_tcq.h>
  15. #include <target/target_core_base.h>
  16. #include <target/target_core_backend.h>
  17. #include <target/target_core_fabric.h>
  18. #include "target_core_internal.h"
  19. #include "target_core_alua.h"
  20. #include "target_core_pr.h"
  21. #include "target_core_ua.h"
  22. #include "target_core_xcopy.h"
  23. static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
  24. {
  25. struct t10_alua_tg_pt_gp *tg_pt_gp;
  26. /*
  27. * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
  28. */
  29. buf[5] = 0x80;
  30. /*
  31. * Set TPGS field for explicit and/or implicit ALUA access type
  32. * and opteration.
  33. *
  34. * See spc4r17 section 6.4.2 Table 135
  35. */
  36. rcu_read_lock();
  37. tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
  38. if (tg_pt_gp)
  39. buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
  40. rcu_read_unlock();
  41. }
  42. static u16
  43. spc_find_scsi_transport_vd(int proto_id)
  44. {
  45. switch (proto_id) {
  46. case SCSI_PROTOCOL_FCP:
  47. return SCSI_VERSION_DESCRIPTOR_FCP4;
  48. case SCSI_PROTOCOL_ISCSI:
  49. return SCSI_VERSION_DESCRIPTOR_ISCSI;
  50. case SCSI_PROTOCOL_SAS:
  51. return SCSI_VERSION_DESCRIPTOR_SAS3;
  52. case SCSI_PROTOCOL_SBP:
  53. return SCSI_VERSION_DESCRIPTOR_SBP3;
  54. case SCSI_PROTOCOL_SRP:
  55. return SCSI_VERSION_DESCRIPTOR_SRP;
  56. default:
  57. pr_warn("Cannot find VERSION DESCRIPTOR value for unknown SCSI"
  58. " transport PROTOCOL IDENTIFIER %#x\n", proto_id);
  59. return 0;
  60. }
  61. }
/*
 * spc_emulate_inquiry_std() - fill in standard (EVPD=0) INQUIRY data
 * @cmd: SCSI command being emulated
 * @buf: response buffer; byte 0 (peripheral device type) is filled in by
 *       the caller (see spc_emulate_inquiry()), the remainder is zeroed
 *
 * Returns: 0 (this emulation cannot fail).
 */
sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	/* Set RMB (removable media) for tape devices */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		buf[1] = 0x80;

	buf[2] = 0x06; /* SPC-4 */

	/*
	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
	 *
	 * SPC4 says:
	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
	 *   standard INQUIRY data is in the format defined in this
	 *   standard. Response data format values less than 2h are
	 *   obsolete. Response data format values greater than 2h are
	 *   reserved.
	 */
	buf[3] = 2;

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	spc_fill_alua_data(lun, buf);

	/*
	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;

	/*
	 * Set Protection (PROTECT) bit when DIF has been enabled on the
	 * device, and the fabric supports VERIFY + PASS. Also report
	 * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
	 * to unprotected devices.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
			buf[5] |= 0x1;
	}

	/*
	 * Set MULTIP bit to indicate presence of multiple SCSI target ports
	 */
	if (dev->export_count > 1)
		buf[6] |= 0x10;

	buf[7] = 0x2; /* CmdQue=1 */

	/*
	 * ASCII data fields described as being left-aligned shall have any
	 * unused bytes at the end of the field (i.e., highest offset) and the
	 * unused bytes shall be filled with ASCII space characters (20h).
	 */
	memset(&buf[8], 0x20,
	       INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
	memcpy(&buf[8], dev->t10_wwn.vendor,
	       strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
	memcpy(&buf[16], dev->t10_wwn.model,
	       strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
	memcpy(&buf[32], dev->t10_wwn.revision,
	       strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));

	/*
	 * Set the VERSION DESCRIPTOR fields
	 */
	put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SAM5, &buf[58]);
	put_unaligned_be16(spc_find_scsi_transport_vd(tpg->proto_id), &buf[60]);
	put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SPC4, &buf[62]);
	/* Block devices additionally report the SBC-3 version descriptor */
	if (cmd->se_dev->transport->get_device_type(dev) == TYPE_DISK)
		put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SBC3, &buf[64]);

	buf[4] = 91; /* Set additional length to 91 */

	return 0;
}
EXPORT_SYMBOL(spc_emulate_inquiry_std);
  134. /* unit serial number */
  135. static sense_reason_t
  136. spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
  137. {
  138. struct se_device *dev = cmd->se_dev;
  139. u16 len;
  140. if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
  141. len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
  142. len++; /* Extra Byte for NULL Terminator */
  143. buf[3] = len;
  144. }
  145. return 0;
  146. }
/*
 * Generate an NAA IEEE Registered Extended designator (NAA type 6h).
 *
 * Packed big-endian into 16 bytes:
 *   4 bits  NAA = 6h
 *   24 bits IEEE COMPANY_ID (OUI)
 *   36 bits VENDOR SPECIFIC IDENTIFIER
 *   64 bits VENDOR SPECIFIC IDENTIFIER EXTENSION
 */
void spc_gen_naa_6h_vendor_specific(struct se_device *dev,
				    unsigned char *buf)
{
	unsigned char *p = &dev->t10_wwn.unit_serial[0];
	u32 company_id = dev->t10_wwn.company_id;
	int cnt, off = 0;
	/* next == true: the low nibble of buf[off] is the next to fill */
	bool next = true;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off] = 0x6 << 4;

	/* IEEE COMPANY_ID, straddling byte boundaries by 4 bits */
	buf[off++] |= (company_id >> 20) & 0xf;
	buf[off++] = (company_id >> 12) & 0xff;
	buf[off++] = (company_id >> 4) & 0xff;
	buf[off] = (company_id & 0xf) << 4;

	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL
	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
	 * per device uniqueness.
	 */
	for (cnt = off + 13; *p && off < cnt; p++) {
		int val = hex_to_bin(*p);

		/* Non-hex characters in the serial are silently skipped */
		if (val < 0)
			continue;

		if (next) {
			/* Low nibble completes the current byte */
			next = false;
			buf[off++] |= val;
		} else {
			/* High nibble starts the next byte */
			next = true;
			buf[off] = val << 4;
		}
	}
}
/*
 * Device identification VPD (page 0x83): emits NAA, T10 vendor, relative
 * target port, target port group, LU group, and SCSI name string
 * designators. For a complete list of DESIGNATOR TYPEs see spc4r17
 * Table 459.
 */
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 off = 0;		/* current write offset into buf */
	u16 len = 0, id_len;	/* len accumulates the PAGE LENGTH value */

	off = 4;	/* designators start after the 4-byte page header */
	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 00b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;	/* Reserved byte */

	/* Identifier/Designator length (16-byte NAA id) */
	buf[off++] = 0x10;

	/* NAA IEEE Registered Extended designator */
	spc_gen_naa_6h_vendor_specific(dev, &buf[off]);

	len = 20;	/* 4-byte descriptor header + 16-byte NAA id */
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL)
		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				&dev->t10_wwn.unit_serial[0]);
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	/* left align Vendor ID and pad with spaces */
	memset(&buf[off+4], 0x20, INQUIRY_VENDOR_LEN);
	memcpy(&buf[off+4], dev->t10_wwn.vendor,
	       strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);

	/*
	 * NOTE(review): "if (1)" looks like the remnant of a removed
	 * conditional; the block below is now emitted unconditionally.
	 */
	if (1) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = lun->lun_tpg;
		/*
		 * Relative target port identifer, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifer */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */

		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		rcu_read_lock();
		tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
		if (!tg_pt_gp) {
			rcu_read_unlock();
			goto check_lu_gp;
		}
		/* Copy the id out so the RCU read section can end here */
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		rcu_read_unlock();

		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		put_unaligned_be16(tg_pt_gp_id, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */

		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		/* Copy the id out under the member lock */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		put_unaligned_be16(lu_gp_id, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */

		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifer containing, $FABRIC_MOD
		 * dependent information. For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT> in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include  NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		/* Round up to a multiple of four, then cap at 256 */
		padding = ((-scsi_name_len) & 3);
		if (padding)
			scsi_name_len += padding;
		if (scsi_name_len > 256)
			scsi_name_len = 256;

		/* Backfill DESIGNATOR LENGTH now that it is known */
		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);

		/*
		 * Target device designator
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target device: 10b */
		buf[off] |= 0x20;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifer containing, $FABRIC_MOD
		 * dependent information. For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>" in
		 * UTF-8 encoding.
		 */
		scsi_target_len = sprintf(&buf[off], "%s",
					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		scsi_target_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_target_len) & 3);
		if (padding)
			scsi_target_len += padding;
		if (scsi_target_len > 256)
			scsi_target_len = 256;

		/* Backfill DESIGNATOR LENGTH now that it is known */
		buf[off-1] = scsi_target_len;
		off += scsi_target_len;
		/* Header size + Designation descriptor */
		len += (scsi_target_len + 4);
	}
	put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */
	return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);
/* Extended INQUIRY Data VPD Page (0x86) */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	buf[3] = 0x3c;	/* PAGE LENGTH */
	/*
	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
	 * only for TYPE3 protection. Only reported when the fabric
	 * supports protection PASS in at least one direction.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
		    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
			buf[4] = 0x5;
		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
			 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
			buf[4] = 0x4;
	}
	/* logical unit supports type 1 and type 3 protection */
	if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
	    (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
	    (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
		buf[4] |= (0x3 << 3);	/* SPT field, bits 5:3 of byte 4 */
	}
	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;
	/* If WriteCache emulation is enabled, set V_SUP */
	if (target_check_wce(dev))
		buf[6] = 0x01;
	/* If an LBA map is present set R_SUP */
	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		buf[8] = 0x10;
	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
	return 0;
}
/* Block Limits VPD page (0xb0) */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u32 mtl = 0;	/* fabric-imposed maximum transfer length; 0 == none */
	int have_tp = 0, opt, min;
	u32 io_max_blocks;

	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we will be expect a
	 * different page length for Thin Provisioning.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		have_tp = 1;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = have_tp ? 0x3c : 0x10;	/* PAGE LENGTH */

	/* Set WSNZ to 1 */
	buf[4] = 0x01;
	/*
	 * Set MAXIMUM COMPARE AND WRITE LENGTH
	 */
	if (dev->dev_attrib.emulate_caw)
		buf[5] = 0x01;
	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY (in logical blocks);
	 * falls back to 1 when the backend reports no minimum I/O size.
	 */
	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
	else
		put_unaligned_be16(1, &buf[6]);
	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 *
	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
	 * enforcing maximum HW scatter-gather-list entry limit
	 */
	if (cmd->se_tfo->max_data_sg_nents) {
		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
		      dev->dev_attrib.block_size;
	}
	/* Convert the HW sector limit into logical-block units */
	io_max_blocks = mult_frac(dev->dev_attrib.hw_max_sectors,
				  dev->dev_attrib.hw_block_size,
				  dev->dev_attrib.block_size);
	/* Report the tighter of the fabric and backend limits */
	put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]);
	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
	else
		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
	/*
	 * Exit now if we don't support TP.
	 */
	if (!have_tp)
		goto max_write_same;
	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);
	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */
	/*
	 * MAXIMUM WRITE SAME LENGTH
	 */
max_write_same:
	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);

	return 0;
}
  536. /* Block Device Characteristics VPD page */
  537. static sense_reason_t
  538. spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
  539. {
  540. struct se_device *dev = cmd->se_dev;
  541. buf[0] = dev->transport->get_device_type(dev);
  542. buf[3] = 0x3c;
  543. buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
  544. return 0;
  545. }
  546. /* Thin Provisioning VPD */
  547. static sense_reason_t
  548. spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
  549. {
  550. struct se_device *dev = cmd->se_dev;
  551. /*
  552. * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
  553. *
  554. * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
  555. * zero, then the page length shall be set to 0004h. If the DP bit
  556. * is set to one, then the page length shall be set to the value
  557. * defined in table 162.
  558. */
  559. buf[0] = dev->transport->get_device_type(dev);
  560. /*
  561. * Set Hardcoded length mentioned above for DP=0
  562. */
  563. put_unaligned_be16(0x0004, &buf[2]);
  564. /*
  565. * The THRESHOLD EXPONENT field indicates the threshold set size in
  566. * LBAs as a power of 2 (i.e., the threshold set size is equal to
  567. * 2(threshold exponent)).
  568. *
  569. * Note that this is currently set to 0x00 as mkp says it will be
  570. * changing again. We can enable this once it has settled in T10
  571. * and is actually used by Linux/SCSI ML code.
  572. */
  573. buf[4] = 0x00;
  574. /*
  575. * A TPU bit set to one indicates that the device server supports
  576. * the UNMAP command (see 5.25). A TPU bit set to zero indicates
  577. * that the device server does not support the UNMAP command.
  578. */
  579. if (dev->dev_attrib.emulate_tpu != 0)
  580. buf[5] = 0x80;
  581. /*
  582. * A TPWS bit set to one indicates that the device server supports
  583. * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
  584. * A TPWS bit set to zero indicates that the device server does not
  585. * support the use of the WRITE SAME (16) command to unmap LBAs.
  586. */
  587. if (dev->dev_attrib.emulate_tpws != 0)
  588. buf[5] |= 0x40 | 0x20;
  589. /*
  590. * The unmap_zeroes_data set means that the underlying device supports
  591. * REQ_OP_DISCARD and has the discard_zeroes_data bit set. This
  592. * satisfies the SBC requirements for LBPRZ, meaning that a subsequent
  593. * read will return zeroes after an UNMAP or WRITE SAME (16) to an LBA
  594. * See sbc4r36 6.6.4.
  595. */
  596. if (((dev->dev_attrib.emulate_tpu != 0) ||
  597. (dev->dev_attrib.emulate_tpws != 0)) &&
  598. (dev->dev_attrib.unmap_zeroes_data != 0))
  599. buf[5] |= 0x04;
  600. return 0;
  601. }
  602. /* Referrals VPD page */
  603. static sense_reason_t
  604. spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
  605. {
  606. struct se_device *dev = cmd->se_dev;
  607. buf[0] = dev->transport->get_device_type(dev);
  608. buf[3] = 0x0c;
  609. put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
  610. put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);
  611. return 0;
  612. }
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

/*
 * Dispatch table mapping INQUIRY EVPD page codes to their emulation
 * handlers. spc_emulate_evpd_00() reports these page codes in the
 * Supported VPD Pages page, and spc_emulate_inquiry() scans this table
 * to dispatch an EVPD=1 request.
 */
static struct {
	uint8_t page;	/* VPD page code (INQUIRY CDB byte 2) */
	sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};
  628. /* supported vital product data pages */
  629. static sense_reason_t
  630. spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
  631. {
  632. int p;
  633. /*
  634. * Only report the INQUIRY EVPD=1 pages after a valid NAA
  635. * Registered Extended LUN WWN has been set via ConfigFS
  636. * during device creation/restart.
  637. */
  638. if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
  639. buf[3] = ARRAY_SIZE(evpd_handlers);
  640. for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
  641. buf[p + 4] = evpd_handlers[p].page;
  642. }
  643. return 0;
  644. }
/*
 * Emulate the INQUIRY command: either the standard data (EVPD=0) or one
 * of the vital product data pages registered in evpd_handlers[].
 *
 * The response is built in a local bounce buffer and copied into the
 * command's data scatterlist, truncated to the initiator's allocation
 * length (cmd->data_length).
 */
static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *buf;
	sense_reason_t ret;
	int p;
	int len = 0;

	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for INQUIRY\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	/* Byte 0: PERIPHERAL QUALIFIER / PERIPHERAL DEVICE TYPE */
	buf[0] = dev->transport->get_device_type(dev);

	if (!(cdb[1] & 0x1)) {
		/* EVPD=0 must have PAGE CODE 0 (SPC: otherwise invalid CDB) */
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		/* ADDITIONAL LENGTH (byte 4) + the 5 header bytes */
		len = buf[4] + 5;
		goto out;
	}

	/* EVPD=1: dispatch on the requested page code */
	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			/* PAGE LENGTH (bytes 2-3) + 4-byte page header */
			len = get_unaligned_be16(&buf[2]) + 4;
			goto out;
		}
	}

	pr_debug("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	/* Copy as much of the response as the initiator asked for */
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
	kfree(buf);

	if (!ret)
		target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, len);
	return ret;
}
  693. static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
  694. {
  695. p[0] = 0x01;
  696. p[1] = 0x0a;
  697. /* No changeable values for now */
  698. if (pc == 1)
  699. goto out;
  700. out:
  701. return 12;
  702. }
/*
 * Build the Control mode page (0ah).  Returns the number of bytes
 * written (always 12).  With pc == 1 (changeable values) only the page
 * header is emitted, since no field is currently changeable.
 */
static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	p[0] = 0x0a;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	/* GLTSD: No implicit save of log parameters */
	p[2] = (1 << 1);
	if (target_sense_desc_format(dev))
		/* D_SENSE: Descriptor format sense data for 64bit sectors */
		p[2] |= (1 << 2);

	/*
	 * From spc4r23, 7.4.7 Control mode page
	 *
	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
	 * restrictions on the algorithm used for reordering commands
	 * having the SIMPLE task attribute (see SAM-4).
	 *
	 * Table 368 -- QUEUE ALGORITHM MODIFIER field
	 * Code      Description
	 *  0h       Restricted reordering
	 *  1h       Unrestricted reordering allowed
	 *  2h to 7h Reserved
	 *  8h to Fh Vendor specific
	 *
	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
	 * the device server shall order the processing sequence of commands
	 * having the SIMPLE task attribute such that data integrity is maintained
	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
	 * requests is halted at any time, the final value of all data observable
	 * on the medium shall be the same as if all the commands had been processed
	 * with the ORDERED task attribute).
	 *
	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
	 * device server may reorder the processing sequence of commands having the
	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
	 * command sequence order shall be explicitly handled by the application client
	 * through the selection of appropriate commands and task attributes.
	 */
	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;

	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a com-
	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
	 * status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * to the number of commands completed with one of those status codes.
	 */
	switch (dev->dev_attrib.emulate_ua_intlck_ctrl) {
	case TARGET_UA_INTLCK_CTRL_ESTABLISH_UA:
		p[4] = 0x30;
		break;
	case TARGET_UA_INTLCK_CTRL_NO_CLEAR:
		p[4] = 0x20;
		break;
	default:	/* TARGET_UA_INTLCK_CTRL_CLEAR */
		p[4] = 0x00;
		break;
	}

	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;

	/*
	 * From spc4r30, section 7.5.7 Control mode page
	 *
	 * Application Tag Owner (ATO) bit set to one.
	 *
	 * If the ATO bit is set to one the device server shall not modify the
	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
	 * TAG field.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
			p[5] |= 0x80;
	}

	/* BUSY TIMEOUT PERIOD: unlimited (0xffff) */
	p[8] = 0xff;
	p[9] = 0xff;
	/* EXTENDED SELF-TEST COMPLETION TIME */
	p[11] = 30;

out:
	return 12;
}
  819. static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
  820. {
  821. struct se_device *dev = cmd->se_dev;
  822. p[0] = 0x08;
  823. p[1] = 0x12;
  824. /* No changeable values for now */
  825. if (pc == 1)
  826. goto out;
  827. if (target_check_wce(dev))
  828. p[2] = 0x04; /* Write Cache Enable */
  829. p[12] = 0x20; /* Disabled Read Ahead */
  830. out:
  831. return 20;
  832. }
  833. static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
  834. {
  835. p[0] = 0x1c;
  836. p[1] = 0x0a;
  837. /* No changeable values for now */
  838. if (pc == 1)
  839. goto out;
  840. out:
  841. return 12;
  842. }
/*
 * Dispatch table mapping (page, subpage) pairs to mode-page builders.
 * Used both by MODE SENSE (to emit pages) and by MODE SELECT (to build
 * the reference image a parameter list is compared against).
 */
static struct {
	uint8_t page;
	uint8_t subpage;
	int (*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};
  853. static void spc_modesense_write_protect(unsigned char *buf, int type)
  854. {
  855. /*
  856. * I believe that the WP bit (bit 7) in the mode header is the same for
  857. * all device types..
  858. */
  859. switch (type) {
  860. case TYPE_DISK:
  861. case TYPE_TAPE:
  862. default:
  863. buf[0] |= 0x80; /* WP bit */
  864. break;
  865. }
  866. }
  867. static void spc_modesense_dpofua(unsigned char *buf, int type)
  868. {
  869. switch (type) {
  870. case TYPE_DISK:
  871. buf[0] |= 0x10; /* DPOFUA bit */
  872. break;
  873. default:
  874. break;
  875. }
  876. }
  877. static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
  878. {
  879. *buf++ = 8;
  880. put_unaligned_be32(min(blocks, 0xffffffffull), buf);
  881. buf += 4;
  882. put_unaligned_be32(block_size, buf);
  883. return 9;
  884. }
/*
 * Emit a MODE SENSE (10) block descriptor, using the long-LBA format only
 * when the block count does not fit in 32 bits.  Returns the number of
 * bytes consumed at buf.
 */
static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	/*
	 * Fits in 32 bits: leave LONGLBA clear and place a short descriptor
	 * after the remaining 3 header bytes.
	 */
	if (blocks <= 0xffffffff)
		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;

	*buf++ = 1;	/* LONGLBA */
	buf += 2;
	*buf++ = 16;	/* BLOCK DESCRIPTOR LENGTH (one 16-byte descriptor) */
	put_unaligned_be64(blocks, buf);
	buf += 12;	/* LOGICAL BLOCK LENGTH sits at descriptor offset 12 */
	put_unaligned_be32(block_size, buf);

	/*
	 * NOTE(review): this path writes 4 header bytes plus a 16-byte
	 * descriptor (the block length lands at offsets 16..19) yet reports
	 * only 17 bytes consumed, which looks 3 short -- confirm against
	 * SBC and the length accounting in spc_emulate_modesense() before
	 * changing.
	 */
	return 17;
}
/*
 * Emulate MODE SENSE (6) / MODE SENSE (10): build the mode parameter
 * header, an optional block descriptor, and the requested mode page(s)
 * in a local buffer, then copy the result into the command's data
 * scatterlist truncated to the allocation length.
 */
static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);		/* Disable Block Descriptors */
	bool llba = ten ? !!(cdb[1] & 0x10) : false;	/* Long LBA Accepted */
	u8 pc = cdb[2] >> 6;			/* page control (current/changeable/...) */
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
	if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
		spc_modesense_write_protect(&buf[length], type);

	/*
	 * SBC only allows us to enable FUA and DPO together.  Fortunately
	 * DPO is explicitly specified as a hint, so a noop is a perfectly
	 * valid implementation.
	 */
	if (target_check_fua(dev))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
								       blocks, block_size);
			} else {
				/* 3 reserved/LONGLBA header bytes, then short descriptor */
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
								  blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
							  block_size);
		}
	} else {
		/* No descriptor: still account for the remaining header bytes */
		if (ten)
			length += 4;
		else
			length += 1;
	}

	/* Page 3fh requests all pages (subpage 00h or ffh only) */
	if (page == 0x3f) {
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
				/* MODE SENSE (6) caps MODE DATA LENGTH at one byte */
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	/* Single-page request: exact (page, subpage) match required */
	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	/* MODE DATA LENGTH excludes itself (2 bytes for ten, 1 for six) */
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, length);
	return 0;
}
/*
 * Emulate MODE SELECT (6) / MODE SELECT (10).  No field is actually
 * changeable, so the incoming parameter list is only validated: it must
 * match, byte for byte, the page image our MODE SENSE emulation would
 * produce (pc == 0, current values).  A mismatch is rejected with
 * INVALID PARAMETER LIST.
 */
static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
	char *cdb = cmd->t_task_cdb;
	bool ten = cdb[0] == MODE_SELECT_10;
	int off = ten ? 8 : 4;		/* header + block descriptor offset */
	bool pf = !!(cdb[1] & 0x10);	/* Page Format bit */
	u8 page, subpage;
	unsigned char *buf;
	unsigned char tbuf[SE_MODE_PAGE_BUF];
	int length;
	sense_reason_t ret = 0;
	int i;

	/* Zero-length parameter list: nothing to validate, succeed */
	if (!cmd->data_length) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	/* Need at least the page code + page length bytes at 'off' */
	if (cmd->data_length < off + 2)
		return TCM_PARAMETER_LIST_LENGTH_ERROR;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* Only page-format (PF=1) parameter lists are supported */
	if (!pf) {
		ret = TCM_INVALID_CDB_FIELD;
		goto out;
	}

	page = buf[off] & 0x3f;
	/* SPF bit: subpage code follows only in the sub_page page format */
	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;

	/* Build the reference image of the addressed page */
	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			memset(tbuf, 0, SE_MODE_PAGE_BUF);
			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
			goto check_contents;
		}

	ret = TCM_UNKNOWN_MODE_PAGE;
	goto out;

check_contents:
	if (cmd->data_length < off + length) {
		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
		goto out;
	}

	/* Any deviation from the current values is rejected */
	if (memcmp(buf + off, tbuf, length))
		ret = TCM_INVALID_PARAMETER_LIST;

out:
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return ret;
}
/*
 * Emulate REQUEST SENSE: report (and clear) a pending unit attention
 * for this I_T_L nexus, or NO SENSE when none is pending.  Fixed or
 * descriptor sense format follows the device's D_SENSE setting.
 */
static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	u8 ua_asc = 0, ua_ascq = 0;
	unsigned char buf[SE_SENSE_BUF];
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	memset(buf, 0, SE_SENSE_BUF);

	/* DESC bit: explicit descriptor-format requests are not supported */
	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not"
		       " supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* Returns 0 when a UA was pending; asc/ascq identify it */
	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
		scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
					ua_asc, ua_ascq);
	else
		scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);

	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
/*
 * Emulate REPORT LUNS: walk the session ACL's mapped LUN list and emit
 * one 8-byte LUN entry per accessible LUN.  The LUN LIST LENGTH always
 * reflects the full count even when the initiator's allocation length
 * truncates the entries actually copied (SPC-2 r20, 7.19).
 */
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct scsi_lun slun;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8;	/* entries start after the 8-byte header */
	__be32 len;

	buf = transport_kmap_data_sg(cmd);
	if (cmd->data_length && !buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD. In that case, report LUN=0 only.
	 */
	if (!sess)
		goto done;

	nacl = sess->se_node_acl;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if (offset >= cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, &slun);
		memcpy(buf + offset, &slun,
		       min(8u, cmd->data_length - offset));
		offset += 8;
	}
	rcu_read_unlock();

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	/*
	 * If no LUNs are accessible, report virtual LUN 0.
	 */
	if (lun_count == 0) {
		int_to_scsilun(0, &slun);
		/* data_length > 8 implies buf was mapped above */
		if (cmd->data_length > 8)
			memcpy(buf + offset, &slun,
			       min(8u, cmd->data_length - offset));
		lun_count = 1;
	}

	if (buf) {
		/* LUN LIST LENGTH header field, truncated to what fits */
		len = cpu_to_be32(lun_count * 8);
		memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8 + lun_count * 8);
	return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);
/*
 * TEST UNIT READY: the device is always ready by the time a command
 * reaches this layer, so unconditionally complete with GOOD status.
 */
static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
  1144. static void set_dpofua_usage_bits(u8 *usage_bits, struct se_device *dev)
  1145. {
  1146. if (!target_check_fua(dev))
  1147. usage_bits[1] &= ~0x18;
  1148. else
  1149. usage_bits[1] |= 0x18;
  1150. }
  1151. static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev)
  1152. {
  1153. if (!target_check_fua(dev))
  1154. usage_bits[10] &= ~0x18;
  1155. else
  1156. usage_bits[10] |= 0x18;
  1157. }
/*
 * REPORT SUPPORTED OPERATION CODES descriptors for the basic READ/WRITE
 * opcode family.  usage_bits encodes, per CDB byte, which bits the
 * device server inspects; descriptors with update_usage_bits adjust the
 * DPO/FUA bits at reporting time based on the device's FUA support.
 */
static struct target_opcode_descriptor tcm_opcode_read6 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_6,
	.cdb_size = 6,
	.usage_bits = {READ_6, 0x1f, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_read10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_10,
	.cdb_size = 10,
	.usage_bits = {READ_10, 0xf8, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_read12 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_12,
	.cdb_size = 12,
	.usage_bits = {READ_12, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_read16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_16,
	.cdb_size = 16,
	.usage_bits = {READ_16, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_write6 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_6,
	.cdb_size = 6,
	.usage_bits = {WRITE_6, 0x1f, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_write10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_10,
	.cdb_size = 10,
	.usage_bits = {WRITE_10, 0xf8, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_write_verify10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_VERIFY,
	.cdb_size = 10,
	.usage_bits = {WRITE_VERIFY, 0xf0, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_write12 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_12,
	.cdb_size = 12,
	.usage_bits = {WRITE_12, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_write16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_16,
	.cdb_size = 16,
	.usage_bits = {WRITE_16, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_VERIFY_16,
	.cdb_size = 16,
	.usage_bits = {WRITE_VERIFY_16, 0xf0, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};
  1247. static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr,
  1248. struct se_cmd *cmd)
  1249. {
  1250. struct exec_cmd_ops *ops = cmd->protocol_data;
  1251. struct se_device *dev = cmd->se_dev;
  1252. return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) ||
  1253. !!ops->execute_write_same;
  1254. }
/*
 * RSOC descriptor for WRITE SAME (32), a service action of
 * VARIABLE LENGTH CMD.  Reported only when WRITE SAME is available
 * (tcm_is_ws_enabled); DPO/FUA usage bits are fixed up for 32-byte CDBs.
 */
static struct target_opcode_descriptor tcm_opcode_write_same32 = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = VARIABLE_LENGTH_CMD,
	.service_action = WRITE_SAME_32,
	.cdb_size = 32,
	.usage_bits = {VARIABLE_LENGTH_CMD, SCSI_CONTROL_MASK, 0x00, 0x00,
		       0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0x18,
		       0x00, WRITE_SAME_32, 0xe8, 0x00,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0xff, 0xff, 0xff, 0xff},
	.enabled = tcm_is_ws_enabled,
	.update_usage_bits = set_dpofua_usage_bits32,
};
  1272. static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr,
  1273. struct se_cmd *cmd)
  1274. {
  1275. struct se_device *dev = cmd->se_dev;
  1276. return dev->dev_attrib.emulate_caw;
  1277. }
/*
 * RSOC descriptors for COMPARE AND WRITE (gated on the emulate_caw
 * attribute) and for both READ CAPACITY forms.
 */
static struct target_opcode_descriptor tcm_opcode_compare_write = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = COMPARE_AND_WRITE,
	.cdb_size = 16,
	.usage_bits = {COMPARE_AND_WRITE, 0x18, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0x00, 0x00,
		       0x00, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.enabled = tcm_is_caw_enabled,
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_read_capacity = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_CAPACITY,
	.cdb_size = 10,
	.usage_bits = {READ_CAPACITY, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, 0x00,
		       0x01, SCSI_CONTROL_MASK},
};

/* READ CAPACITY (16) is a service action of SERVICE ACTION IN (16) */
static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = SERVICE_ACTION_IN_16,
	.service_action = SAI_READ_CAPACITY_16,
	.cdb_size = 16,
	.usage_bits = {SERVICE_ACTION_IN_16, SAI_READ_CAPACITY_16, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
  1308. static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr,
  1309. struct se_cmd *cmd)
  1310. {
  1311. struct se_device *dev = cmd->se_dev;
  1312. spin_lock(&dev->t10_alua.lba_map_lock);
  1313. if (list_empty(&dev->t10_alua.lba_map_list)) {
  1314. spin_unlock(&dev->t10_alua.lba_map_lock);
  1315. return false;
  1316. }
  1317. spin_unlock(&dev->t10_alua.lba_map_lock);
  1318. return true;
  1319. }
/*
 * RSOC descriptors for REPORT REFERRALS (gated on a configured ALUA LBA
 * map) and the two SYNCHRONIZE CACHE forms.
 */
static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = SERVICE_ACTION_IN_16,
	.service_action = SAI_REPORT_REFERRALS,
	.cdb_size = 16,
	.usage_bits = {SERVICE_ACTION_IN_16, SAI_REPORT_REFERRALS, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_rep_ref_enabled,
};

static struct target_opcode_descriptor tcm_opcode_sync_cache = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = SYNCHRONIZE_CACHE,
	.cdb_size = 10,
	.usage_bits = {SYNCHRONIZE_CACHE, 0x02, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = SYNCHRONIZE_CACHE_16,
	.cdb_size = 16,
	.usage_bits = {SYNCHRONIZE_CACHE_16, 0x02, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};
  1349. static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr,
  1350. struct se_cmd *cmd)
  1351. {
  1352. struct exec_cmd_ops *ops = cmd->protocol_data;
  1353. struct se_device *dev = cmd->se_dev;
  1354. return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
  1355. }
/*
 * RSOC descriptors for UNMAP / WRITE SAME (gated on thin-provisioning
 * support), VERIFY, START STOP UNIT, and the MODE SELECT / MODE SENSE
 * families.
 */
static struct target_opcode_descriptor tcm_opcode_unmap = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = UNMAP,
	.cdb_size = 10,
	.usage_bits = {UNMAP, 0x00, 0x00, 0x00,
		       0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_unmap_enabled,
};

static struct target_opcode_descriptor tcm_opcode_write_same = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_SAME,
	.cdb_size = 10,
	.usage_bits = {WRITE_SAME, 0xe8, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_ws_enabled,
};

static struct target_opcode_descriptor tcm_opcode_write_same16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_SAME_16,
	.cdb_size = 16,
	.usage_bits = {WRITE_SAME_16, 0xe8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.enabled = tcm_is_ws_enabled,
};

static struct target_opcode_descriptor tcm_opcode_verify = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = VERIFY,
	.cdb_size = 10,
	.usage_bits = {VERIFY, 0x00, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_verify16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = VERIFY_16,
	.cdb_size = 16,
	.usage_bits = {VERIFY_16, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_start_stop = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = START_STOP,
	.cdb_size = 6,
	.usage_bits = {START_STOP, 0x01, 0x00, 0x00,
		       0x01, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_mode_select = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SELECT,
	.cdb_size = 6,
	.usage_bits = {MODE_SELECT, 0x10, 0x00, 0x00,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_mode_select10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SELECT_10,
	.cdb_size = 10,
	.usage_bits = {MODE_SELECT_10, 0x10, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_mode_sense = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SENSE,
	.cdb_size = 6,
	.usage_bits = {MODE_SENSE, 0x08, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SENSE_10,
	.cdb_size = 10,
	.usage_bits = {MODE_SENSE_10, 0x18, 0xff, 0xff,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};
  1438. static struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
  1439. .support = SCSI_SUPPORT_FULL,
  1440. .serv_action_valid = 1,
  1441. .opcode = PERSISTENT_RESERVE_IN,
  1442. .service_action = PRI_READ_KEYS,
  1443. .cdb_size = 10,
  1444. .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_KEYS, 0x00, 0x00,
  1445. 0x00, 0x00, 0x00, 0xff,
  1446. 0xff, SCSI_CONTROL_MASK},
  1447. };
  1448. static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
  1449. .support = SCSI_SUPPORT_FULL,
  1450. .serv_action_valid = 1,
  1451. .opcode = PERSISTENT_RESERVE_IN,
  1452. .service_action = PRI_READ_RESERVATION,
  1453. .cdb_size = 10,
  1454. .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_RESERVATION, 0x00, 0x00,
  1455. 0x00, 0x00, 0x00, 0xff,
  1456. 0xff, SCSI_CONTROL_MASK},
  1457. };
/*
 * Decide whether a PERSISTENT RESERVE related opcode descriptor should
 * be reported as supported: PR emulation must be enabled, and when PGR
 * handling is passed through to the backend, opcodes/service actions the
 * backend pr_ops cannot implement are filtered out.
 */
static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
			      struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (!dev->dev_attrib.emulate_pr)
		return false;

	/* Fully emulated PR: every PR opcode is available */
	if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return true;

	switch (descr->opcode) {
	case RESERVE:
	case RESERVE_10:
	case RELEASE:
	case RELEASE_10:
		/*
		 * The pr_ops which are used by the backend modules don't
		 * support these commands.
		 */
		return false;
	case PERSISTENT_RESERVE_OUT:
		switch (descr->service_action) {
		case PRO_REGISTER_AND_MOVE:
		case PRO_REPLACE_LOST_RESERVATION:
			/*
			 * The backend modules don't have access to ports and
			 * I_T nexuses so they can't handle these type of
			 * requests.
			 */
			return false;
		}
		break;
	case PERSISTENT_RESERVE_IN:
		if (descr->service_action == PRI_READ_FULL_STATUS)
			return false;
		break;
	}

	return true;
}
/* PERSISTENT RESERVE IN / REPORT CAPABILITIES; hidden when PR is unusable. */
static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_REPORT_CAPABILITIES,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_REPORT_CAPABILITIES, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/*
 * PERSISTENT RESERVE IN / READ FULL STATUS; tcm_is_pr_enabled() additionally
 * hides this one in PGR passthrough mode.
 */
static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_READ_FULL_STATUS,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_FULL_STATUS, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};
/*
 * PERSISTENT RESERVE OUT service action descriptors. All are gated on
 * tcm_is_pr_enabled(); REGISTER AND MOVE is additionally filtered out there
 * in PGR passthrough mode.
 */

/* PERSISTENT RESERVE OUT / REGISTER. */
static struct target_opcode_descriptor tcm_opcode_pro_register = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / RESERVE. */
static struct target_opcode_descriptor tcm_opcode_pro_reserve = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_RESERVE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RESERVE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / RELEASE. */
static struct target_opcode_descriptor tcm_opcode_pro_release = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_RELEASE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RELEASE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / CLEAR. */
static struct target_opcode_descriptor tcm_opcode_pro_clear = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_CLEAR,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_CLEAR, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / PREEMPT. */
static struct target_opcode_descriptor tcm_opcode_pro_preempt = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_PREEMPT,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / PREEMPT AND ABORT. */
static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_PREEMPT_AND_ABORT,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT_AND_ABORT, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / REGISTER AND IGNORE EXISTING KEY. */
static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
	.cdb_size = 10,
	.usage_bits = {
		PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
		0xff, 0x00,
		0x00, 0xff, 0xff, 0xff,
		0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / REGISTER AND MOVE. */
static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER_AND_MOVE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_MOVE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};
/*
 * Legacy SPC-2 RESERVE/RELEASE descriptors. tcm_is_pr_enabled() hides all
 * four in PGR passthrough mode since the backend pr_ops don't support them.
 */

/* RELEASE(6). */
static struct target_opcode_descriptor tcm_opcode_release = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RELEASE,
	.cdb_size = 6,
	.usage_bits = {RELEASE, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* RELEASE(10). */
static struct target_opcode_descriptor tcm_opcode_release10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RELEASE_10,
	.cdb_size = 10,
	.usage_bits = {RELEASE_10, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* RESERVE(6). */
static struct target_opcode_descriptor tcm_opcode_reserve = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RESERVE,
	.cdb_size = 6,
	.usage_bits = {RESERVE, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* RESERVE(10). */
static struct target_opcode_descriptor tcm_opcode_reserve10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RESERVE_10,
	.cdb_size = 10,
	.usage_bits = {RESERVE_10, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};
/* REQUEST SENSE; always supported, byte 4 (allocation length) is settable. */
static struct target_opcode_descriptor tcm_opcode_request_sense = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = REQUEST_SENSE,
	.cdb_size = 6,
	.usage_bits = {REQUEST_SENSE, 0x00, 0x00, 0x00,
		       0xff, SCSI_CONTROL_MASK},
};

/*
 * INQUIRY; byte 1 mask 0x01 exposes the EVPD bit, bytes 2-4 (page code and
 * allocation length) are fully settable.
 */
static struct target_opcode_descriptor tcm_opcode_inquiry = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = INQUIRY,
	.cdb_size = 6,
	.usage_bits = {INQUIRY, 0x01, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};
  1655. static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr,
  1656. struct se_cmd *cmd)
  1657. {
  1658. struct se_device *dev = cmd->se_dev;
  1659. return dev->dev_attrib.emulate_3pc;
  1660. }
/* EXTENDED COPY (LID1); only reported when emulate_3pc is set. */
static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = EXTENDED_COPY,
	.cdb_size = 16,
	.usage_bits = {EXTENDED_COPY, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_3pc_enabled,
};

/* RECEIVE COPY RESULTS / OPERATING PARAMETERS; gated like EXTENDED COPY. */
static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = RECEIVE_COPY_RESULTS,
	.service_action = RCR_SA_OPERATING_PARAMETERS,
	.cdb_size = 16,
	.usage_bits = {RECEIVE_COPY_RESULTS, RCR_SA_OPERATING_PARAMETERS,
		       0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_3pc_enabled,
};
/* REPORT LUNS; always supported. */
static struct target_opcode_descriptor tcm_opcode_report_luns = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = REPORT_LUNS,
	.cdb_size = 12,
	.usage_bits = {REPORT_LUNS, 0x00, 0xff, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};

/* TEST UNIT READY; always supported. */
static struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = TEST_UNIT_READY,
	.cdb_size = 6,
	.usage_bits = {TEST_UNIT_READY, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
};

/*
 * MAINTENANCE IN / REPORT TARGET PORT GROUPS. Byte 1 mask 0xE0 | SA exposes
 * the upper three bits (parameter data format selection) on top of the
 * service action field.
 */
static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_IN,
	.service_action = MI_REPORT_TARGET_PGS,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_IN, 0xE0 | MI_REPORT_TARGET_PGS, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
  1710. static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr,
  1711. struct se_cmd *cmd)
  1712. {
  1713. struct se_device *dev = cmd->se_dev;
  1714. return dev->dev_attrib.emulate_rsoc;
  1715. }
/*
 * MAINTENANCE IN / REPORT SUPPORTED OPERATION CODES. Byte 2 mask 0x87 exposes
 * the RCTD bit (bit 7) and the 3-bit REPORTING OPTIONS field (bits 2:0).
 */
static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_IN,
	.service_action = MI_REPORT_SUPPORTED_OPERATION_CODES,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_IN, MI_REPORT_SUPPORTED_OPERATION_CODES,
		       0x87, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = spc_rsoc_enabled,
};
  1728. static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr,
  1729. struct se_cmd *cmd)
  1730. {
  1731. struct t10_alua_tg_pt_gp *l_tg_pt_gp;
  1732. struct se_lun *l_lun = cmd->se_lun;
  1733. rcu_read_lock();
  1734. l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
  1735. if (!l_tg_pt_gp) {
  1736. rcu_read_unlock();
  1737. return false;
  1738. }
  1739. if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
  1740. rcu_read_unlock();
  1741. return false;
  1742. }
  1743. rcu_read_unlock();
  1744. return true;
  1745. }
/* MAINTENANCE OUT / SET TARGET PORT GROUPS; gated on explicit ALUA support. */
static struct target_opcode_descriptor tcm_opcode_set_tpg = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_OUT,
	.service_action = MO_SET_TARGET_PGS,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_OUT, MO_SET_TARGET_PGS, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_set_tpg_enabled,
};
/*
 * Master list of opcode descriptors reported by REPORT SUPPORTED OPERATION
 * CODES. Entries with an ->enabled hook are filtered per-device at request
 * time; the rest are unconditionally reported.
 */
static struct target_opcode_descriptor *tcm_supported_opcodes[] = {
	&tcm_opcode_read6,
	&tcm_opcode_read10,
	&tcm_opcode_read12,
	&tcm_opcode_read16,
	&tcm_opcode_write6,
	&tcm_opcode_write10,
	&tcm_opcode_write_verify10,
	&tcm_opcode_write12,
	&tcm_opcode_write16,
	&tcm_opcode_write_verify16,
	&tcm_opcode_write_same32,
	&tcm_opcode_compare_write,
	&tcm_opcode_read_capacity,
	&tcm_opcode_read_capacity16,
	&tcm_opcode_read_report_refferals,
	&tcm_opcode_sync_cache,
	&tcm_opcode_sync_cache16,
	&tcm_opcode_unmap,
	&tcm_opcode_write_same,
	&tcm_opcode_write_same16,
	&tcm_opcode_verify,
	&tcm_opcode_verify16,
	&tcm_opcode_start_stop,
	&tcm_opcode_mode_select,
	&tcm_opcode_mode_select10,
	&tcm_opcode_mode_sense,
	&tcm_opcode_mode_sense10,
	&tcm_opcode_pri_read_keys,
	&tcm_opcode_pri_read_resrv,
	&tcm_opcode_pri_read_caps,
	&tcm_opcode_pri_read_full_status,
	&tcm_opcode_pro_register,
	&tcm_opcode_pro_reserve,
	&tcm_opcode_pro_release,
	&tcm_opcode_pro_clear,
	&tcm_opcode_pro_preempt,
	&tcm_opcode_pro_preempt_abort,
	&tcm_opcode_pro_reg_ign_exist,
	&tcm_opcode_pro_register_move,
	&tcm_opcode_release,
	&tcm_opcode_release10,
	&tcm_opcode_reserve,
	&tcm_opcode_reserve10,
	&tcm_opcode_request_sense,
	&tcm_opcode_inquiry,
	&tcm_opcode_extended_copy_lid1,
	&tcm_opcode_rcv_copy_res_op_params,
	&tcm_opcode_report_luns,
	&tcm_opcode_test_unit_ready,
	&tcm_opcode_report_target_pgs,
	&tcm_opcode_report_supp_opcodes,
	&tcm_opcode_set_tpg,
};
  1811. static int
  1812. spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp,
  1813. struct target_opcode_descriptor *descr)
  1814. {
  1815. if (!ctdp)
  1816. return 0;
  1817. put_unaligned_be16(0xa, buf);
  1818. buf[3] = descr->specific_timeout;
  1819. put_unaligned_be32(descr->nominal_timeout, &buf[4]);
  1820. put_unaligned_be32(descr->recommended_timeout, &buf[8]);
  1821. return 12;
  1822. }
  1823. static int
  1824. spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp,
  1825. struct target_opcode_descriptor *descr)
  1826. {
  1827. int td_size = 0;
  1828. buf[0] = descr->opcode;
  1829. put_unaligned_be16(descr->service_action, &buf[2]);
  1830. buf[5] = (ctdp << 1) | descr->serv_action_valid;
  1831. put_unaligned_be16(descr->cdb_size, &buf[6]);
  1832. td_size = spc_rsoc_encode_command_timeouts_descriptor(&buf[8], ctdp,
  1833. descr);
  1834. return 8 + td_size;
  1835. }
/*
 * spc_rsoc_encode_one_command_descriptor() - build one_command parameter data.
 * @buf: destination buffer
 * @ctdp: whether a command timeouts descriptor follows
 * @descr: matched opcode descriptor, or NULL when the opcode is unsupported
 * @dev: device used by ->update_usage_bits to patch device-dependent fields
 *
 * Returns the number of bytes written.
 */
static int
spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp,
				       struct target_opcode_descriptor *descr,
				       struct se_device *dev)
{
	int td_size = 0;

	if (!descr) {
		/* Unknown opcode: just the header with SUPPORT=not supported. */
		buf[1] = (ctdp << 7) | SCSI_SUPPORT_NOT_SUPPORTED;
		return 2;
	}

	buf[1] = (ctdp << 7) | SCSI_SUPPORT_FULL;
	put_unaligned_be16(descr->cdb_size, &buf[2]);
	/* CDB usage map; some descriptors patch it per-device afterwards. */
	memcpy(&buf[4], descr->usage_bits, descr->cdb_size);
	if (descr->update_usage_bits)
		descr->update_usage_bits(&buf[4], dev);

	td_size = spc_rsoc_encode_command_timeouts_descriptor(
			&buf[4 + descr->cdb_size], ctdp, descr);

	return 4 + descr->cdb_size + td_size;
}
  1855. static sense_reason_t
  1856. spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
  1857. {
  1858. struct target_opcode_descriptor *descr;
  1859. struct se_session *sess = cmd->se_sess;
  1860. unsigned char *cdb = cmd->t_task_cdb;
  1861. u8 opts = cdb[2] & 0x3;
  1862. u8 requested_opcode;
  1863. u16 requested_sa;
  1864. int i;
  1865. requested_opcode = cdb[3];
  1866. requested_sa = ((u16)cdb[4]) << 8 | cdb[5];
  1867. *opcode = NULL;
  1868. if (opts > 3) {
  1869. pr_debug("TARGET_CORE[%s]: Invalid REPORT SUPPORTED OPERATION CODES"
  1870. " with unsupported REPORTING OPTIONS %#x for 0x%08llx from %s\n",
  1871. cmd->se_tfo->fabric_name, opts,
  1872. cmd->se_lun->unpacked_lun,
  1873. sess->se_node_acl->initiatorname);
  1874. return TCM_INVALID_CDB_FIELD;
  1875. }
  1876. for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
  1877. descr = tcm_supported_opcodes[i];
  1878. if (descr->opcode != requested_opcode)
  1879. continue;
  1880. switch (opts) {
  1881. case 0x1:
  1882. /*
  1883. * If the REQUESTED OPERATION CODE field specifies an
  1884. * operation code for which the device server implements
  1885. * service actions, then the device server shall
  1886. * terminate the command with CHECK CONDITION status,
  1887. * with the sense key set to ILLEGAL REQUEST, and the
  1888. * additional sense code set to INVALID FIELD IN CDB
  1889. */
  1890. if (descr->serv_action_valid)
  1891. return TCM_INVALID_CDB_FIELD;
  1892. if (!descr->enabled || descr->enabled(descr, cmd))
  1893. *opcode = descr;
  1894. break;
  1895. case 0x2:
  1896. /*
  1897. * If the REQUESTED OPERATION CODE field specifies an
  1898. * operation code for which the device server does not
  1899. * implement service actions, then the device server
  1900. * shall terminate the command with CHECK CONDITION
  1901. * status, with the sense key set to ILLEGAL REQUEST,
  1902. * and the additional sense code set to INVALID FIELD IN CDB.
  1903. */
  1904. if (descr->serv_action_valid &&
  1905. descr->service_action == requested_sa) {
  1906. if (!descr->enabled || descr->enabled(descr,
  1907. cmd))
  1908. *opcode = descr;
  1909. } else if (!descr->serv_action_valid)
  1910. return TCM_INVALID_CDB_FIELD;
  1911. break;
  1912. case 0x3:
  1913. /*
  1914. * The command support data for the operation code and
  1915. * service action a specified in the REQUESTED OPERATION
  1916. * CODE field and REQUESTED SERVICE ACTION field shall
  1917. * be returned in the one_command parameter data format.
  1918. */
  1919. if (descr->service_action == requested_sa)
  1920. if (!descr->enabled || descr->enabled(descr,
  1921. cmd))
  1922. *opcode = descr;
  1923. break;
  1924. }
  1925. }
  1926. return 0;
  1927. }
  1928. static sense_reason_t
  1929. spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
  1930. {
  1931. int descr_num = ARRAY_SIZE(tcm_supported_opcodes);
  1932. struct target_opcode_descriptor *descr = NULL;
  1933. unsigned char *cdb = cmd->t_task_cdb;
  1934. u8 rctd = (cdb[2] >> 7) & 0x1;
  1935. unsigned char *buf = NULL;
  1936. int response_length = 0;
  1937. u8 opts = cdb[2] & 0x3;
  1938. unsigned char *rbuf;
  1939. sense_reason_t ret = 0;
  1940. int i;
  1941. if (!cmd->se_dev->dev_attrib.emulate_rsoc)
  1942. return TCM_UNSUPPORTED_SCSI_OPCODE;
  1943. rbuf = transport_kmap_data_sg(cmd);
  1944. if (cmd->data_length && !rbuf) {
  1945. ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  1946. goto out;
  1947. }
  1948. if (opts == 0)
  1949. response_length = 4 + (8 + rctd * 12) * descr_num;
  1950. else {
  1951. ret = spc_rsoc_get_descr(cmd, &descr);
  1952. if (ret)
  1953. goto out;
  1954. if (descr)
  1955. response_length = 4 + descr->cdb_size + rctd * 12;
  1956. else
  1957. response_length = 2;
  1958. }
  1959. buf = kzalloc(response_length, GFP_KERNEL);
  1960. if (!buf) {
  1961. ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  1962. goto out;
  1963. }
  1964. response_length = 0;
  1965. if (opts == 0) {
  1966. response_length += 4;
  1967. for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
  1968. descr = tcm_supported_opcodes[i];
  1969. if (descr->enabled && !descr->enabled(descr, cmd))
  1970. continue;
  1971. response_length += spc_rsoc_encode_command_descriptor(
  1972. &buf[response_length], rctd, descr);
  1973. }
  1974. put_unaligned_be32(response_length - 3, buf);
  1975. } else {
  1976. response_length = spc_rsoc_encode_one_command_descriptor(
  1977. &buf[response_length], rctd, descr,
  1978. cmd->se_dev);
  1979. }
  1980. memcpy(rbuf, buf, min_t(u32, response_length, cmd->data_length));
  1981. out:
  1982. kfree(buf);
  1983. transport_kunmap_data_sg(cmd);
  1984. if (!ret)
  1985. target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, response_length);
  1986. return ret;
  1987. }
/*
 * spc_parse_cdb() - decode an SPC (non-media) CDB for the target core.
 * @cmd: command whose t_task_cdb is to be parsed
 * @size: out parameter; expected transfer length derived from the CDB
 *
 * Sets cmd->execute_cmd to the matching emulation handler and fills in
 * @size. Returns 0 on success or TCM_UNSUPPORTED_SCSI_OPCODE for opcodes
 * this device cannot service.
 */
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * First pass: reject reservation opcodes the device configuration
	 * does not support (mirrors the RSOC gating in tcm_is_pr_enabled()).
	 */
	switch (cdb[0]) {
	case RESERVE:
	case RESERVE_10:
	case RELEASE:
	case RELEASE_10:
		if (!dev->dev_attrib.emulate_pr)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
		if (!dev->dev_attrib.emulate_pr)
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	/* Second pass: dispatch to the emulation handler and compute size. */
	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = get_unaligned_be16(&cdb[7]);
		break;
	case PERSISTENT_RESERVE_IN:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = get_unaligned_be32(&cdb[5]);
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		/* RELEASE(6) carries no length field; trust the fabric. */
		if (cdb[0] == RELEASE_10)
			*size = get_unaligned_be16(&cdb[7]);
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = get_unaligned_be16(&cdb[7]);
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = get_unaligned_be16(&cdb[3]);

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = get_unaligned_be32(&cdb[6]);
		break;
	case EXTENDED_COPY:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = get_unaligned_be32(&cdb[10]);
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = get_unaligned_be16(&cdb[3]);
		break;
	case WRITE_BUFFER:
		*size = get_unaligned_be24(&cdb[6]);
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = get_unaligned_be32(&cdb[6]);
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			if ((cdb[1] & 0x1f) ==
					MI_REPORT_SUPPORTED_OPERATION_CODES)
				cmd->execute_cmd =
					spc_emulate_report_supp_op_codes;
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);