qcom_nandc.c

/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>

/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
#define NAND_ADDR0 0x04
#define NAND_ADDR1 0x08
#define NAND_FLASH_CHIP_SELECT 0x0c
#define NAND_EXEC_CMD 0x10
#define NAND_FLASH_STATUS 0x14
#define NAND_BUFFER_STATUS 0x18
#define NAND_DEV0_CFG0 0x20
#define NAND_DEV0_CFG1 0x24
#define NAND_DEV0_ECC_CFG 0x28
#define NAND_DEV1_ECC_CFG 0x2c
#define NAND_DEV1_CFG0 0x30
#define NAND_DEV1_CFG1 0x34
#define NAND_READ_ID 0x40
#define NAND_READ_STATUS 0x44
#define NAND_DEV_CMD0 0xa0
#define NAND_DEV_CMD1 0xa4
#define NAND_DEV_CMD2 0xa8
#define NAND_DEV_CMD_VLD 0xac
#define SFLASHC_BURST_CFG 0xe0
#define NAND_ERASED_CW_DETECT_CFG 0xe8
#define NAND_ERASED_CW_DETECT_STATUS 0xec
#define NAND_EBI2_ECC_BUF_CFG 0xf0
#define FLASH_BUF_ACC 0x100

#define NAND_CTRL 0xf00
#define NAND_VERSION 0xf08
#define NAND_READ_LOCATION_0 0xf20
#define NAND_READ_LOCATION_1 0xf24
#define NAND_READ_LOCATION_2 0xf28
#define NAND_READ_LOCATION_3 0xf2c

/* dummy register offsets, used by write_reg_dma */
#define NAND_DEV_CMD1_RESTORE 0xdead
#define NAND_DEV_CMD_VLD_RESTORE 0xbeef

/* NAND_FLASH_CMD bits */
#define PAGE_ACC BIT(4)
#define LAST_PAGE BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define NAND_DEV_SEL 0
#define DM_EN BIT(2)

/* NAND_FLASH_STATUS bits */
#define FS_OP_ERR BIT(4)
#define FS_READY_BSY_N BIT(5)
#define FS_MPU_ERR BIT(8)
#define FS_DEVICE_STS_ERR BIT(16)
#define FS_DEVICE_WP BIT(23)

/* NAND_BUFFER_STATUS bits */
#define BS_UNCORRECTABLE_BIT BIT(8)
#define BS_CORRECTABLE_ERR_MSK 0x1f

/* NAND_DEVn_CFG0 bits */
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
#define ECC_PARITY_SIZE_BYTES_RS 19
#define SPARE_SIZE_BYTES 23
#define NUM_ADDR_CYCLES 27
#define STATUS_BFR_READ 30
#define SET_RD_MODE_AFTER_STATUS 31

/* NAND_DEVn_CFG1 bits */
#define DEV0_CFG1_ECC_DISABLE 0
#define WIDE_FLASH 1
#define NAND_RECOVERY_CYCLES 2
#define CS_ACTIVE_BSY 5
#define BAD_BLOCK_BYTE_NUM 6
#define BAD_BLOCK_IN_SPARE_AREA 16
#define WR_RD_BSY_GAP 17
#define ENABLE_BCH_ECC 27

/* NAND_DEV0_ECC_CFG bits */
#define ECC_CFG_ECC_DISABLE 0
#define ECC_SW_RESET 1
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES_BCH 8
#define ECC_NUM_DATA_BYTES 16
#define ECC_FORCE_CLK_OPEN 30

/* NAND_DEV_CMD1 bits */
#define READ_ADDR 0

/* NAND_DEV_CMD_VLD bits */
#define READ_START_VLD BIT(0)
#define READ_STOP_VLD BIT(1)
#define WRITE_START_VLD BIT(2)
#define ERASE_START_VLD BIT(3)
#define SEQ_READ_START_VLD BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS 0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK 1
#define AUTO_DETECT_RES 0
#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define PAGE_ALL_ERASED BIT(7)
#define CODEWORD_ALL_ERASED BIT(6)
#define PAGE_ERASED BIT(5)
#define CODEWORD_ERASED BIT(4)
#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET 0
#define READ_LOCATION_SIZE 16
#define READ_LOCATION_LAST 31

/* Version Mask */
#define NAND_VERSION_MAJOR_MASK 0xf0000000
#define NAND_VERSION_MAJOR_SHIFT 28
#define NAND_VERSION_MINOR_MASK 0x0fff0000
#define NAND_VERSION_MINOR_SHIFT 16

/* NAND OP_CMDs */
#define OP_PAGE_READ 0x2
#define OP_PAGE_READ_WITH_ECC 0x3
#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
#define OP_PROGRAM_PAGE 0x6
#define OP_PAGE_PROGRAM_WITH_ECC 0x7
#define OP_PROGRAM_PAGE_SPARE 0x9
#define OP_BLOCK_ERASE 0xa
#define OP_FETCH_ID 0xb
#define OP_RESET_DEVICE 0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
			      ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define BAM_MODE_EN BIT(0)

/*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define NANDC_STEP_SIZE 512

/*
 * the largest page size we support is 8K, this will have 16 steps/codewords
 * of 512 bytes each
 */
#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD (3 * MAX_NUM_STEPS)

/* ECC modes supported by the controller */
#define ECC_NONE BIT(0)
#define ECC_RS_4BIT BIT(1)
#define ECC_BCH_4BIT BIT(2)
#define ECC_BCH_8BIT BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)	\
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,			\
	      ((offset) << READ_LOCATION_OFFSET) |		\
	      ((size) << READ_LOCATION_SIZE) |			\
	      ((is_last) << READ_LOCATION_LAST))
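
/*
 * Worked example (illustration only, not used by the driver): a call like
 * nandc_set_read_loc(nandc, 0, 0, 516, 1) programs NAND_READ_LOCATION_0
 * with offset 0, size 516 and the LAST flag set, i.e. the register value
 * (0 << 0) | (516 << 16) | (1 << 31) == 0x82040000.
 */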

/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
	((chip)->reg_read_dma + \
	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))

#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8

#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)

/*
 * Flags used in DMA descriptor preparation helper functions
 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL BIT(2)
/*
 * The erased codeword status register is used twice in a single transfer,
 * so this flag selects the value it should currently hold
 */
#define NAND_ERASED_CW_SET BIT(4)

/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for the next sgl
 * @bam_ce_start - the index in bam_ce which marks the first command element
 *	of the current sgl. It will be used for size calculation
 *	of the current sgl
 * @cmd_sgl_pos - current index in command sgl.
 * @cmd_sgl_start - start index in command sgl.
 * @tx_sgl_pos - current index in data sgl for tx.
 * @tx_sgl_start - start index in data sgl for tx.
 * @rx_sgl_pos - current index in data sgl for rx.
 * @rx_sgl_start - start index in data sgl for rx.
 * @wait_second_completion - wait for second DMA desc completion before making
 *	the NAND transfer completion.
 * @txn_done - completion for NAND transfer.
 * @last_data_desc - last DMA desc in data channel (tx/rx).
 * @last_cmd_desc - last DMA desc in command channel.
 */
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
	bool wait_second_completion;
	struct completion txn_done;
	struct dma_async_tx_descriptor *last_data_desc;
	struct dma_async_tx_descriptor *last_cmd_desc;
};

/*
 * This data type corresponds to the nand dma descriptor
 * @node - list node for adding the desc to nandc->desc_list
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
 *	ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dma_desc - low level DMA engine descriptor
 */
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};

/*
 * holds the current register values that we want to write. acts as a contiguous
 * chunk of memory which we use to write the controller registers through DMA.
 */
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;

	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};

/*
 * NAND controller data struct
 *
 * @controller: base controller structure
 * @host_list: list containing all the chips attached to the
 *	controller
 * @dev: parent device
 * @base: MMIO base
 * @base_phys: physical base address of controller registers
 * @base_dma: dma base address of controller registers
 * @core_clk: controller clock
 * @aon_clk: another controller clock
 *
 * @chan: dma channel
 * @cmd_crci: ADM DMA CRCI for command flow control
 * @data_crci: ADM DMA CRCI for data flow control
 * @desc_list: DMA descriptor list (list of desc_infos)
 *
 * @data_buffer: our local DMA buffer for page read/writes,
 *	used when we can't use the buffer provided
 *	by upper layers directly
 * @buf_size/count/start: markers for chip->read_buf/write_buf functions
 * @reg_read_buf: local buffer for reading back registers via DMA
 * @reg_read_dma: contains dma address for register read buffer
 * @reg_read_pos: marker for data read in reg_read_buf
 *
 * @regs: a contiguous chunk of memory for DMA register
 *	writes. contains the register values to be
 *	written to controller
 * @cmd1/vld: some fixed controller register values
 * @props: properties of current NAND controller,
 *	initialized via DT match data
 * @max_cwperpage: maximum QPIC codewords required, calculated
 *	from the page size of all connected NAND devices
 */
struct qcom_nand_controller {
	struct nand_controller controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		/* will be used only by QPIC for BAM DMA */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* will be used only by EBI2 for ADM DMA */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};

/*
 * NAND chip structure
 *
 * @chip: base NAND chip structure
 * @node: list node to add itself to host_list in
 *	qcom_nand_controller
 *
 * @cs: chip select value for this chip
 * @cw_size: the number of bytes in a single step/codeword
 *	of a page, consisting of all data, ecc, spare
 *	and reserved bytes
 * @cw_data: the number of bytes within a codeword protected
 *	by ECC
 * @use_ecc: request the controller to use ECC for the
 *	upcoming read/write
 * @bch_enabled: flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw: ECC bytes used by controller hardware for this
 *	chip
 * @status: value to be returned if NAND_CMD_STATUS command
 *	is executed
 * @last_command: keeps track of last command on this chip. used
 *	for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
 *	ecc/non-ecc mode for the current nand flash
 *	device
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};

/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @is_qpic - whether NAND CTRL is part of qpic IP
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	bool is_qpic;
	u32 dev_cmd_reg_start;
};

/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	devm_kfree(nandc->dev, bam_txn);
}

/* Allocates and Initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	init_completion(&bam_txn->txn_done);

	return bam_txn;
}
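
/*
 * For reference, the single allocation above is laid out as (sketch, with
 * N == nandc->max_cwperpage):
 *
 *   [struct bam_transaction]
 *   [N * QPIC_PER_CW_CMD_ELEMENTS command elements]   <- bam_ce
 *   [N * QPIC_PER_CW_CMD_SGL scatterlist entries]     <- cmd_sgl
 *   [N * QPIC_PER_CW_DATA_SGL scatterlist entries]    <- data_sgl
 *
 * so one worst-case buffer serves every transfer and is only re-indexed
 * (in clear_bam_transaction() below) between operations.
 */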

/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->is_bam)
		return;

	bam_txn->bam_ce_pos = 0;
	bam_txn->bam_ce_start = 0;
	bam_txn->cmd_sgl_pos = 0;
	bam_txn->cmd_sgl_start = 0;
	bam_txn->tx_sgl_pos = 0;
	bam_txn->tx_sgl_start = 0;
	bam_txn->rx_sgl_pos = 0;
	bam_txn->rx_sgl_start = 0;
	bam_txn->last_data_desc = NULL;
	bam_txn->wait_second_completion = false;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
}

/* Callback for DMA descriptor completion */
static void qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

	/*
	 * In case of data transfer with NAND, 2 callbacks will be generated.
	 * One for command channel and another one for data channel.
	 * If current transaction has data descriptors
	 * (i.e. wait_second_completion is true), then set this to false
	 * and wait for second DMA descriptor completion.
	 */
	if (bam_txn->wait_second_completion)
		bam_txn->wait_second_completion = false;
	else
		complete(&bam_txn->txn_done);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
					  bool is_cpu)
{
	if (!nandc->props->is_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}

static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	default:
		return NULL;
	}
}

static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
			  u32 val)
{
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}

/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}
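
/*
 * Example of the address split (illustration only): for page 0x12345 and
 * column 0, NAND_ADDR0 is written with (0x12345 << 16) | 0 = 0x23450000
 * (truncated to 32 bits) and NAND_ADDR1 carries the remaining page bits,
 * (0x12345 >> 16) & 0xff = 0x01.
 */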

/*
 * update_rw_regs: set up read/write register values, these will be
 * written to the NAND controller registers via DMA
 *
 * @num_cw: number of steps for the read/write operation
 * @read: read or write operation
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	if (read) {
		if (host->use_ecc)
			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	if (read)
		nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}
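
/*
 * Example (illustration only): a 4K page contains 4096 / 512 = 8
 * steps/codewords, so num_cw = 8 and the 3-bit CW_PER_PAGE field of CFG0
 * (bits [8:6], cleared by the ~(7U << CW_PER_PAGE) masks above) is
 * programmed with num_cw - 1 = 7.
 */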

/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added to the NAND DMA descriptor queue
 * which will be submitted to the DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* update last data/command descriptor */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
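
/*
 * Note: each prepare_bam_async_desc() call only consumes the sgl entries
 * accumulated since the previous call on the same channel (the
 * [*_sgl_start, *_sgl_pos) window), so several independent descriptors can
 * be carved out of the one preallocated sgl array per transaction.
 */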

/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type so this function uses the command
 * element from bam transaction ce array and fills the same with required
 * data. A single SGL can contain multiple command elements so
 * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
 * after the current command element.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
		 * is not set, form the DMA descriptor
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}

/*
 * read_reg_dma: prepares a descriptor to read a given number of
 * contiguous registers to the reg_read_buf pointer
 *
 * @first: offset of the first register in the contiguous block
 * @num_regs: number of registers to read
 * @flags: flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
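
/*
 * Usage example (as in config_nand_cw_read() below):
 *
 *	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
 *
 * reads the two adjacent registers NAND_FLASH_STATUS (0x14) and
 * NAND_BUFFER_STATUS (0x18) into reg_read_buf with a single descriptor.
 */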

/*
 * write_reg_dma: prepares a descriptor to write a given number of
 * contiguous registers
 *
 * @first: offset of the first register in the contiguous block
 * @num_regs: number of registers to write
 * @flags: flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}

/*
 * read_data_dma: prepares a DMA descriptor to transfer data from the
 * controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma: prepares a DMA descriptor to transfer data from
 * 'vaddr' to the controller's internal buffer
 *
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to read from
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void
config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
{
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	if (use_ecc) {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
			     NAND_BAM_NEXT_SGL);
	} else {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	}
}

/*
 * Helper to prepare dma descriptors to configure registers needed for reading
 * a single codeword in a page
 */
static void
config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
				bool use_ecc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc, use_ecc);
}

/*
 * Helper to prepare DMA descriptors used to configure registers
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}

/*
 * the following functions are used within chip->cmdfunc() to perform different
 * NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc, false);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
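
/*
 * Note on the CMD1/VLD dance above: the READ_ADDR byte of CMD1 is replaced
 * with NAND_CMD_PARAM (0xec), so the raw read kicked off by EXEC_CMD issues
 * the ONFI parameter-page command instead of a normal page read; the
 * *_RESTORE writes then put the original CMD1/VLD values back.
 */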

/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;
		if (bam_txn->last_data_desc) {
			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
			bam_txn->last_data_desc->callback_param = bam_txn;
			bam_txn->wait_second_completion = true;
		}

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}

/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}

static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}

/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte; this status byte can be read after
 * NAND_CMD_STATUS is called
 */
  1188. static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
  1189. {
  1190. struct nand_chip *chip = &host->chip;
  1191. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1192. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1193. int num_cw;
  1194. int i;
  1195. num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
  1196. nandc_read_buffer_sync(nandc, true);
  1197. for (i = 0; i < num_cw; i++) {
  1198. u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
  1199. if (flash_status & FS_MPU_ERR)
  1200. host->status &= ~NAND_STATUS_WP;
  1201. if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
  1202. (flash_status &
  1203. FS_DEVICE_STS_ERR)))
  1204. host->status |= NAND_STATUS_FAIL;
  1205. }
  1206. }
static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}
/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased
 * CW, but it notifies that it is an erased CW by placing special characters
 * at certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page flags an error in NAND_FLASH_STATUS, check if the
	 * page is erased by looking for 0x54s at offsets 3 and 175 from the
	 * beginning of each codeword
	 */
	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if the erased codeword markers exist, override them with 0xffs
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't,
	 * then restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}
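/*
 * Illustrative note (derived from the checks above, not from a datasheet):
 * a chunk counts as an erased RS codeword only when exactly one of the two
 * marker offsets holds 0x54 while the other holds 0xff, and every remaining
 * byte is 0xff. E.g. a buffer that is all 0xff except data_buf[3] == 0x54
 * is fixed up to all 0xffs and reported erased; a buffer with any other
 * non-0xff byte is restored and reported not erased.
 */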
struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

/* reads back FLASH_STATUS register set by the controller */
static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int i;

	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < cw_cnt; i++) {
		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}
/* performs raw read for one codeword */
static int
qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
		       u8 *data_buf, u8 *oob_buf, int page, int cw)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;

	nand_read_page_op(chip, page, 0, NULL, 0);
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	set_address(host, host->cw_size * cw, page);
	update_rw_regs(host, 1, true);
	config_nand_page_read(nandc);

	data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
	oob_size1 = host->bbm_size;

	if (cw == (ecc->steps - 1)) {
		data_size2 = ecc->size - data_size1 -
			     ((ecc->steps - 1) * 4);
		oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
			    host->spare_bytes;
	} else {
		data_size2 = host->cw_data - data_size1;
		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
	}

	if (nandc->props->is_bam) {
		nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
		read_loc += data_size1;

		nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
		read_loc += oob_size1;

		nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
		read_loc += data_size2;

		nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
	}

	config_nand_cw_read(nandc, false);

	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = submit_descs(nandc);
	free_descs(nandc);
	if (ret) {
		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return check_flash_errors(host, 1);
}
/*
 * Bitflips can happen in erased codewords also so this function counts the
 * number of 0s in each CW for which the ECC engine returns an uncorrectable
 * error. The page will be assumed to be erased if this count is less than or
 * equal to ecc->strength for each CW.
 *
 * 1. Both DATA and OOB need to be checked for the number of 0s. The
 *    top-level API can be called with only data buf or OOB buf so use
 *    chip->data_buf if data buf is null and chip->oob_poi if oob buf
 *    is null for copying the raw bytes.
 * 2. Perform raw read for all the CWs which have uncorrectable errors.
 * 3. For each CW, check the number of 0s in cw_data and usable OOB bytes.
 *    Bitflips in the BBM and spare bytes don't affect the ECC, so don't
 *    check the number of bitflips in this area.
 */
static int
check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
		      u8 *oob_buf, unsigned long uncorrectable_cws,
		      int page, unsigned int max_bitflips)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *cw_data_buf, *cw_oob_buf;
	int cw, data_size, oob_size, ret = 0;

	if (!data_buf) {
		data_buf = chip->data_buf;
		chip->pagebuf = -1;
	}

	if (!oob_buf) {
		oob_buf = chip->oob_poi;
		chip->pagebuf = -1;
	}

	for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
		if (cw == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) * 4);
			oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw;
		}

		/* determine starting buffer address for current CW */
		cw_data_buf = data_buf + (cw * host->cw_data);
		cw_oob_buf = oob_buf + (cw * ecc->bytes);

		ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
					     cw_oob_buf, page, cw);
		if (ret)
			return ret;

		/*
		 * make sure it isn't an erased page reported
		 * as not-erased by HW because of a few bitflips
		 */
		ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
						  cw_oob_buf + host->bbm_size,
						  oob_size, NULL,
						  0, ecc->strength);
		if (ret < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}
	}

	return max_bitflips;
}
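/*
 * Example (illustrative): with ecc->steps = 4 and uncorrectable_cws = 0b0101,
 * only codewords 0 and 2 are raw-read above and passed through
 * nand_check_erased_ecc_chunk(); codewords 1 and 3 decoded cleanly and are
 * never re-read.
 */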
/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0, uncorrectable_cws = 0;
	struct read_stats *buf;
	bool flash_op_err = false, erased;
	int i;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;

	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		/*
		 * Check ECC failure for each codeword. ECC failure can
		 * happen in either of the following conditions
		 * 1. If number of bitflips are greater than ECC engine
		 *    capability.
		 * 2. If this codeword contains all 0xff for which erased
		 *    codeword detection check will be done.
		 */
		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
			/*
			 * For BCH ECC, ignore erased codeword errors, if
			 * ERASED_CW bits are set.
			 */
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			/*
			 * For RS ECC, HW reports the erased CW by placing
			 * special characters at certain offsets in the buffer.
			 * These special characters will be valid only if
			 * complete page is read i.e. data_buf is not NULL.
			 */
			} else if (data_buf) {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			} else {
				erased = false;
			}

			if (!erased)
				uncorrectable_cws |= BIT(i);
		/*
		 * Check if MPU or any other operational error (timeout,
		 * device failure, etc.) happened for this codeword and
		 * make flash_op_err true. If flash_op_err is set, then
		 * EIO will be returned for page read.
		 */
		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			flash_op_err = true;
		/*
		 * No ECC or operational errors happened. Check the number of
		 * bits corrected and update the ecc_stats.corrected.
		 */
		} else {
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		if (data_buf)
			data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	if (flash_op_err)
		return -EIO;

	if (!uncorrectable_cws)
		return max_bitflips;

	return check_for_erased_page(host, data_buf_start, oob_buf_start,
				     uncorrectable_cws, page,
				     max_bitflips);
}
/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
	int i, ret;

	config_nand_page_read(nandc);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
				nandc_set_read_loc(nandc, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(nandc, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(nandc, true);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e., filled with
		 * 0xffs)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to read page/oob\n");
		return ret;
	}

	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}
/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc, host->use_ecc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
/* implements ecc->read_page() */
static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf = NULL;

	nand_read_page_op(chip, page, 0, NULL, 0);
	data_buf = buf;
	oob_buf = oob_required ? chip->oob_poi : NULL;

	clear_bam_transaction(nandc);

	return read_page_ecc(host, data_buf, oob_buf, page);
}
/* implements ecc->read_page_raw() */
static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int cw, ret;
	u8 *data_buf = buf, *oob_buf = chip->oob_poi;

	for (cw = 0; cw < ecc->steps; cw++) {
		ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
					     page, cw);
		if (ret)
			return ret;

		data_buf += host->cw_data;
		oob_buf += ecc->bytes;
	}

	return 0;
}
/* implements ecc->read_oob() */
static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			       int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	return read_page_ecc(host, NULL, chip->oob_poi, page);
}
/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that are written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only OOB within a codeword
 * since ECC is calculated for the combined codeword. So update the OOB from
 * chip->oob_poi, and pad the data area with 0xFF before writing.
 */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret;

	host->use_ecc = true;
	clear_bam_transaction(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	memset(nandc->data_buffer, 0xff, host->cw_data);
	/* copy the new oob content into the last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading ecc
	 * portion of oob. we just want the first few bytes from this codeword
	 * that contains the BBM
	 */
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	if (check_flash_errors(host, 1)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}
static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
/*
 * the three functions below implement chip->read_byte(), chip->read_buf()
 * and chip->write_buf() respectively. these aren't used for
 * reading/writing page data; they are used for smaller data like reading
 * id, status etc
 */
static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *buf = nandc->data_buffer;
	u8 ret = 0x0;

	if (host->last_command == NAND_CMD_STATUS) {
		ret = host->status;

		host->status = NAND_STATUS_READY | NAND_STATUS_WP;

		return ret;
	}

	if (nandc->buf_start < nandc->buf_count)
		ret = buf[nandc->buf_start++];

	return ret;
}

static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
	nandc->buf_start += real_len;
}

static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				 int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
	nandc->buf_start += real_len;
}

/* we support only one external chip for now */
static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chipnr <= 0)
		return;

	dev_warn(nandc->dev, "invalid chip select\n");
}
/*
 * NAND controller page layout info
 *
 * Layout with ECC enabled:
 *
 * |----------------------|       |---------------------------------|
 * |           xx.......yy|       |             *********xx.......yy|
 * |    DATA   xx..ECC..yy|       |    DATA     **SPARE**xx..ECC..yy|
 * |   (516)   xx.......yy|       | (516-n*4)   **(n*4)**xx.......yy|
 * |           xx.......yy|       |             *********xx.......yy|
 * |----------------------|       |---------------------------------|
 *     codeword 1,2..n-1                    codeword n
 *  <---(528/532 Bytes)-->        <-------(528/532 Bytes)--------->
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = Spare/free bytes
 * x = Unused byte(s)
 * y = Reserved byte(s)
 *
 * 2K page: n = 4, spare = 16 bytes
 * 4K page: n = 8, spare = 32 bytes
 * 8K page: n = 16, spare = 64 bytes
 *
 * the qcom nand controller operates at a sub page/codeword level. each
 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
 * the number of ECC bytes varies based on the ECC strength and the bus width.
 *
 * the first n - 1 codewords contain 516 bytes of user data, the remaining
 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
 * both user data and spare (oobavail) bytes that sum up to 516 bytes.
 *
 * When we access a page with ECC enabled, the reserved byte(s) are not
 * accessible at all. When reading, we fill up these unreadable positions
 * with 0xffs. When writing, the controller skips writing the inaccessible
 * bytes.
 *
 * Layout with ECC disabled:
 *
 * |------------------------------|  |---------------------------------------|
 * |         yy          xx.......|  |         bb          *********xx.......|
 * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
 * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
 * |         yy          xx.......|  |         bb          *********xx.......|
 * |------------------------------|  |---------------------------------------|
 *         codeword 1,2..n-1                        codeword n
 *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = Spare/free bytes
 * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
 * b = Real Bad Block byte(s)
 * size1/size2 = function of codeword size and 'n'
 *
 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
 * Block Markers. In the last codeword, this position contains the real BBM
 *
 * In order to have a consistent layout between RAW and ECC modes, we assume
 * the following OOB layout arrangement:
 *
 * |-----------|  |--------------------|
 * |yyxx.......|  |bb*********xx.......|
 * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
 * |yyxx.......|  |bb*********xx.......|
 * |yyxx.......|  |bb*********xx.......|
 * |-----------|  |--------------------|
 *  first n - 1       nth OOB region
 *  OOB regions
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = FREE OOB bytes
 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
 * x = Unused byte(s)
 * b = Real bad block byte(s) (inaccessible when ECC enabled)
 *
 * This layout is read as is when ECC is disabled. When ECC is enabled, the
 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
 * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
 * the sum of the three).
 */
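/*
 * Worked example (illustrative, derived from the layout above): a 2K page
 * with 4 bit ECC on an 8 bit bus has n = 4 codewords of 528 bytes each.
 * Codewords 1..3 each carry 516 bytes of user data plus 12 non-data bytes;
 * the 4th codeword carries 516 - 4*4 = 500 bytes of user data and 4*4 = 16
 * spare (free OOB) bytes, plus the same 12 non-data bytes.
 */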
static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
				    host->bbm_size;
		oobregion->offset = 0;
	} else {
		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
		oobregion->offset = mtd->oobsize - oobregion->length;
	}

	return 0;
}

static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = ecc->steps * 4;
	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;

	return 0;
}
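/*
 * Continuing the 2K/4 bit/x8 example (illustrative numbers, derived from
 * the two callbacks above): with ecc->steps = 4, ecc->bytes = 12,
 * bbm_size = 1, ecc_bytes_hw = 10, spare_bytes = 1 and oobsize = 64, the
 * ECC regions are [0, 37) and [53, 64), and the free OOB region is
 * [37, 53), i.e. 4 * 4 = 16 bytes.
 */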
static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};

static int
qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
	return strength == 4 ? 12 : 16;
}

NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
		     NANDC_STEP_SIZE, 4, 8);
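/*
 * For reference (derived from the helper above): the single-step caps
 * advertise two (step, strength) combinations, (512, 4) costing 12 OOB
 * bytes per step and (512, 8) costing 16, which nand_ecc_choose_conf()
 * below picks from based on the available OOB size.
 */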
static int qcom_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte, ret;
	bool wide_bus;
	int ecc_mode = 1;

	/* controller only supports 512 bytes data steps */
	ecc->size = NANDC_STEP_SIZE;
	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
	cwperpage = mtd->writesize / NANDC_STEP_SIZE;

	/*
	 * Each CW has 4 available OOB bytes which will be protected with ECC,
	 * so the remaining OOB bytes can be used for ECC parity.
	 */
	ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
				   mtd->oobsize - (cwperpage * 4));
	if (ret) {
		dev_err(nandc->dev, "No valid ECC settings possible\n");
		return ret;
	}

	if (ecc->strength >= 8) {
		/* 8 bit ECC defaults to BCH ECC on all platforms */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * if the controller supports BCH for 4 bit ECC, the controller
		 * uses fewer bytes for ECC. If RS is used, the ECC bytes are
		 * always 10 bytes
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * we consider ecc->bytes as the sum of all the non-data content in a
	 * step. It gives us a clean representation of the oob area (even if
	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
	 * ECC and 12 bytes for 4 bit ECC
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page = qcom_nandc_read_page;
	ecc->read_page_raw = qcom_nandc_read_page_raw;
	ecc->read_oob = qcom_nandc_read_oob;
	ecc->write_page = qcom_nandc_write_page;
	ecc->write_page_raw = qcom_nandc_write_page_raw;
	ecc->write_oob = qcom_nandc_write_oob;

	ecc->mode = NAND_ECC_HW;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/*
	 * DATA_UD_BYTES varies based on whether the read/write command
	 * protects spare data with ECC too. We protect spare data by default,
	 * so we set it to main + spare data, which are 512 and 4 bytes
	 * respectively.
	 */
	host->cw_data = 516;

	/*
	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
	 * for 8 bit ECC
	 */
	host->cw_size = host->cw_data + ecc->bytes;
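	/*
	 * Worked example (illustrative): on a 2K page with 528 byte codewords
	 * the expression below evaluates to 2048 - 528 * 3 + 1 = 465, i.e.
	 * just past the start of the last codeword in the page.
	 */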
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;

	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_data << UD_SIZE_BYTES
				| 0 << DISABLE_STATUS_AFTER_WRITE
				| 5 << NUM_ADDR_CYCLES
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
				| 0 << STATUS_BFR_READ
				| 1 << SET_RD_MODE_AFTER_STATUS
				| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| bad_block_byte << BAD_BLOCK_BYTE_NUM
				| 0 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| host->bch_enabled << ENABLE_BCH_ECC;

	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_size << UD_SIZE_BYTES
				| 5 << NUM_ADDR_CYCLES
				| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| 17 << BAD_BLOCK_BYTE_NUM
				| 1 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
				| 0 << ECC_SW_RESET
				| host->cw_data << ECC_NUM_DATA_BYTES
				| 1 << ECC_FORCE_CLK_OPEN
				| ecc_mode << ECC_MODE
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}
static const struct nand_controller_ops qcom_nandc_ops = {
	.attach_chip = qcom_nand_attach_chip,
};

static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev,
					   MAX_REG_RD,
					   sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
		if (!nandc->tx_chan) {
			dev_err(nandc->dev, "failed to request tx channel\n");
			return -ENODEV;
		}

		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
		if (!nandc->rx_chan) {
			dev_err(nandc->dev, "failed to request rx channel\n");
			return -ENODEV;
		}

		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
		if (!nandc->cmd_chan) {
			dev_err(nandc->dev, "failed to request cmd channel\n");
			return -ENODEV;
		}

		/*
		 * Initially allocate BAM transaction to read ONFI param page.
		 * After detecting all the devices, this BAM transaction will
		 * be freed and the next BAM transaction will be allocated with
		 * maximum codeword size
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	} else {
		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
		if (!nandc->chan) {
			dev_err(nandc->dev,
				"failed to request slave channel\n");
			return -ENODEV;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_controller_init(&nandc->controller);
	nandc->controller.ops = &qcom_nandc_ops;

	return 0;
}
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}
/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	u32 nand_ctrl;

	/* kill onenand */
	if (!nandc->props->is_qpic)
		nandc_write(nandc, SFLASHC_BURST_CFG, 0);
	nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
		    NAND_DEV_CMD_VLD_VAL);

	/* enable ADM or BAM DMA */
	if (nandc->props->is_bam) {
		nand_ctrl = nandc_read(nandc, NAND_CTRL);
		nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
	} else {
		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	}

	/* save the original values of these registers */
	nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}
static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
					    struct qcom_nand_host *host,
					    struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = nandc->dev;
	int ret;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(dev, "can't get chip-select\n");
		return -ENXIO;
	}

	nand_set_flash_node(chip, dn);
	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
	if (!mtd->name)
		return -ENOMEM;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;

	chip->cmdfunc = qcom_nandc_command;
	chip->select_chip = qcom_nandc_select_chip;
	chip->read_byte = qcom_nandc_read_byte;
	chip->read_buf = qcom_nandc_read_buf;
	chip->write_buf = qcom_nandc_write_buf;
	chip->set_features = nand_get_set_features_notsupp;
	chip->get_features = nand_get_set_features_notsupp;

	/*
	 * the bad block marker is readable only when we read the last codeword
	 * of a page with ECC disabled. currently, the nand_base and nand_bbt
	 * helpers don't allow us to read BB from a nand chip with ECC
	 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
	 * and block_markbad helpers until we permanently switch to using
	 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
	 */
	chip->block_bad = qcom_nandc_block_bad;
	chip->block_markbad = qcom_nandc_block_markbad;

	chip->controller = &nandc->controller;
	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
			 NAND_SKIP_BBTSCAN;

	/* set up initial status value */
	host->status = NAND_STATUS_READY | NAND_STATUS_WP;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	if (nandc->props->is_bam) {
		free_bam_transaction(nandc);
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(chip);

	return ret;
}
static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
	struct device *dev = nandc->dev;
	struct device_node *dn = dev->of_node, *child;
	struct qcom_nand_host *host;
	int ret = -ENODEV;

	for_each_available_child_of_node(dn, child) {
		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
		if (!host) {
			of_node_put(child);
			return -ENOMEM;
		}

		ret = qcom_nand_host_init_and_register(nandc, host, child);
		if (ret) {
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	return ret;
}
/* parse custom DT properties here */
static int qcom_nandc_parse_dt(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct device_node *np = nandc->dev->of_node;
	int ret;

	if (!nandc->props->is_bam) {
		ret = of_property_read_u32(np, "qcom,cmd-crci",
					   &nandc->cmd_crci);
		if (ret) {
			dev_err(nandc->dev, "command CRCI unspecified\n");
			return ret;
		}

		ret = of_property_read_u32(np, "qcom,data-crci",
					   &nandc->data_crci);
		if (ret) {
			dev_err(nandc->dev, "data CRCI unspecified\n");
			return ret;
		}
	}

	return 0;
}
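/*
 * Illustrative DT fragment for the ADM (non-BAM) case handled above. The
 * node shape and the CRCI values are a sketch; only the property names and
 * the "rxtx" DMA channel name are taken from this driver:
 *
 *	nand-controller {
 *		compatible = "qcom,ipq806x-nand";
 *		qcom,cmd-crci = <15>;
 *		qcom,data-crci = <3>;
 *		dma-names = "rxtx";
 *	};
 */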
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	nandc->base_phys = res->start;
	nandc->base_dma = dma_map_resource(dev, res->start,
					   resource_size(res),
					   DMA_BIDIRECTIONAL, 0);
	if (!nandc->base_dma)
		return -ENXIO;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_nandc_alloc;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);
err_nandc_alloc:
	dma_unmap_resource(dev, res->start, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return ret;
}
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct qcom_nand_host *host;

	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(&host->chip);

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return 0;
}
static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.dev_cmd_reg_start = 0x7000,
};

/*
 * data will hold a struct pointer containing more differences once we support
 * more controller variants
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
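/*
 * Illustrative (not authoritative) node for a BAM based variant, using only
 * the names this driver actually looks up: the "core"/"aon" clocks, the
 * "tx"/"rx"/"cmd" DMA channels, and a child node whose "reg" property is the
 * chip select. Phandle-valued properties (reg, clocks, dmas) are omitted:
 *
 *	nand-controller {
 *		compatible = "qcom,ipq4019-nand";
 *		clock-names = "core", "aon";
 *		dma-names = "tx", "rx", "cmd";
 *
 *		nand@0 {
 *			reg = <0>;
 *		};
 *	};
 */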
static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe = qcom_nandc_probe,
	.remove = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");