at_xdmac.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 */

#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */
#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
#define AT_XDMAC_WRHP(i) (((i) & 0xF) << 4)
#define AT_XDMAC_WRMP(i) (((i) & 0xF) << 8)
#define AT_XDMAC_WRLP(i) (((i) & 0xF) << 12)
#define AT_XDMAC_RDHP(i) (((i) & 0xF) << 16)
#define AT_XDMAC_RDMP(i) (((i) & 0xF) << 20)
#define AT_XDMAC_RDLP(i) (((i) & 0xF) << 24)
#define AT_XDMAC_RDSG(i) (((i) & 0xF) << 28)
#define AT_XDMAC_GCFG_M2M (AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
#define AT_XDMAC_GCFG_P2M (AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
			   AT_XDMAC_WRHP(0x5))
#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_PW0(i) (((i) & 0xF) << 0)
#define AT_XDMAC_PW1(i) (((i) & 0xF) << 4)
#define AT_XDMAC_PW2(i) (((i) & 0xF) << 8)
#define AT_XDMAC_PW3(i) (((i) & 0xF) << 12)
#define AT_XDMAC_GWAC_M2M 0
#define AT_XDMAC_GWAC_P2M (AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))
#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27)
#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
#define AT_XDMAC_CC_DWIDTH_OFFSET 11
#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
#define AT_XDMAC_CC_DWIDTH_WORD 0x2
#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN 0x20
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES 5

#define AT_XDMAC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
	AT_XDMAC_CHAN_IS_CYCLIC = 0,
	AT_XDMAC_CHAN_IS_PAUSED,
	AT_XDMAC_CHAN_IS_PAUSED_INTERNAL,
};

struct at_xdmac_layout {
	/* Global Channel Read Suspend Register */
	u8 grs;
	/* Global Write Suspend Register */
	u8 gws;
	/* Global Channel Read Write Suspend Register */
	u8 grws;
	/* Global Channel Read Write Resume Register */
	u8 grwr;
	/* Global Channel Software Request Register */
	u8 gswr;
	/* Global channel Software Request Status Register */
	u8 gsws;
	/* Global Channel Software Flush Request Register */
	u8 gswf;
	/* Channel reg base */
	u8 chan_cc_reg_base;
	/* Whether the Source/Destination Interface must be specified */
	bool sdif;
	/* AXI queue priority configuration supported */
	bool axi_config;
};

/* ----- Channels ----- */
struct at_xdmac_chan {
	struct dma_chan chan;
	void __iomem *ch_regs;
	u32 mask; /* Channel Mask */
	u32 cfg; /* Channel Configuration Register */
	u8 perid; /* Peripheral ID */
	u8 perif; /* Peripheral Interface */
	u8 memif; /* Memory Interface */
	u32 save_cc;
	u32 save_cim;
	u32 save_cnda;
	u32 save_cndc;
	u32 irq_status;
	unsigned long status;
	struct tasklet_struct tasklet;
	struct dma_slave_config sconfig;
	spinlock_t lock;
	struct list_head xfers_list;
	struct list_head free_descs_list;
};

/* ----- Controller ----- */
struct at_xdmac {
	struct dma_device dma;
	void __iomem *regs;
	struct device *dev;
	int irq;
	struct clk *clk;
	u32 save_gim;
	u32 save_gs;
	struct dma_pool *at_xdmac_desc_pool;
	const struct at_xdmac_layout *layout;
	struct at_xdmac_chan chan[];
};

/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
	u32 mbr_nda; /* Next Descriptor Member */
	u32 mbr_ubc; /* Microblock Control Member */
	u32 mbr_sa; /* Source Address Member */
	u32 mbr_da; /* Destination Address Member */
	u32 mbr_cfg; /* Configuration Register */
	u32 mbr_bc; /* Block Control Register */
	u32 mbr_ds; /* Data Stride Register */
	u32 mbr_sus; /* Source Microblock Stride Register */
	u32 mbr_dus; /* Destination Microblock Stride Register */
};

/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
	struct at_xdmac_lld lld;
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor tx_dma_desc;
	struct list_head desc_node;
	/* Following members are only used by the first descriptor */
	bool active_xfer;
	unsigned int xfer_size;
	struct list_head descs_list;
	struct list_head xfer_node;
} __aligned(sizeof(u64));

static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
	.grs = 0x28,
	.gws = 0x2C,
	.grws = 0x30,
	.grwr = 0x34,
	.gswr = 0x38,
	.gsws = 0x3C,
	.gswf = 0x40,
	.chan_cc_reg_base = 0x50,
	.sdif = true,
	.axi_config = false,
};

static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
	.grs = 0x30,
	.gws = 0x38,
	.grws = 0x40,
	.grwr = 0x44,
	.gswr = 0x48,
	.gsws = 0x4C,
	.gswf = 0x50,
	.chan_cc_reg_base = 0x60,
	.sdif = false,
	.axi_config = true,
};
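/*
 * Worked example: each channel owns a 0x40-byte register window starting
 * at chan_cc_reg_base, so on the sama5d4 layout channel 2 lives at
 * 0x50 + 2 * 0x40 = 0xd0 from the controller base (0x60 + 0x80 = 0xe0 on
 * sama7g5).
 */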
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
	return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
}

#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
	writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
	return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

static inline int at_xdmac_chan_is_paused_internal(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
}

static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
{
	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
}

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
}

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
		if (!desc->active_xfer)
			continue;

		pm_runtime_mark_last_busy(atxdmac->dev);
		pm_runtime_put_autosuspend(atxdmac->dev);
	}
}

static int at_xdmac_runtime_resume_descriptors(struct at_xdmac_chan *atchan)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *desc, *_desc;
	int ret;

	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
		if (!desc->active_xfer)
			continue;

		ret = pm_runtime_resume_and_get(atxdmac->dev);
		if (ret < 0)
			return ret;
	}

	return 0;
}
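/*
 * Note: each in-flight transfer holds one runtime-PM reference on the
 * controller (taken in at_xdmac_start_xfer() below), so the two helpers
 * above drop and re-acquire one reference per descriptor with active_xfer
 * set, keeping the usage counter balanced across suspend and resume.
 */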
static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	int ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return false;

	ret = !!(at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return ret;
}

static void at_xdmac_off(struct at_xdmac *atxdmac, bool suspend_descriptors)
{
	struct dma_chan *chan, *_chan;
	struct at_xdmac_chan *atchan;
	int ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return;

	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

	/* Wait until all channels are disabled. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
		cpu_relax();

	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);

	/* Decrement runtime PM ref counter for each active descriptor. */
	if (!list_empty(&atxdmac->dma.channels) && suspend_descriptors) {
		list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels,
					 device_node) {
			atchan = to_at_xdmac_chan(chan);
			at_xdmac_runtime_suspend_descriptors(atchan);
		}
	}

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);
}

/* Call with lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *first)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	u32 reg;
	int ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return;

	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

	/* Mark the transfer as active so we don't try to start it again. */
	first->active_xfer = true;

	/* Tell xdmac where to get the first descriptor. */
	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
	if (atxdmac->layout->sdif)
		reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

	/*
	 * When doing a non-cyclic transfer we need to use next descriptor
	 * view 2 (or 3), since some fields of the configuration register
	 * depend on the transfer size and the src/dest addresses.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
	else if ((first->lld.mbr_ubc &
		  AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
	else
		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;

	/*
	 * Even if the register will be updated from the configuration in the
	 * descriptor when using view 2 or higher, the PROT bit won't be set
	 * properly. This bit can be modified only by using the channel
	 * configuration register.
	 */
	at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

	reg |= AT_XDMAC_CNDC_NDDUP
	       | AT_XDMAC_CNDC_NDSUP
	       | AT_XDMAC_CNDC_NDE;
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
	/*
	 * Request Overflow Error is only for peripheral synchronized
	 * transfers.
	 */
	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
		reg |= AT_XDMAC_CIE_ROIE;

	/*
	 * There is no end of list when doing cyclic DMA, so we need to get
	 * an interrupt after each period.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_BIE);
	else
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
	wmb();
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_xdmac_desc *desc = txd_to_at_desc(tx);
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long irqflags;

	spin_lock_irqsave(&atchan->lock, irqflags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
	spin_unlock_irqrestore(&atchan->lock, irqflags);

	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
		 __func__, atchan, desc);

	return cookie;
}

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac_desc *desc;
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	dma_addr_t phys;

	desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
		desc->tx_dma_desc.phys = phys;
	}

	return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
	memset(&desc->lld, 0, sizeof(desc->lld));
	INIT_LIST_HEAD(&desc->descs_list);
	desc->direction = DMA_TRANS_NONE;
	desc->xfer_size = 0;
	desc->active_xfer = false;
}

/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	if (list_empty(&atchan->free_descs_list)) {
		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
	} else {
		desc = list_first_entry(&atchan->free_descs_list,
					struct at_xdmac_desc, desc_node);
		list_del(&desc->desc_node);
		at_xdmac_init_used_desc(desc);
	}

	return desc;
}
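/*
 * Note: at_xdmac_get_desc() runs under the channel spinlock and may be
 * reached from atomic context via the prep callbacks, which is why the
 * fallback allocation above uses GFP_NOWAIT rather than GFP_KERNEL.
 */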
static void at_xdmac_queue_desc(struct dma_chan *chan,
				struct at_xdmac_desc *prev,
				struct at_xdmac_desc *desc)
{
	if (!prev || !desc)
		return;

	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

	dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
		__func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
						  struct at_xdmac_desc *desc)
{
	if (!desc)
		return;

	desc->lld.mbr_bc++;

	dev_dbg(chan2dev(chan),
		"%s: incrementing the block count of the desc 0x%p\n",
		__func__, desc);
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
	struct at_xdmac *atxdmac = of_dma->of_dma_data;
	struct at_xdmac_chan *atchan;
	struct dma_chan *chan;
	struct device *dev = atxdmac->dma.dev;

	if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandle args: bad number of args\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&atxdmac->dma);
	if (!chan) {
		dev_err(dev, "can't get a dma channel\n");
		return NULL;
	}

	atchan = to_at_xdmac_chan(chan);
	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);

	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
		atchan->memif, atchan->perif, atchan->perid);

	return chan;
}
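/*
 * A minimal device-tree sketch of the single cell decoded above, built
 * with the AT91_XDMAC_DT_* helpers from <dt-bindings/dma/at91.h>. The
 * node name, controller phandle and request line are hypothetical,
 * chosen only to illustrate the encoding:
 *
 *	serial@f8020000 {
 *		dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) |
 *			       AT91_XDMAC_DT_PER_IF(1) |
 *			       AT91_XDMAC_DT_PERID(10))>;
 *		dma-names = "tx";
 *	};
 */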
static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
				      enum dma_transfer_direction direction)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	int csize, dwidth;

	if (direction == DMA_DEV_TO_MEM) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_INCREMENTED_AM
			| AT_XDMAC_CC_SAM_FIXED_AM
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_PER2MEM
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		if (atxdmac->layout->sdif)
			atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
				       AT_XDMAC_CC_SIF(atchan->perif);

		csize = ffs(atchan->sconfig.src_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid src maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);

		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid src addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	} else if (direction == DMA_MEM_TO_DEV) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_FIXED_AM
			| AT_XDMAC_CC_SAM_INCREMENTED_AM
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_MEM2PER
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		if (atxdmac->layout->sdif)
			atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
				       AT_XDMAC_CC_SIF(atchan->memif);

		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid dst maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);

		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid dst addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	}

	dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);

	return 0;
}
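/*
 * Worked example: with src_maxburst = 16 and src_addr_width =
 * DMA_SLAVE_BUSWIDTH_4_BYTES, ffs(16) - 1 = 4 selects a 16-data chunk via
 * AT_XDMAC_CC_CSIZE() and ffs(4) - 1 = 2 selects AT_XDMAC_CC_DWIDTH_WORD;
 * both fields are simply log2 encodings of the dmaengine values.
 */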
/*
 * Only check that the maxburst and addr width values are supported by the
 * controller; we cannot check that the configuration is valid for the
 * transfer since we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
		return -EINVAL;

	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
		return -EINVAL;

	return 0;
}

static int at_xdmac_set_slave_config(struct dma_chan *chan,
				     struct dma_slave_config *sconfig)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	if (at_xdmac_check_slave_config(sconfig)) {
		dev_err(chan2dev(chan), "invalid slave configuration\n");
		return -EINVAL;
	}

	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

	return 0;
}
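/*
 * A minimal client-side sketch of how this callback is reached through
 * the generic dmaengine API. "chan" and "fifo_phys_addr" are hypothetical
 * placeholders for a previously requested channel and a peripheral FIFO
 * address:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst = 1,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */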
static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	struct scatterlist *sg;
	int i;
	unsigned int xfer_size = 0;
	unsigned long irqflags;
	struct dma_async_tx_descriptor *ret = NULL;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
		__func__, sg_len,
		direction == DMA_MEM_TO_DEV ? "to device" : "from device",
		flags);

	/* Protect the sconfig field that can be modified by at_xdmac_set_slave_config(). */
	spin_lock_irqsave(&atchan->lock, irqflags);

	if (at_xdmac_compute_chan_conf(chan, direction))
		goto spin_unlock;

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		struct at_xdmac_desc *desc = NULL;
		u32 len, mem, dwidth, fixed_dwidth;

		len = sg_dma_len(sg);
		mem = sg_dma_address(sg);
		if (unlikely(!len)) {
			dev_err(chan2dev(chan), "sg data length is zero\n");
			goto spin_unlock;
		}
		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
			__func__, i, len, mem);

		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			goto spin_unlock;
		}

		/* Linked list descriptor setup. */
		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = mem;
		} else {
			desc->lld.mbr_sa = mem;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		dwidth = at_xdmac_get_dwidth(atchan->cfg);
		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
			       ? dwidth
			       : AT_XDMAC_CC_DWIDTH_BYTE;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2	/* next descriptor view */
			| AT_XDMAC_MBR_UBC_NDEN			/* next descriptor dst parameter update */
			| AT_XDMAC_MBR_UBC_NSEN			/* next descriptor src parameter update */
			| (len >> fixed_dwidth);		/* microblock length */
		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
		xfer_size += len;
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = xfer_size;
	first->direction = direction;
	ret = &first->tx_dma_desc;

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, irqflags);
	return ret;
}
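/*
 * Worked example of the fixed_dwidth fallback above: with the channel
 * configured for word (4-byte) transfers, a 6-byte segment is not
 * word-aligned, so that segment alone is downgraded to byte width and its
 * microblock length becomes 6 (len >> 0) instead of len >> 2.
 */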
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	unsigned int periods = buf_len / period_len;
	int i;
	unsigned long irqflags;

	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
		__func__, &buf_addr, buf_len, period_len,
		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
		dev_err(chan2dev(chan), "channel currently used\n");
		return NULL;
	}

	if (at_xdmac_compute_chan_conf(chan, direction))
		return NULL;

	for (i = 0; i < periods; i++) {
		struct at_xdmac_desc *desc = NULL;

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			spin_unlock_irqrestore(&atchan->lock, irqflags);
			return NULL;
		}
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		dev_dbg(chan2dev(chan),
			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
			__func__, desc, &desc->tx_dma_desc.phys);

		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = buf_addr + i * period_len;
		} else {
			desc->lld.mbr_sa = buf_addr + i * period_len;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		desc->lld.mbr_cfg = atchan->cfg;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	at_xdmac_queue_desc(chan, prev, first);
	first->tx_dma_desc.flags = flags;
	first->xfer_size = buf_len;
	first->direction = direction;

	return &first->tx_dma_desc;
}
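/*
 * The final at_xdmac_queue_desc() call above links the last descriptor
 * back to the first, which is what makes the transfer cyclic. A typical
 * client-side use, sketched with generic dmaengine calls and a
 * hypothetical ring buffer ("ring_phys", "ring_len", "half_done_cb"):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, ring_phys, ring_len,
 *					 ring_len / 2, DMA_DEV_TO_MEM,
 *					 DMA_PREP_INTERRUPT);
 *	desc->callback = half_done_cb;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */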
static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
	u32 width;

	/*
	 * Check address alignment to select the greatest data width we
	 * can use.
	 *
	 * Some XDMAC implementations don't provide dword transfer, in
	 * this case selecting dword has the same behavior as
	 * selecting word transfers.
	 */
	if (!(addr & 7)) {
		width = AT_XDMAC_CC_DWIDTH_DWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
	} else if (!(addr & 3)) {
		width = AT_XDMAC_CC_DWIDTH_WORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
	} else if (!(addr & 1)) {
		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
	} else {
		width = AT_XDMAC_CC_DWIDTH_BYTE;
		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
	}

	return width;
}
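/*
 * Worked example: callers OR together the source address, the destination
 * address and (optionally) the length so the chosen width satisfies all
 * of them. at_xdmac_align_width(chan, 0x1008 | 0x2004 | 0x100) sees bit 2
 * set in the result (0x310c), fails the "& 7" test, passes the "& 3" test
 * and returns AT_XDMAC_CC_DWIDTH_WORD.
 */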
static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
				struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *prev,
				dma_addr_t src, dma_addr_t dst,
				struct dma_interleaved_template *xt,
				struct data_chunk *chunk)
{
	struct at_xdmac_desc *desc;
	u32 dwidth;
	unsigned long flags;
	size_t ublen;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover, we don't know
	 * the direction, which means we can't dynamically set the source and
	 * dest interface, so we have to use the same one. Only interface 0
	 * allows EBI access. Hopefully we can access DDR through both ports
	 * (at least on SAMA5D4x), so we can use the same interface for source
	 * and dest, which solves the fact that we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel; otherwise it could lead to spurious
	 * flag status.
	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used,
	 * so there is no need to set the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_dbg(chan2dev(chan),
			"%s: chunk too big (%zu, max size %lu)...\n",
			__func__, chunk->size,
			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
		return NULL;
	}

	if (prev)
		dev_dbg(chan2dev(chan),
			"Adding items at the end of desc 0x%p\n", prev);

	if (xt->src_inc) {
		if (xt->src_sgl)
			chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
	}

	if (xt->dst_inc) {
		if (xt->dst_sgl)
			chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = chunk->size >> dwidth;

	desc->lld.mbr_sa = src;
	desc->lld.mbr_da = dst;
	desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
	desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);

	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
		desc->lld.mbr_ubc, desc->lld.mbr_cfg);

	/* Chain lld. */
	if (prev)
		at_xdmac_queue_desc(chan, prev, desc);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *prev = NULL, *first = NULL;
	dma_addr_t dst_addr, src_addr;
	size_t src_skip = 0, dst_skip = 0, len = 0;
	struct data_chunk *chunk;
	int i;

	if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
		return NULL;

	/*
	 * TODO: Handle the case where we have to repeat a chain of
	 * descriptors...
	 */
	if ((xt->numf > 1) && (xt->frame_size > 1))
		return NULL;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	src_addr = xt->src_start;
	dst_addr = xt->dst_start;

	if (xt->numf > 1) {
		first = at_xdmac_interleaved_queue_desc(chan, atchan,
							NULL,
							src_addr, dst_addr,
							xt, xt->sgl);
		if (!first)
			return NULL;

		/* Length of the block is (BLEN+1) microblocks. */
		for (i = 0; i < xt->numf - 1; i++)
			at_xdmac_increment_block_count(chan, first);

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, first, first);
		list_add_tail(&first->desc_node, &first->descs_list);
	} else {
		for (i = 0; i < xt->frame_size; i++) {
			size_t src_icg = 0, dst_icg = 0;
			struct at_xdmac_desc *desc;

			chunk = xt->sgl + i;

			dst_icg = dmaengine_get_dst_icg(xt, chunk);
			src_icg = dmaengine_get_src_icg(xt, chunk);

			src_skip = chunk->size + src_icg;
			dst_skip = chunk->size + dst_icg;

			dev_dbg(chan2dev(chan),
				"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
				__func__, chunk->size, src_icg, dst_icg);

			desc = at_xdmac_interleaved_queue_desc(chan, atchan,
							       prev,
							       src_addr, dst_addr,
							       xt, chunk);
			if (!desc) {
				if (first)
					list_splice_tail_init(&first->descs_list,
							      &atchan->free_descs_list);
				return NULL;
			}

			if (!first)
				first = desc;

			dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
				__func__, desc, first);
			list_add_tail(&desc->desc_node, &first->descs_list);

			if (xt->src_sgl)
				src_addr += src_skip;

			if (xt->dst_sgl)
				dst_addr += dst_skip;

			len += chunk->size;
			prev = desc;
		}
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	size_t remaining_size = len, xfer_size = 0, ublen;
	dma_addr_t src_addr = src, dst_addr = dest;
	u32 dwidth;
	/*
	 * WARNING: We don't know the direction, which means we can't
	 * dynamically set the source and dest interface, so we have to use
	 * the same one. Only interface 0 allows EBI access. Hopefully we can
	 * access DDR through both ports (at least on SAMA5D4x), so we can
	 * use the same interface for source and dest, which solves the fact
	 * that we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel; otherwise it could lead to spurious
	 * flag status.
	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used,
	 * so there is no need to set the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
		| AT_XDMAC_CC_DAM_INCREMENTED_AM
		| AT_XDMAC_CC_SAM_INCREMENTED_AM
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_MEM_TRAN;
	unsigned long irqflags;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
		__func__, &src, &dest, len, flags);

	if (unlikely(!len))
		return NULL;

	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);

	/* Prepare descriptors. */
	while (remaining_size) {
		struct at_xdmac_desc *desc = NULL;

		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			return NULL;
		}

		/* Update src and dest addresses. */
		src_addr += xfer_size;
		dst_addr += xfer_size;

		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
		else
			xfer_size = remaining_size;

		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

		/* Check remaining length and change data width if needed. */
		dwidth = at_xdmac_align_width(chan,
					      src_addr | dst_addr | xfer_size);
		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
		remaining_size -= xfer_size;

		desc->lld.mbr_sa = src_addr;
		desc->lld.mbr_da = dst_addr;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| ublen;
		desc->lld.mbr_cfg = chan_cc;

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}
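/*
 * Worked example of the chunking above: with word-aligned (but not
 * dword-aligned) addresses, dwidth = WORD, so each descriptor can move up
 * to AT_XDMAC_MBR_UBC_UBLEN_MAX << 2 bytes (0xFFFFFF words, slightly
 * under 64 MiB) and a 40 MiB copy fits in a single microblock; a
 * byte-aligned copy would instead be split into chunks of at most
 * 0xFFFFFF bytes (16 MiB - 1), i.e. three descriptors.
 */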
static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
							 struct at_xdmac_chan *atchan,
							 dma_addr_t dst_addr,
							 size_t len,
							 int value)
{
	struct at_xdmac_desc	*desc;
	unsigned long		flags;
	size_t			ublen;
	u32			dwidth;
	u8			pattern;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover, we don't know
	 * the direction, which means we can't dynamically set the source and
	 * dest interface, so we have to use the same one. Only interface 0
	 * allows EBI access. Hopefully we can access DDR through both ports
	 * (at least on SAMA5D4x), so we can use the same interface for source
	 * and dest, which works around the fact that we don't know the
	 * direction.
	 * ERRATA: Even if useless for memory transfers, the PERID has to not
	 * match the one of another channel. If not, it could lead to spurious
	 * flag status.
	 * For SAMA7G5x case, the SIF and DIF fields are no longer used.
	 * Thus, no need to have the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32			chan_cc = AT_XDMAC_CC_PERID(0x7f)
					| AT_XDMAC_CC_DAM_UBS_AM
					| AT_XDMAC_CC_SAM_INCREMENTED_AM
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_MEMSET_HW_MODE
					| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, dst_addr);

	if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_err(chan2dev(chan),
			"%s: Transfer too large, aborting...\n",
			__func__);
		return NULL;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	/* Only the first byte of value is to be used according to dmaengine */
	pattern = (u8)value;

	ublen = len >> dwidth;

	desc->lld.mbr_da = dst_addr;
	desc->lld.mbr_ds = (pattern << 24) |
			   (pattern << 16) |
			   (pattern << 8) |
			   pattern;
	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
		desc->lld.mbr_cfg);

	return desc;
}
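
/*
 * dmaengine device_prep_dma_memset entry point: a plain memset is a single
 * block, so one descriptor is enough.
 */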
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc;

	dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
		__func__, &dest, len, value, flags);

	if (unlikely(!len))
		return NULL;

	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
	if (!desc)
		return NULL;

	list_add_tail(&desc->desc_node, &desc->descs_list);

	desc->tx_dma_desc.cookie = -EBUSY;
	desc->tx_dma_desc.flags = flags;
	desc->xfer_size = len;

	return &desc->tx_dma_desc;
}
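
/*
 * Scatter-gather memset. Rather than emitting one descriptor per scatterlist
 * entry, consecutive entries with the same length and stride are folded into
 * a single descriptor whose block count is incremented, which keeps the
 * hardware linked list short for regular layouts.
 */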
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, int value,
			    unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc, *pdesc = NULL,
				*ppdesc = NULL, *first = NULL;
	struct scatterlist	*sg, *psg = NULL, *ppsg = NULL;
	size_t			stride = 0, pstride = 0, len = 0;
	int			i;

	if (!sgl)
		return NULL;

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
		__func__, sg_len, value, flags);

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
			__func__, &sg_dma_address(sg), sg_dma_len(sg),
			value, flags);
		desc = at_xdmac_memset_create_desc(chan, atchan,
						   sg_dma_address(sg),
						   sg_dma_len(sg),
						   value);
		/* On failure, put back what we already built and bail out. */
		if (!desc) {
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			return NULL;
		}

		if (!first)
			first = desc;

		/* Update our strides */
		pstride = stride;
		if (psg)
			stride = sg_dma_address(sg) -
				(sg_dma_address(psg) + sg_dma_len(psg));

		/*
		 * The scatterlist API gives us only the address and
		 * length of each element.
		 *
		 * Unfortunately, we don't have the stride, which we
		 * will need to compute.
		 *
		 * That makes us end up in a situation like this one:
		 *    len    stride    len    stride    len
		 * +-------+        +-------+        +-------+
		 * |  N-2  |        |  N-1  |        |   N   |
		 * +-------+        +-------+        +-------+
		 *
		 * We need all these three elements (N-2, N-1 and N)
		 * to actually take the decision on whether we need to
		 * queue N-1 or reuse N-2.
		 *
		 * We will only consider N if it is the last element.
		 */
		if (ppdesc && pdesc) {
			if ((stride == pstride) &&
			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
				dev_dbg(chan2dev(chan),
					"%s: desc 0x%p can be merged with desc 0x%p\n",
					__func__, pdesc, ppdesc);

				/*
				 * Increment the block count of the
				 * N-2 descriptor
				 */
				at_xdmac_increment_block_count(chan, ppdesc);
				ppdesc->lld.mbr_dus = stride;

				/*
				 * Put back the N-1 descriptor in the
				 * free descriptor list
				 */
				list_add_tail(&pdesc->desc_node,
					      &atchan->free_descs_list);

				/*
				 * Make our N-1 descriptor pointer
				 * point to the N-2 since they were
				 * actually merged.
				 */
				pdesc = ppdesc;

			/*
			 * Rule out the case where we don't have
			 * pstride computed yet (our second sg
			 * element).
			 *
			 * We also want to catch the case where there
			 * would be a negative stride.
			 */
			} else if (pstride ||
				   sg_dma_address(sg) < sg_dma_address(psg)) {
				/*
				 * Queue the N-1 descriptor after the
				 * N-2
				 */
				at_xdmac_queue_desc(chan, ppdesc, pdesc);

				/*
				 * Add the N-1 descriptor to the list
				 * of the descriptors used for this
				 * transfer
				 */
				list_add_tail(&desc->desc_node,
					      &first->descs_list);
				dev_dbg(chan2dev(chan),
					"%s: add desc 0x%p to descs_list 0x%p\n",
					__func__, desc, first);
			}
		}

		/*
		 * If we are the last element, just see if we have the
		 * same size as the previous element.
		 *
		 * If so, we can merge it with the previous descriptor
		 * since we don't care about the stride anymore.
		 */
		if ((i == (sg_len - 1)) &&
		    sg_dma_len(psg) == sg_dma_len(sg)) {
			dev_dbg(chan2dev(chan),
				"%s: desc 0x%p can be merged with desc 0x%p\n",
				__func__, desc, pdesc);

			/*
			 * Increment the block count of the N-1
			 * descriptor
			 */
			at_xdmac_increment_block_count(chan, pdesc);
			pdesc->lld.mbr_dus = stride;

			/*
			 * Put back the N descriptor in the free
			 * descriptor list
			 */
			list_add_tail(&desc->desc_node,
				      &atchan->free_descs_list);
		}

		/* Update our descriptors */
		ppdesc = pdesc;
		pdesc = desc;

		/* Update our scatter pointers */
		ppsg = psg;
		psg = sg;

		len += sg_dma_len(sg);
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}
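
/*
 * Report the status of a transfer and, for the in-flight descriptor, derive
 * the residue from a consistent CNDA/CUBC register snapshot; see the
 * comments in the body for why a single register read is not sufficient.
 */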
static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc	*desc, *_desc, *iter;
	struct list_head	*descs_list;
	enum dma_status		ret;
	int			residue, retry, pm_status;
	u32			cur_nda, check_nda, cur_ubc, mask, value;
	u8			dwidth = 0;
	unsigned long		flags;
	bool			initd;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	pm_status = pm_runtime_resume_and_get(atxdmac->dev);
	if (pm_status < 0)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

	/*
	 * If the transfer has not been started yet, there is no need to
	 * compute the residue: it is simply the transfer length.
	 */
	if (!desc->active_xfer) {
		dma_set_residue(txstate, desc->xfer_size);
		goto spin_unlock;
	}

	residue = desc->xfer_size;
	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. A flush is needed before reading CUBC because data in
	 * the FIFO is not reported by CUBC. Reporting a residue equal to the
	 * transfer length while data sits in the FIFO can cause issues.
	 * Use case: the Atmel USART has a timeout which fires when characters
	 * have been received but no new character arrives for a while. On
	 * timeout, the driver requests the residue. If the data is still in
	 * the DMA FIFO, we would return a residue equal to the transfer
	 * length, i.e. "no data received". An application waiting for that
	 * data would then hang, since there will be no further USART timeout
	 * without new incoming data.
	 */
	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	/*
	 * The easiest way to compute the residue should be to pause the DMA
	 * but doing this can lead to missing some data as some devices don't
	 * have any FIFO.
	 * We need to read several registers because:
	 * - DMA is running, therefore a descriptor change is possible while
	 *   reading these registers
	 * - When the block transfer is done, the value of the CUBC register
	 *   is set to its initial value until the fetch of the next
	 *   descriptor. This value would corrupt the residue calculation, so
	 *   we have to skip it.
	 *
	 * INITD --------                    ------------
	 *              |____________________|
	 *        _______________________  _______________
	 * NDA       @desc2             \/   @desc3
	 *        _______________________/\_______________
	 *        __________  ___________  _______________
	 * CUBC       0    \/ MAX desc1 \/  MAX desc2
	 *        __________/\___________/\_______________
	 *
	 * Since descriptors are aligned on 64 bits, we can assume that
	 * the update of NDA and CUBC is atomic.
	 * Memory barriers are used to ensure the read order of the registers.
	 * A maximum number of retries is set because, although unlikely, the
	 * loop could otherwise never end.
	 */
	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();
		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
		rmb();
		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
		rmb();
		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();

		if ((check_nda == cur_nda) && initd)
			break;
	}

	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
		ret = DMA_ERROR;
		goto spin_unlock;
	}

	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. Another flush is needed here because CUBC is updated
	 * when the controller sends the data write command. It can therefore
	 * report data that has not yet been written to memory or to the
	 * device. The FIFO flush ensures that data is really written.
	 */
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	/*
	 * Subtract the size of all microblocks already transferred, including
	 * the current one, then add back the part of the current microblock
	 * that is still to be transferred.
	 */
	descs_list = &desc->descs_list;
	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
			desc = iter;
			break;
		}
	}
	residue += cur_ubc << dwidth;

	dma_set_residue(txstate, residue);

	dev_dbg(chan2dev(chan),
		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);
	return ret;
}
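
/* Called with atchan->lock held: start the next queued transfer, if any. */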
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	/*
	 * If the channel is enabled, do nothing: advance_work will be
	 * triggered again after the interrupt.
	 */
	if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
		return;

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
	if (!desc->active_xfer)
		at_xdmac_start_xfer(atchan, desc);
}
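
/*
 * Cyclic transfers never complete: only invoke the client callback for the
 * period interrupt and leave the descriptor on the transfer list.
 */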
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc		*desc;
	struct dma_async_tx_descriptor	*txd;

	spin_lock_irq(&atchan->lock);
	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
		__func__, atchan->irq_status);
	if (list_empty(&atchan->xfers_list)) {
		spin_unlock_irq(&atchan->lock);
		return;
	}
	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	spin_unlock_irq(&atchan->lock);

	txd = &desc->tx_dma_desc;
	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);
}
/* Called with atchan->lock held. */
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc	*bad_desc;
	int			ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to continue with other
	 * descriptors queued (if any).
	 */
	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
		dev_err(chan2dev(&atchan->chan), "read bus error!!!");
	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
		dev_err(chan2dev(&atchan->chan), "write bus error!!!");
	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

	/* Channel must be disabled first as it's not done automatically */
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	bad_desc = list_first_entry(&atchan->xfers_list,
				    struct at_xdmac_desc,
				    xfer_node);

	/* Print bad descriptor's details if needed */
	dev_dbg(chan2dev(&atchan->chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
		bad_desc->lld.mbr_ubc);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	/* Then continue with usual descriptor management */
}
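
/*
 * Bottom half: complete the descriptor at the head of the transfer list, run
 * its callback and dependencies outside the lock, recycle its descriptors
 * and start the next queued transfer.
 */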
static void at_xdmac_tasklet(struct tasklet_struct *t)
{
	struct at_xdmac_chan		*atchan = from_tasklet(atchan, t, tasklet);
	struct at_xdmac			*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc		*desc;
	struct dma_async_tx_descriptor	*txd;
	u32				error_mask;

	if (at_xdmac_chan_is_cyclic(atchan))
		return at_xdmac_handle_cyclic(atchan);

	error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
		AT_XDMAC_CIS_ROIS;

	spin_lock_irq(&atchan->lock);

	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
		__func__, atchan->irq_status);

	if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
	    !(atchan->irq_status & error_mask)) {
		spin_unlock_irq(&atchan->lock);
		return;
	}

	if (atchan->irq_status & error_mask)
		at_xdmac_handle_error(atchan);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
	if (!desc->active_xfer) {
		dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
		spin_unlock_irq(&atchan->lock);
		return;
	}

	txd = &desc->tx_dma_desc;
	dma_cookie_complete(txd);
	/* Remove the transfer from the transfer list. */
	list_del(&desc->xfer_node);
	spin_unlock_irq(&atchan->lock);

	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);

	dma_run_dependencies(txd);

	spin_lock_irq(&atchan->lock);
	/* Move the xfer descriptors into the free descriptors list. */
	list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
	at_xdmac_advance_work(atchan);
	spin_unlock_irq(&atchan->lock);

	/*
	 * Decrement runtime PM ref counter incremented in
	 * at_xdmac_start_xfer().
	 */
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);
}
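
/*
 * Top half: loop while GIS & GIM reports pending channels, latch each
 * pending channel's masked status for its tasklet, and disable a channel
 * immediately on a read/write bus error.
 */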
static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
{
	struct at_xdmac		*atxdmac = (struct at_xdmac *)dev_id;
	struct at_xdmac_chan	*atchan;
	u32			imr, status, pending;
	u32			chan_imr, chan_status;
	int			i, ret = IRQ_NONE;

	do {
		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
		pending = status & imr;

		dev_vdbg(atxdmac->dma.dev,
			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
			 __func__, status, imr, pending);

		if (!pending)
			break;

		/* We have to find which channel has generated the interrupt. */
		for (i = 0; i < atxdmac->dma.chancnt; i++) {
			if (!((1 << i) & pending))
				continue;

			atchan = &atxdmac->chan[i];
			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
			atchan->irq_status = chan_status & chan_imr;
			dev_vdbg(atxdmac->dma.dev,
				 "%s: chan%d: imr=0x%x, status=0x%x\n",
				 __func__, i, chan_imr, chan_status);
			dev_vdbg(chan2dev(&atchan->chan),
				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
				 __func__,
				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);

			tasklet_schedule(&atchan->tasklet);
			ret = IRQ_HANDLED;
		}

	} while (pending);

	return ret;
}
static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	unsigned long flags;

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
static int at_xdmac_device_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	int			ret;
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	ret = at_xdmac_set_slave_config(chan, config);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return ret;
}
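
/*
 * Request a read/write suspend of the channel and wait until any access in
 * progress (WRIP/RDIP) has drained.
 */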
static void at_xdmac_device_pause_set(struct at_xdmac *atxdmac,
				      struct at_xdmac_chan *atchan)
{
	at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) &
	       (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
		cpu_relax();
}

static void at_xdmac_device_pause_internal(struct at_xdmac_chan *atchan)
{
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	set_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
	at_xdmac_device_pause_set(atxdmac, atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
static int at_xdmac_device_pause(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;
	int			ret;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
		return 0;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);

	at_xdmac_device_pause_set(atxdmac, atchan);
	/* Decrement runtime PM ref counter for each active descriptor. */
	at_xdmac_runtime_suspend_descriptors(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return 0;
}
static void at_xdmac_device_resume_internal(struct at_xdmac_chan *atchan)
{
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

static int at_xdmac_device_resume(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;
	int			ret;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!at_xdmac_chan_is_paused(atchan))
		goto unlock;

	/* Increment runtime PM ref counter for each active descriptor. */
	ret = at_xdmac_runtime_resume_descriptors(atchan);
	if (ret < 0)
		goto unlock;

	at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);

unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return ret;
}
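
/*
 * Abort all activity on the channel: disable it in hardware, move every
 * queued descriptor back to the free list and drop the runtime PM reference
 * taken when each active transfer was started.
 */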
static int at_xdmac_device_terminate_all(struct dma_chan *chan)
{
	struct at_xdmac_desc	*desc, *_desc;
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;
	int			ret;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	/* Cancel all pending transfers. */
	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
		list_del(&desc->xfer_node);
		list_splice_tail_init(&desc->descs_list,
				      &atchan->free_descs_list);
		/*
		 * We incremented the runtime PM reference count on
		 * at_xdmac_start_xfer() for this descriptor. Now it's time
		 * to release it.
		 */
		if (desc->active_xfer) {
			pm_runtime_mark_last_busy(atxdmac->dev);
			pm_runtime_put_autosuspend(atxdmac->dev);
		}
	}

	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return 0;
}
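
/*
 * Pre-allocate init_nr_desc_per_channel descriptors for the channel; returns
 * the number actually allocated or a negative error code.
 */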
static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc;
	int			i;

	if (at_xdmac_chan_is_enabled(atchan)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel enabled)\n");
		return -EIO;
	}

	if (!list_empty(&atchan->free_descs_list)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel not free from a previous use)\n");
		return -EIO;
	}

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
		if (!desc) {
			if (i == 0) {
				dev_warn(chan2dev(chan),
					 "can't allocate any descriptors\n");
				return -EIO;
			}
			dev_warn(chan2dev(chan),
				 "only %d descriptors have been allocated\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
	}

	dma_cookie_init(chan);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;
}
static void at_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc	*desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
		list_del(&desc->desc_node);
		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
	}
}
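
/*
 * Program the global GCFG/GWAC registers on controllers that support AXI
 * configuration: a "dma-requests" DT property selects the mem2mem profile,
 * otherwise the per2mem profile is used.
 */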
static void at_xdmac_axi_config(struct platform_device *pdev)
{
	struct at_xdmac	*atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	bool dev_m2m = false;
	u32 dma_requests;

	if (!atxdmac->layout->axi_config)
		return; /* Not supported */

	if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
				  &dma_requests)) {
		dev_info(&pdev->dev, "controller in mem2mem mode.\n");
		dev_m2m = true;
	}

	if (dev_m2m) {
		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
	} else {
		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
	}
}
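
/*
 * System sleep support: ->prepare refuses to suspend while a non-cyclic
 * transfer is still running, and suspend saves the per-channel state that
 * resume must restore after a potential loss of power.
 */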
static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
{
	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
	struct dma_chan		*chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

		/* Wait for transfer completion, except in cyclic case. */
		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
{
	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
	struct dma_chan		*chan, *_chan;
	int			ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (!at_xdmac_chan_is_paused(atchan)) {
				dev_warn(chan2dev(chan), "%s: channel %d not paused\n",
					 __func__, chan->chan_id);
				at_xdmac_device_pause_internal(atchan);
				at_xdmac_runtime_suspend_descriptors(atchan);
			}
			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
		}
	}
	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);

	at_xdmac_off(atxdmac, false);
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_noidle(atxdmac->dev);
	clk_disable_unprepare(atxdmac->clk);

	return 0;
}
static int __maybe_unused atmel_xdmac_resume(struct device *dev)
{
	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
	struct at_xdmac_chan	*atchan;
	struct dma_chan		*chan, *_chan;
	struct platform_device	*pdev = container_of(dev, struct platform_device, dev);
	int			i, ret;

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret)
		return ret;

	pm_runtime_get_noresume(atxdmac->dev);

	at_xdmac_axi_config(pdev);

	/* Clear pending interrupts. */
	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		atchan = &atxdmac->chan[i];
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		atchan = to_at_xdmac_chan(chan);

		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			/*
			 * Resume only channels not explicitly paused by
			 * consumers.
			 */
			if (at_xdmac_chan_is_paused_internal(atchan)) {
				ret = at_xdmac_runtime_resume_descriptors(atchan);
				if (ret < 0)
					return ret;
				at_xdmac_device_resume_internal(atchan);
			}

			/*
			 * We may resume from a deep sleep state where power
			 * to the DMA controller is cut off. Thus, restore the
			 * suspend state of channels set through the dmaengine
			 * API.
			 */
			else if (at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_pause_set(atxdmac, atchan);

			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
			wmb();
			if (atxdmac->save_gs & atchan->mask)
				at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
		}
	}

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return 0;
}
static int __maybe_unused atmel_xdmac_runtime_suspend(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);

	clk_disable(atxdmac->clk);

	return 0;
}

static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);

	return clk_enable(atxdmac->clk);
}
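
/*
 * Probe: map the registers, read the channel count from GTYPE, register the
 * dmaengine capabilities and callbacks, wire up runtime PM and expose the
 * controller to DT consumers through of_dma_controller_register().
 */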
static int at_xdmac_probe(struct platform_device *pdev)
{
	struct at_xdmac	*atxdmac;
	int		irq, nr_channels, i, ret;
	void __iomem	*base;
	u32		reg;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Read the number of xdmac channels. The read helper function can't
	 * be used since atxdmac is not yet allocated and we need to know the
	 * number of channels to do the allocation.
	 */
	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
	nr_channels = AT_XDMAC_NB_CH(reg);
	if (nr_channels > AT_XDMAC_MAX_CHAN) {
		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
			nr_channels);
		return -EINVAL;
	}

	atxdmac = devm_kzalloc(&pdev->dev,
			       struct_size(atxdmac, chan, nr_channels),
			       GFP_KERNEL);
	if (!atxdmac) {
		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
		return -ENOMEM;
	}

	atxdmac->regs = base;
	atxdmac->irq = irq;
	atxdmac->dev = &pdev->dev;

	atxdmac->layout = of_device_get_match_data(&pdev->dev);
	if (!atxdmac->layout)
		return -ENODEV;

	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atxdmac->clk)) {
		dev_err(&pdev->dev, "can't get dma_clk\n");
		return PTR_ERR(atxdmac->clk);
	}

	/* Do not use dev res to prevent races with tasklet */
	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "can't request irq\n");
		return ret;
	}

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret) {
		dev_err(&pdev->dev, "can't prepare or enable clock\n");
		goto err_free_irq;
	}

	atxdmac->at_xdmac_desc_pool =
		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				 sizeof(struct at_xdmac_desc), 4, 0);
	if (!atxdmac->at_xdmac_desc_pool) {
		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_clk_disable;
	}

	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
	/*
	 * Without DMA_PRIVATE the driver is not able to allocate more than
	 * one channel: the second allocation fails in private_candidate.
	 */
	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
	atxdmac->dma.dev = &pdev->dev;
	atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
	atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
	atxdmac->dma.device_tx_status = at_xdmac_tx_status;
	atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
	atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
	atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
	atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
	atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
	atxdmac->dma.device_config = at_xdmac_device_config;
	atxdmac->dma.device_pause = at_xdmac_device_pause;
	atxdmac->dma.device_resume = at_xdmac_device_resume;
	atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	platform_set_drvdata(pdev, atxdmac);

	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	/* Init channels. */
	INIT_LIST_HEAD(&atxdmac->dma.channels);

	/* Disable all chans and interrupts. */
	at_xdmac_off(atxdmac, true);

	for (i = 0; i < nr_channels; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		atchan->chan.device = &atxdmac->dma;
		list_add_tail(&atchan->chan.device_node,
			      &atxdmac->dma.channels);

		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
		atchan->mask = 1 << i;

		spin_lock_init(&atchan->lock);
		INIT_LIST_HEAD(&atchan->xfers_list);
		INIT_LIST_HEAD(&atchan->free_descs_list);
		tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);

		/* Clear pending interrupts. */
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	ret = dma_async_device_register(&atxdmac->dma);
	if (ret) {
		dev_err(&pdev->dev, "fail to register DMA engine device\n");
		goto err_pm_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 at_xdmac_xlate, atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "could not register of dma controller\n");
		goto err_dma_unregister;
	}

	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
		 nr_channels, atxdmac->regs);

	at_xdmac_axi_config(pdev);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&atxdmac->dma);
err_pm_disable:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
err_clk_disable:
	clk_disable_unprepare(atxdmac->clk);
err_free_irq:
	free_irq(atxdmac->irq, atxdmac);
	return ret;
}
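
/* Tear down in roughly reverse probe order, quiescing channels before freeing. */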
static void at_xdmac_remove(struct platform_device *pdev)
{
	struct at_xdmac	*atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	int		i;

	at_xdmac_off(atxdmac, true);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atxdmac->dma);
	pm_runtime_disable(atxdmac->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	clk_disable_unprepare(atxdmac->clk);
	free_irq(atxdmac->irq, atxdmac);

	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		tasklet_kill(&atchan->tasklet);
		at_xdmac_free_chan_resources(&atchan->chan);
	}
}
static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
	.prepare	= atmel_xdmac_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
	SET_RUNTIME_PM_OPS(atmel_xdmac_runtime_suspend,
			   atmel_xdmac_runtime_resume, NULL)
};

static const struct of_device_id atmel_xdmac_dt_ids[] = {
	{
		.compatible = "atmel,sama5d4-dma",
		.data = &at_xdmac_sama5d4_layout,
	}, {
		.compatible = "microchip,sama7g5-dma",
		.data = &at_xdmac_sama7g5_layout,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);

static struct platform_driver at_xdmac_driver = {
	.probe		= at_xdmac_probe,
	.remove_new	= at_xdmac_remove,
	.driver = {
		.name		= "at_xdmac",
		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
		.pm		= pm_ptr(&atmel_xdmac_dev_pm_ops),
	}
};

static int __init at_xdmac_init(void)
{
	return platform_driver_register(&at_xdmac_driver);
}
subsys_initcall(at_xdmac_init);

static void __exit at_xdmac_exit(void)
{
	platform_driver_unregister(&at_xdmac_driver);
}
module_exit(at_xdmac_exit);

MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");