// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence Sierra PHY Driver
 *
 * Based on the linux driver provided by Cadence
 *
 * Copyright (c) 2018 Cadence Design Systems
 * Author: Alan Douglas <adouglas@cadence.com>
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 * Jean-Jacques Hiblot <jjhiblot@ti.com>
 *
 */
#include <common.h>
#include <clk.h>
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <generic-phy.h>
#include <reset.h>
#include <dm/device.h>
#include <dm/device-internal.h>
#include <dm/device_compat.h>
#include <dm/lists.h>
#include <dm/read.h>
#include <dm/uclass.h>
#include <dm/devres.h>
#include <linux/io.h>
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/phy/phy-cadence.h>
#include <regmap.h>
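
/*
 * U-Boot has no usleep_range(); busy-wait for the upper bound of the
 * requested range instead.
 */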
#define usleep_range(a, b) udelay((b))

#define NUM_SSC_MODE 3
#define NUM_PHY_TYPE 4

/* PHY register offsets */
#define SIERRA_COMMON_CDB_OFFSET 0x0
#define SIERRA_MACRO_ID_REG 0x0
#define SIERRA_CMN_PLLLC_GEN_PREG 0x42
#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
#define SIERRA_CMN_PLLLC_DSMCORR_PREG 0x51
#define SIERRA_CMN_PLLLC_SS_PREG 0x52
#define SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG 0x53
#define SIERRA_CMN_PLLLC_SSTWOPT_PREG 0x54
#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
#define SIERRA_CMN_REFRCV_PREG 0x98
#define SIERRA_CMN_REFRCV1_PREG 0xB8
#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
#define SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG 0x63
#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA
#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0
#define SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG 0xE2
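
/*
 * Lane register blocks start at 0x4000; the per-lane stride depends on the
 * platform's register offset shift (see cdns_sierra_data->reg_offset_shift).
 */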
#define SIERRA_LANE_CDB_OFFSET(ln, offset) \
	(0x4000 + ((ln) * (0x800 >> (2 - (offset)))))
#define SIERRA_DET_STANDEC_A_PREG 0x000
#define SIERRA_DET_STANDEC_B_PREG 0x001
#define SIERRA_DET_STANDEC_C_PREG 0x002
#define SIERRA_DET_STANDEC_D_PREG 0x003
#define SIERRA_DET_STANDEC_E_PREG 0x004
#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008
#define SIERRA_PSM_A0IN_TMR_PREG 0x009
#define SIERRA_PSM_A3IN_TMR_PREG 0x00C
#define SIERRA_PSM_DIAG_PREG 0x015
#define SIERRA_PSC_LN_A3_PREG 0x023
#define SIERRA_PSC_LN_A4_PREG 0x024
#define SIERRA_PSC_LN_IDLE_PREG 0x026
#define SIERRA_PSC_TX_A0_PREG 0x028
#define SIERRA_PSC_TX_A1_PREG 0x029
#define SIERRA_PSC_TX_A2_PREG 0x02A
#define SIERRA_PSC_TX_A3_PREG 0x02B
#define SIERRA_PSC_RX_A0_PREG 0x030
#define SIERRA_PSC_RX_A1_PREG 0x031
#define SIERRA_PSC_RX_A2_PREG 0x032
#define SIERRA_PSC_RX_A3_PREG 0x033
#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A
#define SIERRA_PLLCTRL_GEN_A_PREG 0x03B
#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E
#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F
#define SIERRA_PLLCTRL_STATUS_PREG 0x044
#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B
#define SIERRA_DFE_BIASTRIM_PREG 0x04C
#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087
#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088
#define SIERRA_CREQ_DCBIASATTEN_OVR_PREG 0x08C
#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E
#define SIERRA_RX_CTLE_CAL_PREG 0x08F
#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091
#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092
#define SIERRA_CREQ_EQ_CTRL_PREG 0x093
#define SIERRA_CREQ_SPARE_PREG 0x096
#define SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG 0x097
#define SIERRA_CTLELUT_CTRL_PREG 0x098
#define SIERRA_DFE_ECMP_RATESEL_PREG 0x0C0
#define SIERRA_DFE_SMP_RATESEL_PREG 0x0C1
#define SIERRA_DEQ_PHALIGN_CTRL 0x0C4
#define SIERRA_DEQ_CONCUR_CTRL1_PREG 0x0C8
#define SIERRA_DEQ_CONCUR_CTRL2_PREG 0x0C9
#define SIERRA_DEQ_EPIPWR_CTRL2_PREG 0x0CD
#define SIERRA_DEQ_FAST_MAINT_CYCLES_PREG 0x0CE
#define SIERRA_DEQ_ERRCMP_CTRL_PREG 0x0D0
#define SIERRA_DEQ_OFFSET_CTRL_PREG 0x0D8
#define SIERRA_DEQ_GAIN_CTRL_PREG 0x0E0
#define SIERRA_DEQ_VGATUNE_CTRL_PREG 0x0E1
#define SIERRA_DEQ_GLUT0 0x0E8
#define SIERRA_DEQ_GLUT1 0x0E9
#define SIERRA_DEQ_GLUT2 0x0EA
#define SIERRA_DEQ_GLUT3 0x0EB
#define SIERRA_DEQ_GLUT4 0x0EC
#define SIERRA_DEQ_GLUT5 0x0ED
#define SIERRA_DEQ_GLUT6 0x0EE
#define SIERRA_DEQ_GLUT7 0x0EF
#define SIERRA_DEQ_GLUT8 0x0F0
#define SIERRA_DEQ_GLUT9 0x0F1
#define SIERRA_DEQ_GLUT10 0x0F2
#define SIERRA_DEQ_GLUT11 0x0F3
#define SIERRA_DEQ_GLUT12 0x0F4
#define SIERRA_DEQ_GLUT13 0x0F5
#define SIERRA_DEQ_GLUT14 0x0F6
#define SIERRA_DEQ_GLUT15 0x0F7
#define SIERRA_DEQ_GLUT16 0x0F8
#define SIERRA_DEQ_ALUT0 0x108
#define SIERRA_DEQ_ALUT1 0x109
#define SIERRA_DEQ_ALUT2 0x10A
#define SIERRA_DEQ_ALUT3 0x10B
#define SIERRA_DEQ_ALUT4 0x10C
#define SIERRA_DEQ_ALUT5 0x10D
#define SIERRA_DEQ_ALUT6 0x10E
#define SIERRA_DEQ_ALUT7 0x10F
#define SIERRA_DEQ_ALUT8 0x110
#define SIERRA_DEQ_ALUT9 0x111
#define SIERRA_DEQ_ALUT10 0x112
#define SIERRA_DEQ_ALUT11 0x113
#define SIERRA_DEQ_ALUT12 0x114
#define SIERRA_DEQ_ALUT13 0x115
#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
#define SIERRA_DEQ_DFETAP0 0x129
#define SIERRA_DEQ_DFETAP1 0x12B
#define SIERRA_DEQ_DFETAP2 0x12D
#define SIERRA_DEQ_DFETAP3 0x12F
#define SIERRA_DEQ_DFETAP4 0x131
#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134
#define SIERRA_DEQ_PRECUR_PREG 0x138
#define SIERRA_DEQ_POSTCUR_PREG 0x140
#define SIERRA_DEQ_POSTCUR_DECR_PREG 0x142
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152
#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158
#define SIERRA_DEQ_PICTRL_PREG 0x161
#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174
#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C
#define SIERRA_CPI_RESBIAS_BIN_PREG 0x17E
#define SIERRA_CPI_TRIM_PREG 0x17F
#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
#define SIERRA_EPI_CTRL_PREG 0x187
#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
#define SIERRA_LFPSFILT_NS_PREG 0x18A
#define SIERRA_LFPSFILT_RD_PREG 0x18B
#define SIERRA_LFPSFILT_MP_PREG 0x18C
#define SIERRA_SIGDET_SUPPORT_PREG 0x190
#define SIERRA_SDFILT_H2L_A_PREG 0x191
#define SIERRA_SDFILT_L2H_PREG 0x193
#define SIERRA_RXBUFFER_CTLECTRL_PREG 0x19E
#define SIERRA_RXBUFFER_RCDFECTRL_PREG 0x19F
#define SIERRA_RXBUFFER_DFECTRL_PREG 0x1A0
#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150

#define SIERRA_PHY_PCS_COMMON_OFFSET 0xc000
#define SIERRA_PHY_PIPE_CMN_CTRL1 0x0
#define SIERRA_PHY_PLL_CFG 0xe

/* PHY PMA common registers */
#define SIERRA_PHY_PMA_COMMON_OFFSET 0xe000
#define SIERRA_PHY_PMA_CMN_CTRL 0x0

/* PHY PCS lane registers */
#define SIERRA_PHY_PCS_LANE_CDB_OFFSET(ln, offset) \
	(0xD000 + ((ln) * (0x800 >> (3 - (offset)))))
#define SIERRA_PHY_ISO_LINK_CTRL 0xB

/* PHY PMA lane registers */
#define SIERRA_PHY_PMA_LANE_CDB_OFFSET(ln, offset) \
	(0xF000 + ((ln) * (0x800 >> (3 - (offset)))))
#define SIERRA_PHY_PMA_XCVR_CTRL 0x000
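
/* Value expected in the MACRO_ID register; probe uses it to confirm the PHY is present */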
#define SIERRA_MACRO_ID 0x00007364
#define SIERRA_MAX_LANES 16
#define PLL_LOCK_TIME 100

#define CDNS_SIERRA_INPUT_CLOCKS 5
enum cdns_sierra_clock_input {
	PHY_CLK,
	CMN_REFCLK_DIG_DIV,
	CMN_REFCLK1_DIG_DIV,
	PLL0_REFCLK,
	PLL1_REFCLK,
};

#define SIERRA_NUM_CMN_PLLC 2
#define SIERRA_NUM_CMN_PLLC_PARENTS 2

static const struct reg_field macro_id_type =
	REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
static const struct reg_field phy_pll_cfg_1 =
	REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
static const struct reg_field pma_cmn_ready =
	REG_FIELD(SIERRA_PHY_PMA_CMN_CTRL, 0, 0);
static const struct reg_field pllctrl_lock =
	REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
static const struct reg_field phy_iso_link_ctrl_1 =
	REG_FIELD(SIERRA_PHY_ISO_LINK_CTRL, 1, 1);

static const char * const clk_names[] = {
	[CDNS_SIERRA_PLL_CMNLC] = "pll_cmnlc",
	[CDNS_SIERRA_PLL_CMNLC1] = "pll_cmnlc1",
};

enum cdns_sierra_cmn_plllc {
	CMN_PLLLC,
	CMN_PLLLC1,
};

struct cdns_sierra_pll_mux_reg_fields {
	struct reg_field pfdclk_sel_preg;
	struct reg_field plllc1en_field;
	struct reg_field termen_field;
};

static const struct cdns_sierra_pll_mux_reg_fields cmn_plllc_pfdclk1_sel_preg[] = {
	[CMN_PLLLC] = {
		.pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC_GEN_PREG, 1, 1),
		.plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 8, 8),
		.termen_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 0, 0),
	},
	[CMN_PLLLC1] = {
		.pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC1_GEN_PREG, 1, 1),
		.plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 8, 8),
		.termen_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 0, 0),
	},
};

struct cdns_sierra_pll_mux {
	struct cdns_sierra_phy *sp;
	struct clk *clk;
	struct clk *parent_clks[2];
	struct regmap_field *pfdclk_sel_preg;
	struct regmap_field *plllc1en_field;
	struct regmap_field *termen_field;
};
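
/*
 * Map the Linux reset_control API names onto U-Boot's reset_ctl helpers so
 * the code below stays close to the upstream Linux driver.
 */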
#define reset_control_assert(rst) cdns_reset_assert(rst)
#define reset_control_deassert(rst) cdns_reset_deassert(rst)
#define reset_control reset_ctl

enum cdns_sierra_phy_type {
	TYPE_NONE,
	TYPE_PCIE,
	TYPE_USB,
	TYPE_QSGMII
};

enum cdns_sierra_ssc_mode {
	NO_SSC,
	EXTERNAL_SSC,
	INTERNAL_SSC
};

struct cdns_sierra_inst {
	enum cdns_sierra_phy_type phy_type;
	u32 num_lanes;
	u32 mlane;
	struct reset_ctl_bulk *lnk_rst;
	enum cdns_sierra_ssc_mode ssc_mode;
};

struct cdns_reg_pairs {
	u16 val;
	u32 off;
};

struct cdns_sierra_vals {
	const struct cdns_reg_pairs *reg_pairs;
	u32 num_regs;
};
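
/*
 * Register value tables, indexed as [own PHY type][PHY type of the other
 * link (TYPE_NONE for single link)][SSC mode].
 */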
struct cdns_sierra_data {
	u32 id_value;
	u8 block_offset_shift;
	u8 reg_offset_shift;
	struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
		[NUM_SSC_MODE];
	struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
		[NUM_SSC_MODE];
	struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
		[NUM_SSC_MODE];
	struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
		[NUM_SSC_MODE];
};

struct cdns_sierra_phy {
	struct udevice *dev;
	void *base;
	size_t size;
	struct regmap *regmap;
	struct cdns_sierra_data *init_data;
	struct cdns_sierra_inst *phys[SIERRA_MAX_LANES];
	struct reset_control *phy_rst;
	struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
	struct regmap *regmap_phy_pcs_common_cdb;
	struct regmap *regmap_phy_pcs_lane_cdb[SIERRA_MAX_LANES];
	struct regmap *regmap_phy_pma_common_cdb;
	struct regmap *regmap_phy_pma_lane_cdb[SIERRA_MAX_LANES];
	struct regmap *regmap_common_cdb;
	struct regmap_field *macro_id_type;
	struct regmap_field *phy_pll_cfg_1;
	struct regmap_field *pma_cmn_ready;
	struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
	struct regmap_field *cmn_refrcv_refclk_plllc1en_preg[SIERRA_NUM_CMN_PLLC];
	struct regmap_field *cmn_refrcv_refclk_termen_preg[SIERRA_NUM_CMN_PLLC];
	struct regmap_field *cmn_plllc_pfdclk1_sel_preg[SIERRA_NUM_CMN_PLLC];
	struct clk *input_clks[CDNS_SIERRA_INPUT_CLOCKS];
	struct regmap_field *phy_iso_link_ctrl_1[SIERRA_MAX_LANES];
	int nsubnodes;
	u32 num_lanes;
	bool autoconf;
	unsigned int already_configured;
};

static inline int cdns_reset_assert(struct reset_control *rst)
{
	if (rst)
		return reset_assert(rst);
	else
		return 0;
}

static inline int cdns_reset_deassert(struct reset_control *rst)
{
	if (rst)
		return reset_deassert(rst);
	else
		return 0;
}

static int cdns_sierra_link_init(struct phy *gphy)
{
	struct cdns_sierra_inst *ins = dev_get_priv(gphy->dev);
	struct cdns_sierra_phy *phy = dev_get_priv(gphy->dev->parent);
	struct cdns_sierra_data *init_data = phy->init_data;
	struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
	enum cdns_sierra_phy_type phy_type = ins->phy_type;
	enum cdns_sierra_ssc_mode ssc = ins->ssc_mode;
	struct cdns_sierra_vals *phy_pma_ln_vals;
	const struct cdns_reg_pairs *reg_pairs;
	struct cdns_sierra_vals *pcs_cmn_vals;
	struct regmap *regmap = phy->regmap;
	u32 num_regs;
	int i, j;

	/* Initialise the PHY registers, unless auto configured */
	if (phy->autoconf || phy->already_configured || phy->nsubnodes > 1)
		return 0;

	clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
	clk_set_rate(phy->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);

	/* PHY PCS common registers configurations */
	pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
	if (pcs_cmn_vals) {
		reg_pairs = pcs_cmn_vals->reg_pairs;
		num_regs = pcs_cmn_vals->num_regs;
		regmap = phy->regmap_phy_pcs_common_cdb;
		for (i = 0; i < num_regs; i++)
			regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
	}

	/* PHY PMA lane registers configurations */
	phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_type][TYPE_NONE][ssc];
	if (phy_pma_ln_vals) {
		reg_pairs = phy_pma_ln_vals->reg_pairs;
		num_regs = phy_pma_ln_vals->num_regs;
		for (i = 0; i < ins->num_lanes; i++) {
			regmap = phy->regmap_phy_pma_lane_cdb[i + ins->mlane];
			for (j = 0; j < num_regs; j++)
				regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
		}
	}

	/* PMA common registers configurations */
	pma_cmn_vals = init_data->pma_cmn_vals[phy_type][TYPE_NONE][ssc];
	if (pma_cmn_vals) {
		reg_pairs = pma_cmn_vals->reg_pairs;
		num_regs = pma_cmn_vals->num_regs;
		regmap = phy->regmap_common_cdb;
		for (i = 0; i < num_regs; i++)
			regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
	}

	/* PMA TX lane registers configurations */
	pma_ln_vals = init_data->pma_ln_vals[phy_type][TYPE_NONE][ssc];
	if (pma_ln_vals) {
		reg_pairs = pma_ln_vals->reg_pairs;
		num_regs = pma_ln_vals->num_regs;
		for (i = 0; i < ins->num_lanes; i++) {
			regmap = phy->regmap_lane_cdb[i + ins->mlane];
			for (j = 0; j < num_regs; j++)
				regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
		}
	}

	return 0;
}

static int cdns_sierra_link_on(struct phy *gphy)
{
	struct cdns_sierra_inst *ins = dev_get_priv(gphy->dev);
	struct cdns_sierra_phy *sp = dev_get_priv(gphy->dev->parent);
	struct udevice *dev = gphy->dev;
	u32 val;
	int ret;

	if (sp->already_configured) {
		usleep_range(5000, 10000);
		return 0;
	}

	if (sp->nsubnodes == 1) {
		/* Take the PHY out of reset */
		ret = reset_control_deassert(sp->phy_rst);
		if (ret) {
			dev_err(dev, "Failed to take the PHY out of reset\n");
			return ret;
		}
	}

	/* Take the PHY lane group out of reset */
	ret = reset_deassert_bulk(ins->lnk_rst);
	if (ret) {
		dev_err(dev, "Failed to take the PHY lane out of reset\n");
		return ret;
	}

	if (ins->phy_type == TYPE_PCIE || ins->phy_type == TYPE_USB) {
		ret = regmap_field_read_poll_timeout(sp->phy_iso_link_ctrl_1[ins->mlane],
						     val, !val, 1000, PLL_LOCK_TIME);
		if (ret) {
			dev_err(dev, "Timeout waiting for PHY status ready\n");
			return ret;
		}
	}

	/*
	 * Wait for cmn_ready assertion
	 * PHY_PMA_CMN_CTRL[0] == 1
	 */
	ret = regmap_field_read_poll_timeout(sp->pma_cmn_ready, val, val,
					     1000, PLL_LOCK_TIME);
	if (ret) {
		dev_err(dev, "Timeout waiting for CMN ready\n");
		return ret;
	}
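
	/* Wait for the per-lane PLL lock (PLLCTRL_STATUS_PREG bit 0) */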
	ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
					     val, val, 1000, PLL_LOCK_TIME);
	if (ret < 0)
		dev_err(dev, "PLL lock of lane failed\n");

	reset_control_assert(sp->phy_rst);
	reset_control_deassert(sp->phy_rst);

	return ret;
}

static int cdns_sierra_link_off(struct phy *gphy)
{
	struct cdns_sierra_inst *ins = dev_get_priv(gphy->dev);

	return reset_assert_bulk(ins->lnk_rst);
}

static int cdns_sierra_link_reset(struct phy *gphy)
{
	struct cdns_sierra_phy *sp = dev_get_priv(gphy->dev->parent);

	reset_control_assert(sp->phy_rst);
	reset_control_deassert(sp->phy_rst);

	return 0;
};

static const struct phy_ops ops = {
	.init = cdns_sierra_link_init,
	.power_on = cdns_sierra_link_on,
	.power_off = cdns_sierra_link_off,
	.reset = cdns_sierra_link_reset,
};

struct cdns_sierra_pll_mux_sel {
	enum cdns_sierra_cmn_plllc mux_sel;
	u32 table[2];
	const char *node_name;
	u32 num_parents;
	u32 parents[2];
};
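
/*
 * Reference clock mux description for the two common PLLs; the table[] entry
 * selected by the parent index is written to the PFDCLK1_SEL field.
 */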
static struct cdns_sierra_pll_mux_sel pll_clk_mux_sel[] = {
	{
		.num_parents = 2,
		.parents = { PLL0_REFCLK, PLL1_REFCLK },
		.mux_sel = CMN_PLLLC,
		.table = { 0, 1 },
		.node_name = "pll_cmnlc",
	},
	{
		.num_parents = 2,
		.parents = { PLL1_REFCLK, PLL0_REFCLK },
		.mux_sel = CMN_PLLLC1,
		.table = { 1, 0 },
		.node_name = "pll_cmnlc1",
	},
};

static int cdns_sierra_pll_mux_set_parent(struct clk *clk, struct clk *parent)
{
	struct udevice *dev = clk->dev;
	struct cdns_sierra_pll_mux *priv = dev_get_priv(dev);
	struct cdns_sierra_pll_mux_sel *data = dev_get_plat(dev);
	struct cdns_sierra_phy *sp = priv->sp;
	int ret;
	int i;
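
	/* Find the requested parent among the mux inputs */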
	for (i = 0; i < ARRAY_SIZE(priv->parent_clks); i++) {
		if (parent->dev == priv->parent_clks[i]->dev)
			break;
	}
	if (i == ARRAY_SIZE(priv->parent_clks))
		return -EINVAL;

	ret = regmap_field_write(sp->cmn_refrcv_refclk_plllc1en_preg[data[clk->id].mux_sel], i);
	ret |= regmap_field_write(sp->cmn_refrcv_refclk_termen_preg[data[clk->id].mux_sel], i);
	ret |= regmap_field_write(sp->cmn_plllc_pfdclk1_sel_preg[data[clk->id].mux_sel],
				  data[clk->id].table[i]);

	return ret;
}

static const struct clk_ops cdns_sierra_pll_mux_ops = {
	.set_parent = cdns_sierra_pll_mux_set_parent,
};

static int cdns_sierra_pll_mux_probe(struct udevice *dev)
{
	struct cdns_sierra_pll_mux *priv = dev_get_priv(dev);
	struct cdns_sierra_phy *sp = dev_get_priv(dev->parent);
	struct cdns_sierra_pll_mux_sel *data = dev_get_plat(dev);
	struct clk *clk;
	int i, j;

	for (j = 0; j < SIERRA_NUM_CMN_PLLC; j++) {
		for (i = 0; i < ARRAY_SIZE(priv->parent_clks); i++) {
			clk = sp->input_clks[data[j].parents[i]];
			if (IS_ERR_OR_NULL(clk)) {
				dev_err(dev, "No parent clock for PLL mux clocks\n");
				return IS_ERR(clk) ? PTR_ERR(clk) : -ENOENT;
			}
			priv->parent_clks[i] = clk;
		}
	}
	priv->sp = dev_get_priv(dev->parent);

	return 0;
}

U_BOOT_DRIVER(cdns_sierra_pll_mux_clk) = {
	.name = "cdns_sierra_mux_clk",
	.id = UCLASS_CLK,
	.priv_auto = sizeof(struct cdns_sierra_pll_mux),
	.ops = &cdns_sierra_pll_mux_ops,
	.probe = cdns_sierra_pll_mux_probe,
	.plat_auto = sizeof(struct cdns_sierra_pll_mux_sel) * SIERRA_NUM_CMN_PLLC,
};
static int cdns_sierra_pll_bind_of_clocks(struct cdns_sierra_phy *sp)
{
	struct udevice *dev = sp->dev;
	struct driver *cdns_sierra_clk_drv;
	struct cdns_sierra_pll_mux_sel *data = pll_clk_mux_sel;
	int rc;

	cdns_sierra_clk_drv = lists_driver_lookup_name("cdns_sierra_mux_clk");
	if (!cdns_sierra_clk_drv) {
		dev_err(dev, "Can not find driver 'cdns_sierra_mux_clk'\n");
		return -ENOENT;
	}

	rc = device_bind(dev, cdns_sierra_clk_drv, "pll_mux_clk",
			 data, dev_ofnode(dev), NULL);
	if (rc)
		dev_err(dev, "cannot bind driver for clocks %s/%s\n",
			clk_names[CDNS_SIERRA_PLL_CMNLC],
			clk_names[CDNS_SIERRA_PLL_CMNLC1]);

	return 0;
}
static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
				    ofnode child)
{
	u32 phy_type;

	if (ofnode_read_u32(child, "reg", &inst->mlane))
		return -EINVAL;

	if (ofnode_read_u32(child, "cdns,num-lanes", &inst->num_lanes))
		return -EINVAL;

	if (ofnode_read_u32(child, "cdns,phy-type", &phy_type))
		return -EINVAL;

	switch (phy_type) {
	case PHY_TYPE_PCIE:
		inst->phy_type = TYPE_PCIE;
		break;
	case PHY_TYPE_USB3:
		inst->phy_type = TYPE_USB;
		break;
	case PHY_TYPE_QSGMII:
		inst->phy_type = TYPE_QSGMII;
		break;
	default:
		return -EINVAL;
	}

	inst->ssc_mode = EXTERNAL_SSC;
	ofnode_read_u32(child, "cdns,ssc-mode", &inst->ssc_mode);

	return 0;
}

static struct regmap *cdns_regmap_init(struct udevice *dev, void __iomem *base,
				       u32 block_offset, u8 block_offset_shift,
				       u8 reg_offset_shift)
{
	struct cdns_sierra_phy *sp = dev_get_priv(dev);
	struct regmap_config config;

	config.r_start = (ulong)(base + (block_offset << block_offset_shift));
	config.r_size = sp->size - (block_offset << block_offset_shift);
	config.reg_offset_shift = reg_offset_shift;
	config.width = REGMAP_SIZE_16;

	return devm_regmap_init(dev, NULL, NULL, &config);
}

static int cdns_regfield_init(struct cdns_sierra_phy *sp)
{
	struct udevice *dev = sp->dev;
	struct regmap_field *field;
	struct reg_field reg_field;
	struct regmap *regmap;
	int i;

	regmap = sp->regmap_common_cdb;
	field = devm_regmap_field_alloc(dev, regmap, macro_id_type);
	if (IS_ERR(field)) {
		dev_err(dev, "MACRO_ID_TYPE reg field init failed\n");
		return PTR_ERR(field);
	}
	sp->macro_id_type = field;

	for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++) {
		reg_field = cmn_plllc_pfdclk1_sel_preg[i].pfdclk_sel_preg;
		field = devm_regmap_field_alloc(dev, regmap, reg_field);
		if (IS_ERR(field)) {
			dev_err(dev, "PLLLC%d_PFDCLK1_SEL failed\n", i);
			return PTR_ERR(field);
		}
		sp->cmn_plllc_pfdclk1_sel_preg[i] = field;

		reg_field = cmn_plllc_pfdclk1_sel_preg[i].plllc1en_field;
		field = devm_regmap_field_alloc(dev, regmap, reg_field);
		if (IS_ERR(field)) {
			dev_err(dev, "REFRCV%d_REFCLK_PLLLC1EN failed\n", i);
			return PTR_ERR(field);
		}
		sp->cmn_refrcv_refclk_plllc1en_preg[i] = field;

		reg_field = cmn_plllc_pfdclk1_sel_preg[i].termen_field;
		field = devm_regmap_field_alloc(dev, regmap, reg_field);
		if (IS_ERR(field)) {
			dev_err(dev, "REFRCV%d_REFCLK_TERMEN failed\n", i);
			return PTR_ERR(field);
		}
		sp->cmn_refrcv_refclk_termen_preg[i] = field;
	}

	regmap = sp->regmap_phy_pcs_common_cdb;
	field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
	if (IS_ERR(field)) {
		dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
		return PTR_ERR(field);
	}
	sp->phy_pll_cfg_1 = field;

	regmap = sp->regmap_phy_pma_common_cdb;
	field = devm_regmap_field_alloc(dev, regmap, pma_cmn_ready);
	if (IS_ERR(field)) {
		dev_err(dev, "PHY_PMA_CMN_CTRL reg field init failed\n");
		return PTR_ERR(field);
	}
	sp->pma_cmn_ready = field;

	for (i = 0; i < SIERRA_MAX_LANES; i++) {
		regmap = sp->regmap_lane_cdb[i];
		field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
		if (IS_ERR(field)) {
			dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
			return PTR_ERR(field);
		}
		sp->pllctrl_lock[i] = field;
	}

	for (i = 0; i < SIERRA_MAX_LANES; i++) {
		regmap = sp->regmap_phy_pcs_lane_cdb[i];
		field = devm_regmap_field_alloc(dev, regmap, phy_iso_link_ctrl_1);
		if (IS_ERR(field)) {
			dev_err(dev, "PHY_ISO_LINK_CTRL reg field init for lane %d failed\n", i);
			return PTR_ERR(field);
		}
		sp->phy_iso_link_ctrl_1[i] = field;
	}

	return 0;
}

static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
				   void __iomem *base, u8 block_offset_shift,
				   u8 reg_offset_shift)
{
	struct udevice *dev = sp->dev;
	struct regmap *regmap;
	u32 block_offset;
	int i;

	for (i = 0; i < SIERRA_MAX_LANES; i++) {
		block_offset = SIERRA_LANE_CDB_OFFSET(i, reg_offset_shift);
		regmap = cdns_regmap_init(dev, base, block_offset,
					  block_offset_shift, reg_offset_shift);
		if (IS_ERR(regmap)) {
			dev_err(dev, "Failed to init lane CDB regmap\n");
			return PTR_ERR(regmap);
		}
		sp->regmap_lane_cdb[i] = regmap;
	}

	regmap = cdns_regmap_init(dev, base, SIERRA_COMMON_CDB_OFFSET,
				  block_offset_shift, reg_offset_shift);
	if (IS_ERR(regmap)) {
		dev_err(dev, "Failed to init common CDB regmap\n");
		return PTR_ERR(regmap);
	}
	sp->regmap_common_cdb = regmap;

	regmap = cdns_regmap_init(dev, base, SIERRA_PHY_PCS_COMMON_OFFSET,
				  block_offset_shift, reg_offset_shift);
	if (IS_ERR(regmap)) {
		dev_err(dev, "Failed to init PHY PCS common CDB regmap\n");
		return PTR_ERR(regmap);
	}
	sp->regmap_phy_pcs_common_cdb = regmap;

	for (i = 0; i < SIERRA_MAX_LANES; i++) {
		block_offset = SIERRA_PHY_PCS_LANE_CDB_OFFSET(i, reg_offset_shift);
		regmap = cdns_regmap_init(dev, base, block_offset,
					  block_offset_shift, reg_offset_shift);
		if (IS_ERR(regmap)) {
			dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n");
			return PTR_ERR(regmap);
		}
		sp->regmap_phy_pcs_lane_cdb[i] = regmap;
	}

	regmap = cdns_regmap_init(dev, base, SIERRA_PHY_PMA_COMMON_OFFSET,
				  block_offset_shift, reg_offset_shift);
	if (IS_ERR(regmap)) {
		dev_err(dev, "Failed to init PHY PMA common CDB regmap\n");
		return PTR_ERR(regmap);
	}
	sp->regmap_phy_pma_common_cdb = regmap;

	for (i = 0; i < SIERRA_MAX_LANES; i++) {
		block_offset = SIERRA_PHY_PMA_LANE_CDB_OFFSET(i, reg_offset_shift);
		regmap = cdns_regmap_init(dev, base, block_offset,
					  block_offset_shift, reg_offset_shift);
		if (IS_ERR(regmap)) {
			dev_err(dev, "Failed to init PHY PMA lane CDB regmap\n");
			return PTR_ERR(regmap);
		}
		sp->regmap_phy_pma_lane_cdb[i] = regmap;
	}

	return 0;
}

static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp)
{
	const struct cdns_sierra_data *init_data = sp->init_data;
	enum cdns_sierra_phy_type phy_t1, phy_t2, tmp_phy_type;
	struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
	struct cdns_sierra_vals *phy_pma_ln_vals;
	const struct cdns_reg_pairs *reg_pairs;
	struct cdns_sierra_vals *pcs_cmn_vals;
	int i, j, node, mlane, num_lanes, ret;
	enum cdns_sierra_ssc_mode ssc;
	struct regmap *regmap;
	u32 num_regs;

	/* Maximum 2 links (subnodes) are supported */
	if (sp->nsubnodes != 2)
		return -EINVAL;

	clk_set_rate(sp->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
	clk_set_rate(sp->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);

	/* PHY configured to use both PLL LC and LC1 */
	regmap_field_write(sp->phy_pll_cfg_1, 0x1);

	phy_t1 = sp->phys[0]->phy_type;
	phy_t2 = sp->phys[1]->phy_type;

	/*
	 * First configure the PHY for first link with phy_t1. Get the array
	 * values as [phy_t1][phy_t2][ssc].
	 */
	for (node = 0; node < sp->nsubnodes; node++) {
		if (node == 1) {
			/*
			 * If first link with phy_t1 is configured, then
			 * configure the PHY for second link with phy_t2.
			 * Get the array values as [phy_t2][phy_t1][ssc].
			 */
			tmp_phy_type = phy_t1;
			phy_t1 = phy_t2;
			phy_t2 = tmp_phy_type;
		}

		mlane = sp->phys[node]->mlane;
		ssc = sp->phys[node]->ssc_mode;
		num_lanes = sp->phys[node]->num_lanes;

		/* PHY PCS common registers configurations */
		pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
		if (pcs_cmn_vals) {
			reg_pairs = pcs_cmn_vals->reg_pairs;
			num_regs = pcs_cmn_vals->num_regs;
			regmap = sp->regmap_phy_pcs_common_cdb;
			for (i = 0; i < num_regs; i++)
				regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
		}

		/* PHY PMA lane registers configurations */
		phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_t1][phy_t2][ssc];
		if (phy_pma_ln_vals) {
			reg_pairs = phy_pma_ln_vals->reg_pairs;
			num_regs = phy_pma_ln_vals->num_regs;
			for (i = 0; i < num_lanes; i++) {
				regmap = sp->regmap_phy_pma_lane_cdb[i + mlane];
				for (j = 0; j < num_regs; j++)
					regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
			}
		}

		/* PMA common registers configurations */
		pma_cmn_vals = init_data->pma_cmn_vals[phy_t1][phy_t2][ssc];
		if (pma_cmn_vals) {
			reg_pairs = pma_cmn_vals->reg_pairs;
			num_regs = pma_cmn_vals->num_regs;
			regmap = sp->regmap_common_cdb;
			for (i = 0; i < num_regs; i++)
				regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
		}

		/* PMA TX lane registers configurations */
		pma_ln_vals = init_data->pma_ln_vals[phy_t1][phy_t2][ssc];
		if (pma_ln_vals) {
			reg_pairs = pma_ln_vals->reg_pairs;
			num_regs = pma_ln_vals->num_regs;
			for (i = 0; i < num_lanes; i++) {
				regmap = sp->regmap_lane_cdb[i + mlane];
				for (j = 0; j < num_regs; j++)
					regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
			}
		}

		if (phy_t1 == TYPE_QSGMII)
			reset_deassert_bulk(sp->phys[node]->lnk_rst);
	}

	/* Take the PHY out of reset */
	ret = reset_control_deassert(sp->phy_rst);
	if (ret)
		return ret;

	return 0;
}

static int cdns_sierra_phy_get_clocks(struct cdns_sierra_phy *sp,
				      struct udevice *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
	if (IS_ERR(clk)) {
		dev_err(dev, "cmn_refclk_dig_div clock not found\n");
		ret = PTR_ERR(clk);
		return ret;
	}
	sp->input_clks[CMN_REFCLK_DIG_DIV] = clk;

	clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
	if (IS_ERR(clk)) {
		dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
		ret = PTR_ERR(clk);
		return ret;
	}
	sp->input_clks[CMN_REFCLK1_DIG_DIV] = clk;

	clk = devm_clk_get_optional(dev, "pll0_refclk");
	if (IS_ERR(clk)) {
		dev_err(dev, "pll0_refclk clock not found\n");
		ret = PTR_ERR(clk);
		return ret;
	}
	sp->input_clks[PLL0_REFCLK] = clk;

	clk = devm_clk_get_optional(dev, "pll1_refclk");
	if (IS_ERR(clk)) {
		dev_err(dev, "pll1_refclk clock not found\n");
		ret = PTR_ERR(clk);
		return ret;
	}
	sp->input_clks[PLL1_REFCLK] = clk;

	return 0;
}

static int cdns_sierra_phy_clk(struct cdns_sierra_phy *sp)
{
	struct udevice *dev = sp->dev;
	struct clk *clk;
	int ret;

	clk = devm_clk_get_optional(dev, "phy_clk");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get clock phy_clk\n");
		return PTR_ERR(clk);
	}
	sp->input_clks[PHY_CLK] = clk;

	ret = clk_prepare_enable(sp->input_clks[PHY_CLK]);
	if (ret)
		return ret;

	return 0;
}

static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
				      struct udevice *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(dev, "sierra_reset");
	if (IS_ERR(rst)) {
		dev_err(dev, "failed to get reset\n");
		return PTR_ERR(rst);
	}
	sp->phy_rst = rst;

	return 0;
}

static int cdns_sierra_phy_bind(struct udevice *dev)
{
	struct driver *link_drv;
	ofnode child;
	int rc;

	link_drv = lists_driver_lookup_name("sierra_phy_link");
	if (!link_drv) {
		dev_err(dev, "Cannot find driver 'sierra_phy_link'\n");
		return -ENOENT;
	}
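
	/* Bind one link instance for each "phy" or "link" subnode */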
	ofnode_for_each_subnode(child, dev_ofnode(dev)) {
		if (!(ofnode_name_eq(child, "phy") ||
		      ofnode_name_eq(child, "link")))
			continue;

		rc = device_bind(dev, link_drv, "link", NULL, child, NULL);
		if (rc) {
			dev_err(dev, "cannot bind driver for link\n");
			return rc;
		}
	}

	return 0;
}

static int cdns_sierra_link_probe(struct udevice *dev)
{
	struct cdns_sierra_inst *inst = dev_get_priv(dev);
	struct cdns_sierra_phy *sp = dev_get_priv(dev->parent);
	struct reset_ctl_bulk *rst;
	int ret, node;

	rst = devm_reset_bulk_get_by_node(dev, dev_ofnode(dev));
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		dev_err(dev, "failed to get reset\n");
		return ret;
	}
	inst->lnk_rst = rst;

	ret = cdns_sierra_get_optional(inst, dev_ofnode(dev));
	if (ret) {
		dev_err(dev, "missing property in node\n");
		return ret;
	}

	node = sp->nsubnodes;
	sp->phys[node] = inst;
	sp->nsubnodes += 1;
	sp->num_lanes += inst->num_lanes;

	/* If more than one subnode, configure the PHY as multilink */
	if (!sp->autoconf && !sp->already_configured && sp->nsubnodes > 1) {
		ret = cdns_sierra_phy_configure_multilink(sp);
		if (ret)
			return ret;
	}

	return 0;
}

U_BOOT_DRIVER(sierra_phy_link) = {
	.name = "sierra_phy_link",
	.id = UCLASS_PHY,
	.probe = cdns_sierra_link_probe,
	.ops = &ops,
	.priv_auto = sizeof(struct cdns_sierra_inst),
};

static int cdns_sierra_phy_probe(struct udevice *dev)
{
	struct cdns_sierra_phy *sp = dev_get_priv(dev);
	struct cdns_sierra_data *data;
	unsigned int id_value;
	int ret;

	sp->dev = dev;

	sp->base = devfdt_remap_addr_index(dev, 0);
	if (!sp->base) {
		dev_err(dev, "unable to map regs\n");
		return -ENOMEM;
	}
	devfdt_get_addr_size_index(dev, 0, (fdt_size_t *)&sp->size);

	/* Get init data for this PHY */
	data = (struct cdns_sierra_data *)dev_get_driver_data(dev);
	sp->init_data = data;

	ret = cdns_regmap_init_blocks(sp, sp->base, data->block_offset_shift,
				      data->reg_offset_shift);
	if (ret)
		return ret;

	ret = cdns_regfield_init(sp);
	if (ret)
		return ret;

	ret = cdns_sierra_phy_get_clocks(sp, dev);
	if (ret)
		return ret;

	ret = cdns_sierra_pll_bind_of_clocks(sp);
	if (ret)
		return ret;
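
	/*
	 * If the PMA common block already reports ready, the PHY has been
	 * configured already (e.g. by an earlier boot stage); skip clock and
	 * reset setup and leave the existing configuration in place.
	 */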
  965. regmap_field_read(sp->pma_cmn_ready, &sp->already_configured);
  966. if (!sp->already_configured) {
  967. ret = cdns_sierra_phy_clk(sp);
  968. if (ret)
  969. return ret;
  970. ret = cdns_sierra_phy_get_resets(sp, dev);
  971. if (ret)
  972. return ret;
  973. }
  974. /* Check that PHY is present */
  975. regmap_field_read(sp->macro_id_type, &id_value);
  976. if (sp->init_data->id_value != id_value) {
  977. dev_err(dev, "PHY not found 0x%x vs 0x%x\n",
  978. sp->init_data->id_value, id_value);
  979. ret = -EINVAL;
  980. goto clk_disable;
  981. }
  982. sp->autoconf = dev_read_bool(dev, "cdns,autoconf");
  983. dev_info(dev, "sierra probed\n");
  984. return 0;
  985. clk_disable:
  986. if (!sp->already_configured)
  987. clk_disable_unprepare(sp->input_clks[PHY_CLK]);
  988. return ret;
  989. }
  990. static int cdns_sierra_phy_remove(struct udevice *dev)
  991. {
  992. struct cdns_sierra_phy *phy = dev_get_priv(dev);
  993. int i;
  994. reset_control_assert(phy->phy_rst);
  995. /*
  996. * The device level resets will be put automatically.
  997. * Need to put the subnode resets here though.
  998. */
  999. for (i = 0; i < phy->nsubnodes; i++)
  1000. reset_assert_bulk(phy->phys[i]->lnk_rst);
  1001. clk_disable_unprepare(phy->input_clks[PHY_CLK]);
  1002. return 0;
  1003. }
  1004. /* QSGMII PHY PMA lane configuration */
  1005. static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
  1006. {0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
  1007. };
  1008. static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
  1009. .reg_pairs = qsgmii_phy_pma_ln_regs,
  1010. .num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs),
  1011. };
  1012. /* QSGMII refclk 100MHz, 20b, opt1, No BW cal, no ssc, PLL LC1 */
  1013. static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_cmn_regs[] = {
  1014. {0x2085, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG},
  1015. {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG},
  1016. {0x0000, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG}
  1017. };
  1018. static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = {
  1019. {0xFC08, SIERRA_DET_STANDEC_A_PREG},
  1020. {0x0252, SIERRA_DET_STANDEC_E_PREG},
  1021. {0x0004, SIERRA_PSC_LN_IDLE_PREG},
  1022. {0x0FFE, SIERRA_PSC_RX_A0_PREG},
  1023. {0x0011, SIERRA_PLLCTRL_SUBRATE_PREG},
  1024. {0x0001, SIERRA_PLLCTRL_GEN_A_PREG},
  1025. {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
  1026. {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
  1027. {0x0089, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
  1028. {0x3C3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
  1029. {0x3222, SIERRA_CREQ_FSMCLK_SEL_PREG},
  1030. {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
  1031. {0x8422, SIERRA_CTLELUT_CTRL_PREG},
  1032. {0x4111, SIERRA_DFE_ECMP_RATESEL_PREG},
  1033. {0x4111, SIERRA_DFE_SMP_RATESEL_PREG},
  1034. {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
  1035. {0x9595, SIERRA_DEQ_VGATUNE_CTRL_PREG},
  1036. {0x0186, SIERRA_DEQ_GLUT0},
  1037. {0x0186, SIERRA_DEQ_GLUT1},
  1038. {0x0186, SIERRA_DEQ_GLUT2},
  1039. {0x0186, SIERRA_DEQ_GLUT3},
  1040. {0x0186, SIERRA_DEQ_GLUT4},
  1041. {0x0861, SIERRA_DEQ_ALUT0},
  1042. {0x07E0, SIERRA_DEQ_ALUT1},
  1043. {0x079E, SIERRA_DEQ_ALUT2},
  1044. {0x071D, SIERRA_DEQ_ALUT3},
  1045. {0x03F5, SIERRA_DEQ_DFETAP_CTRL_PREG},
  1046. {0x0C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
  1047. {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
  1048. {0x1C04, SIERRA_DEQ_TAU_CTRL2_PREG},
  1049. {0x0033, SIERRA_DEQ_PICTRL_PREG},
  1050. {0x0660, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
  1051. {0x00D5, SIERRA_CPI_OUTBUF_RATESEL_PREG},
  1052. {0x0B6D, SIERRA_CPI_RESBIAS_BIN_PREG},
  1053. {0x0102, SIERRA_RXBUFFER_CTLECTRL_PREG},
  1054. {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
  1055. };
  1056. static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
  1057. .reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs,
  1058. .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs),
  1059. };
  1060. static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
  1061. .reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs,
  1062. .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs),
  1063. };
  1064. /* PCIE PHY PCS common configuration */
  1065. static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
  1066. {0x0430, SIERRA_PHY_PIPE_CMN_CTRL1}
  1067. };
  1068. static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
  1069. .reg_pairs = pcie_phy_pcs_cmn_regs,
  1070. .num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs),
  1071. };
  1072. /* refclk100MHz_32b_PCIe_cmn_pll_no_ssc, pcie_links_using_plllc, pipe_bw_3 */
  1073. static const struct cdns_reg_pairs pcie_100_no_ssc_plllc_cmn_regs[] = {
  1074. {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
  1075. {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
  1076. {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
  1077. {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
  1078. };
  1079. /*
  1080. * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
  1081. * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
  1082. */
  1083. static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = {
  1084. {0xFC08, SIERRA_DET_STANDEC_A_PREG},
  1085. {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
  1086. {0x0004, SIERRA_PSC_LN_A3_PREG},
  1087. {0x0004, SIERRA_PSC_LN_A4_PREG},
  1088. {0x0004, SIERRA_PSC_LN_IDLE_PREG},
  1089. {0x1555, SIERRA_DFE_BIASTRIM_PREG},
  1090. {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
  1091. {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
  1092. {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
  1093. {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
  1094. {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
  1095. {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
  1096. {0x9800, SIERRA_RX_CTLE_CAL_PREG},
  1097. {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
  1098. {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
  1099. {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
  1100. {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
  1101. {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
  1102. {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
  1103. {0x0041, SIERRA_DEQ_GLUT0},
  1104. {0x0082, SIERRA_DEQ_GLUT1},
  1105. {0x00C3, SIERRA_DEQ_GLUT2},
  1106. {0x0145, SIERRA_DEQ_GLUT3},
  1107. {0x0186, SIERRA_DEQ_GLUT4},
  1108. {0x09E7, SIERRA_DEQ_ALUT0},
  1109. {0x09A6, SIERRA_DEQ_ALUT1},
  1110. {0x0965, SIERRA_DEQ_ALUT2},
  1111. {0x08E3, SIERRA_DEQ_ALUT3},
  1112. {0x00FA, SIERRA_DEQ_DFETAP0},
  1113. {0x00FA, SIERRA_DEQ_DFETAP1},
  1114. {0x00FA, SIERRA_DEQ_DFETAP2},
  1115. {0x00FA, SIERRA_DEQ_DFETAP3},
  1116. {0x00FA, SIERRA_DEQ_DFETAP4},
  1117. {0x000F, SIERRA_DEQ_PRECUR_PREG},
  1118. {0x0280, SIERRA_DEQ_POSTCUR_PREG},
  1119. {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
  1120. {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
  1121. {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
  1122. {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
  1123. {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
  1124. {0x002B, SIERRA_CPI_TRIM_PREG},
  1125. {0x0003, SIERRA_EPI_CTRL_PREG},
  1126. {0x803F, SIERRA_SDFILT_H2L_A_PREG},
  1127. {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
  1128. {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
  1129. {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
  1130. };
  1131. static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
  1132. .reg_pairs = pcie_100_no_ssc_plllc_cmn_regs,
  1133. .num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs),
  1134. };
  1135. static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
  1136. .reg_pairs = ml_pcie_100_no_ssc_ln_regs,
  1137. .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
  1138. };
  1139. /* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */
  1140. static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = {
  1141. {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
  1142. {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
  1143. {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
  1144. {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
  1145. {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
  1146. {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
  1147. {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
  1148. {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
  1149. {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
  1150. {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
  1151. {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
  1152. };
  1153. /*
  1154. * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
  1155. * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
  1156. */
  1157. static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = {
  1158. {0xFC08, SIERRA_DET_STANDEC_A_PREG},
  1159. {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
  1160. {0x0004, SIERRA_PSC_LN_A3_PREG},
  1161. {0x0004, SIERRA_PSC_LN_A4_PREG},
  1162. {0x0004, SIERRA_PSC_LN_IDLE_PREG},
  1163. {0x1555, SIERRA_DFE_BIASTRIM_PREG},
  1164. {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
  1165. {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
  1166. {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
  1167. {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
  1168. {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
  1169. {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
  1170. {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
  1171. {0x9800, SIERRA_RX_CTLE_CAL_PREG},
  1172. {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
  1173. {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
  1174. {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
  1175. {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
  1176. {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
  1177. {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
  1178. {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
  1179. {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
  1180. {0x0041, SIERRA_DEQ_GLUT0},
  1181. {0x0082, SIERRA_DEQ_GLUT1},
  1182. {0x00C3, SIERRA_DEQ_GLUT2},
  1183. {0x0145, SIERRA_DEQ_GLUT3},
  1184. {0x0186, SIERRA_DEQ_GLUT4},
  1185. {0x09E7, SIERRA_DEQ_ALUT0},
  1186. {0x09A6, SIERRA_DEQ_ALUT1},
  1187. {0x0965, SIERRA_DEQ_ALUT2},
  1188. {0x08E3, SIERRA_DEQ_ALUT3},
  1189. {0x00FA, SIERRA_DEQ_DFETAP0},
  1190. {0x00FA, SIERRA_DEQ_DFETAP1},
  1191. {0x00FA, SIERRA_DEQ_DFETAP2},
  1192. {0x00FA, SIERRA_DEQ_DFETAP3},
  1193. {0x00FA, SIERRA_DEQ_DFETAP4},
  1194. {0x000F, SIERRA_DEQ_PRECUR_PREG},
  1195. {0x0280, SIERRA_DEQ_POSTCUR_PREG},
  1196. {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
  1197. {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
  1198. {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
  1199. {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
  1200. {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
  1201. {0x002B, SIERRA_CPI_TRIM_PREG},
  1202. {0x0003, SIERRA_EPI_CTRL_PREG},
  1203. {0x803F, SIERRA_SDFILT_H2L_A_PREG},
  1204. {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
  1205. {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
  1206. {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
  1207. };
  1208. static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
  1209. .reg_pairs = pcie_100_int_ssc_plllc_cmn_regs,
  1210. .num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs),
  1211. };
  1212. static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
  1213. .reg_pairs = ml_pcie_100_int_ssc_ln_regs,
  1214. .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
  1215. };

/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */
static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = {
	{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
	{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
	{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
	{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
	{0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
};

/*
 * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
 * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
 */
static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = {
	{0xFC08, SIERRA_DET_STANDEC_A_PREG},
	{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
	{0x0004, SIERRA_PSC_LN_A3_PREG},
	{0x0004, SIERRA_PSC_LN_A4_PREG},
	{0x0004, SIERRA_PSC_LN_IDLE_PREG},
	{0x1555, SIERRA_DFE_BIASTRIM_PREG},
	{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
	{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
	{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
	{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
	{0x9800, SIERRA_RX_CTLE_CAL_PREG},
	{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
	{0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
	{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
	{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
	{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
	{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
	{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
	{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
	{0x0041, SIERRA_DEQ_GLUT0},
	{0x0082, SIERRA_DEQ_GLUT1},
	{0x00C3, SIERRA_DEQ_GLUT2},
	{0x0145, SIERRA_DEQ_GLUT3},
	{0x0186, SIERRA_DEQ_GLUT4},
	{0x09E7, SIERRA_DEQ_ALUT0},
	{0x09A6, SIERRA_DEQ_ALUT1},
	{0x0965, SIERRA_DEQ_ALUT2},
	{0x08E3, SIERRA_DEQ_ALUT3},
	{0x00FA, SIERRA_DEQ_DFETAP0},
	{0x00FA, SIERRA_DEQ_DFETAP1},
	{0x00FA, SIERRA_DEQ_DFETAP2},
	{0x00FA, SIERRA_DEQ_DFETAP3},
	{0x00FA, SIERRA_DEQ_DFETAP4},
	{0x000F, SIERRA_DEQ_PRECUR_PREG},
	{0x0280, SIERRA_DEQ_POSTCUR_PREG},
	{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
	{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
	{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
	{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
	{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
	{0x002B, SIERRA_CPI_TRIM_PREG},
	{0x0003, SIERRA_EPI_CTRL_PREG},
	{0x803F, SIERRA_SDFILT_H2L_A_PREG},
	{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
	{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
	{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};

static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
	.reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs,
	.num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs),
};

static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
	.reg_pairs = ml_pcie_100_ext_ssc_ln_regs,
	.num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
};

/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */
static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = {
	{0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
	{0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
	{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
	{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
};

/* refclk100MHz_32b_PCIe_ln_no_ssc */
static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = {
	{0xFC08, SIERRA_DET_STANDEC_A_PREG},
	{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
	{0x1555, SIERRA_DFE_BIASTRIM_PREG},
	{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
	{0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
	{0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
	{0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
	{0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
	{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
	{0x9800, SIERRA_RX_CTLE_CAL_PREG},
	{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
	{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
	{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
	{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
	{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
	{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
	{0x0041, SIERRA_DEQ_GLUT0},
	{0x0082, SIERRA_DEQ_GLUT1},
	{0x00C3, SIERRA_DEQ_GLUT2},
	{0x0145, SIERRA_DEQ_GLUT3},
	{0x0186, SIERRA_DEQ_GLUT4},
	{0x09E7, SIERRA_DEQ_ALUT0},
	{0x09A6, SIERRA_DEQ_ALUT1},
	{0x0965, SIERRA_DEQ_ALUT2},
	{0x08E3, SIERRA_DEQ_ALUT3},
	{0x00FA, SIERRA_DEQ_DFETAP0},
	{0x00FA, SIERRA_DEQ_DFETAP1},
	{0x00FA, SIERRA_DEQ_DFETAP2},
	{0x00FA, SIERRA_DEQ_DFETAP3},
	{0x00FA, SIERRA_DEQ_DFETAP4},
	{0x000F, SIERRA_DEQ_PRECUR_PREG},
	{0x0280, SIERRA_DEQ_POSTCUR_PREG},
	{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
	{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
	{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
	{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
	{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
	{0x002B, SIERRA_CPI_TRIM_PREG},
	{0x0003, SIERRA_EPI_CTRL_PREG},
	{0x803F, SIERRA_SDFILT_H2L_A_PREG},
	{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
	{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
	{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};

static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
	.reg_pairs = cdns_pcie_cmn_regs_no_ssc,
	.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc),
};

static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
	.reg_pairs = cdns_pcie_ln_regs_no_ssc,
	.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc),
};

/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc */
static const struct cdns_reg_pairs cdns_pcie_cmn_regs_int_ssc[] = {
	{0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
	{0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
	{0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
	{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
	{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
	{0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
	{0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
	{0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
	{0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
	{0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
	{0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
};

/* refclk100MHz_32b_PCIe_ln_int_ssc */
static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = {
	{0xFC08, SIERRA_DET_STANDEC_A_PREG},
	{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
	{0x1555, SIERRA_DFE_BIASTRIM_PREG},
	{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
	{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
	{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
	{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
	{0x9800, SIERRA_RX_CTLE_CAL_PREG},
	{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
	{0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
	{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
	{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
	{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
	{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
	{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
	{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
	{0x0041, SIERRA_DEQ_GLUT0},
	{0x0082, SIERRA_DEQ_GLUT1},
	{0x00C3, SIERRA_DEQ_GLUT2},
	{0x0145, SIERRA_DEQ_GLUT3},
	{0x0186, SIERRA_DEQ_GLUT4},
	{0x09E7, SIERRA_DEQ_ALUT0},
	{0x09A6, SIERRA_DEQ_ALUT1},
	{0x0965, SIERRA_DEQ_ALUT2},
	{0x08E3, SIERRA_DEQ_ALUT3},
	{0x00FA, SIERRA_DEQ_DFETAP0},
	{0x00FA, SIERRA_DEQ_DFETAP1},
	{0x00FA, SIERRA_DEQ_DFETAP2},
	{0x00FA, SIERRA_DEQ_DFETAP3},
	{0x00FA, SIERRA_DEQ_DFETAP4},
	{0x000F, SIERRA_DEQ_PRECUR_PREG},
	{0x0280, SIERRA_DEQ_POSTCUR_PREG},
	{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
	{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
	{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
	{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
	{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
	{0x002B, SIERRA_CPI_TRIM_PREG},
	{0x0003, SIERRA_EPI_CTRL_PREG},
	{0x803F, SIERRA_SDFILT_H2L_A_PREG},
	{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
	{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
	{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};

static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
	.reg_pairs = cdns_pcie_cmn_regs_int_ssc,
	.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc),
};

static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
	.reg_pairs = cdns_pcie_ln_regs_int_ssc,
	.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc),
};

/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
	{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
	{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
	{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
	{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
	{0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
};

/* refclk100MHz_32b_PCIe_ln_ext_ssc */
static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
	{0xFC08, SIERRA_DET_STANDEC_A_PREG},
	{0x001D, SIERRA_PSM_A3IN_TMR_PREG},
	{0x1555, SIERRA_DFE_BIASTRIM_PREG},
	{0x9703, SIERRA_DRVCTRL_BOOST_PREG},
	{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
	{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
	{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
	{0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
	{0x9800, SIERRA_RX_CTLE_CAL_PREG},
	{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
	{0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
	{0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
	{0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
	{0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
	{0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
	{0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
	{0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
	{0x0041, SIERRA_DEQ_GLUT0},
	{0x0082, SIERRA_DEQ_GLUT1},
	{0x00C3, SIERRA_DEQ_GLUT2},
	{0x0145, SIERRA_DEQ_GLUT3},
	{0x0186, SIERRA_DEQ_GLUT4},
	{0x09E7, SIERRA_DEQ_ALUT0},
	{0x09A6, SIERRA_DEQ_ALUT1},
	{0x0965, SIERRA_DEQ_ALUT2},
	{0x08E3, SIERRA_DEQ_ALUT3},
	{0x00FA, SIERRA_DEQ_DFETAP0},
	{0x00FA, SIERRA_DEQ_DFETAP1},
	{0x00FA, SIERRA_DEQ_DFETAP2},
	{0x00FA, SIERRA_DEQ_DFETAP3},
	{0x00FA, SIERRA_DEQ_DFETAP4},
	{0x000F, SIERRA_DEQ_PRECUR_PREG},
	{0x0280, SIERRA_DEQ_POSTCUR_PREG},
	{0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
	{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
	{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
	{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
	{0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
	{0x002B, SIERRA_CPI_TRIM_PREG},
	{0x0003, SIERRA_EPI_CTRL_PREG},
	{0x803F, SIERRA_SDFILT_H2L_A_PREG},
	{0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
	{0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
	{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};

static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
	.reg_pairs = cdns_pcie_cmn_regs_ext_ssc,
	.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
};

static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
	.reg_pairs = cdns_pcie_ln_regs_ext_ssc,
	.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
};

/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
	{0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
	{0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
	{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
	{0x0000, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
};

/* refclk100MHz_20b_USB_ln_ext_ssc */
static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
	{0xFE0A, SIERRA_DET_STANDEC_A_PREG},
	{0x000F, SIERRA_DET_STANDEC_B_PREG},
	{0x55A5, SIERRA_DET_STANDEC_C_PREG},
	{0x69AD, SIERRA_DET_STANDEC_D_PREG},
	{0x0241, SIERRA_DET_STANDEC_E_PREG},
	{0x0110, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
	{0x0014, SIERRA_PSM_A0IN_TMR_PREG},
	{0xCF00, SIERRA_PSM_DIAG_PREG},
	{0x001F, SIERRA_PSC_TX_A0_PREG},
	{0x0007, SIERRA_PSC_TX_A1_PREG},
	{0x0003, SIERRA_PSC_TX_A2_PREG},
	{0x0003, SIERRA_PSC_TX_A3_PREG},
	{0x0FFF, SIERRA_PSC_RX_A0_PREG},
	{0x0003, SIERRA_PSC_RX_A1_PREG},
	{0x0003, SIERRA_PSC_RX_A2_PREG},
	{0x0001, SIERRA_PSC_RX_A3_PREG},
	{0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
	{0x0406, SIERRA_PLLCTRL_GEN_D_PREG},
	{0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
	{0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
	{0x2512, SIERRA_DFE_BIASTRIM_PREG},
	{0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
	{0x823E, SIERRA_CLKPATHCTRL_TMR_PREG},
	{0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
	{0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
	{0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
	{0x023C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
	{0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
	{0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
	{0x0000, SIERRA_CREQ_SPARE_PREG},
	{0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
	{0x8452, SIERRA_CTLELUT_CTRL_PREG},
	{0x4121, SIERRA_DFE_ECMP_RATESEL_PREG},
	{0x4121, SIERRA_DFE_SMP_RATESEL_PREG},
	{0x0003, SIERRA_DEQ_PHALIGN_CTRL},
	{0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
	{0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
	{0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
	{0x0048, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
	{0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
	{0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
	{0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
	{0x9999, SIERRA_DEQ_VGATUNE_CTRL_PREG},
	{0x0014, SIERRA_DEQ_GLUT0},
	{0x0014, SIERRA_DEQ_GLUT1},
	{0x0014, SIERRA_DEQ_GLUT2},
	{0x0014, SIERRA_DEQ_GLUT3},
	{0x0014, SIERRA_DEQ_GLUT4},
	{0x0014, SIERRA_DEQ_GLUT5},
	{0x0014, SIERRA_DEQ_GLUT6},
	{0x0014, SIERRA_DEQ_GLUT7},
	{0x0014, SIERRA_DEQ_GLUT8},
	{0x0014, SIERRA_DEQ_GLUT9},
	{0x0014, SIERRA_DEQ_GLUT10},
	{0x0014, SIERRA_DEQ_GLUT11},
	{0x0014, SIERRA_DEQ_GLUT12},
	{0x0014, SIERRA_DEQ_GLUT13},
	{0x0014, SIERRA_DEQ_GLUT14},
	{0x0014, SIERRA_DEQ_GLUT15},
	{0x0014, SIERRA_DEQ_GLUT16},
	{0x0BAE, SIERRA_DEQ_ALUT0},
	{0x0AEB, SIERRA_DEQ_ALUT1},
	{0x0A28, SIERRA_DEQ_ALUT2},
	{0x0965, SIERRA_DEQ_ALUT3},
	{0x08A2, SIERRA_DEQ_ALUT4},
	{0x07DF, SIERRA_DEQ_ALUT5},
	{0x071C, SIERRA_DEQ_ALUT6},
	{0x0659, SIERRA_DEQ_ALUT7},
	{0x0596, SIERRA_DEQ_ALUT8},
	{0x0514, SIERRA_DEQ_ALUT9},
	{0x0492, SIERRA_DEQ_ALUT10},
	{0x0410, SIERRA_DEQ_ALUT11},
	{0x038E, SIERRA_DEQ_ALUT12},
	{0x030C, SIERRA_DEQ_ALUT13},
	{0x03F4, SIERRA_DEQ_DFETAP_CTRL_PREG},
	{0x0001, SIERRA_DFE_EN_1010_IGNORE_PREG},
	{0x3C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
	{0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
	{0x1C08, SIERRA_DEQ_TAU_CTRL2_PREG},
	{0x0033, SIERRA_DEQ_PICTRL_PREG},
	{0x0400, SIERRA_CPICAL_TMRVAL_MODE1_PREG},
	{0x0330, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
	{0x01FF, SIERRA_CPICAL_PICNT_MODE1_PREG},
	{0x0009, SIERRA_CPI_OUTBUF_RATESEL_PREG},
	{0x3232, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
	{0x0005, SIERRA_LFPSDET_SUPPORT_PREG},
	{0x000F, SIERRA_LFPSFILT_NS_PREG},
	{0x0009, SIERRA_LFPSFILT_RD_PREG},
	{0x0001, SIERRA_LFPSFILT_MP_PREG},
	{0x6013, SIERRA_SIGDET_SUPPORT_PREG},
	{0x8013, SIERRA_SDFILT_H2L_A_PREG},
	{0x8009, SIERRA_SDFILT_L2H_PREG},
	{0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
	{0x0020, SIERRA_RXBUFFER_RCDFECTRL_PREG},
	{0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};

static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
	.reg_pairs = cdns_usb_cmn_regs_ext_ssc,
	.num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
};

static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
	.reg_pairs = cdns_usb_ln_regs_ext_ssc,
	.num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
};
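
/*
 * SoC match data.  The vals tables below are three-level lookups indexed by
 * the PHY type being configured, the PHY type of the companion link in a
 * multilink setup (TYPE_NONE when there is none), and the SSC mode, so the
 * init path can pick the matching register sequence with a single lookup.
 */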
static const struct cdns_sierra_data cdns_map_sierra = {
	.id_value = SIERRA_MACRO_ID,
	.block_offset_shift = 0x2,
	.reg_offset_shift = 0x2,
	.pcs_cmn_vals = {
		[TYPE_PCIE] = {
			[TYPE_NONE] = {
				[NO_SSC] = &pcie_phy_pcs_cmn_vals,
				[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
				[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
			},
			[TYPE_QSGMII] = {
				[NO_SSC] = &pcie_phy_pcs_cmn_vals,
				[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
				[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
			},
		},
	},
	.pma_cmn_vals = {
		[TYPE_PCIE] = {
			[TYPE_NONE] = {
				[NO_SSC] = &pcie_100_no_ssc_cmn_vals,
				[EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
				[INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
			},
			[TYPE_QSGMII] = {
				[NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
				[EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
				[INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
			},
		},
		[TYPE_USB] = {
			[TYPE_NONE] = {
				[EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
			},
		},
		[TYPE_QSGMII] = {
			[TYPE_PCIE] = {
				[NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
				[EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
				[INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
			},
		},
	},
	.pma_ln_vals = {
		[TYPE_PCIE] = {
			[TYPE_NONE] = {
				[NO_SSC] = &pcie_100_no_ssc_ln_vals,
				[EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
				[INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
			},
			[TYPE_QSGMII] = {
				[NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
				[EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
				[INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
			},
		},
		[TYPE_USB] = {
			[TYPE_NONE] = {
				[EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
			},
		},
		[TYPE_QSGMII] = {
			[TYPE_PCIE] = {
				[NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
				[EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
				[INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
			},
		},
	},
};
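
/*
 * TI SoC integration of the same Sierra macro: compared with cdns_map_sierra
 * only the register addressing strides differ (block_offset_shift /
 * reg_offset_shift), and the QSGMII PHY PMA lane overrides (phy_pma_ln_vals)
 * are additionally provided.
 */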
static const struct cdns_sierra_data cdns_ti_map_sierra = {
	.id_value = SIERRA_MACRO_ID,
	.block_offset_shift = 0x0,
	.reg_offset_shift = 0x1,
	.pcs_cmn_vals = {
		[TYPE_PCIE] = {
			[TYPE_NONE] = {
				[NO_SSC] = &pcie_phy_pcs_cmn_vals,
				[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
				[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
			},
			[TYPE_QSGMII] = {
				[NO_SSC] = &pcie_phy_pcs_cmn_vals,
				[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
				[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
			},
		},
	},
	.phy_pma_ln_vals = {
		[TYPE_QSGMII] = {
			[TYPE_PCIE] = {
				[NO_SSC] = &qsgmii_phy_pma_ln_vals,
				[EXTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
				[INTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
			},
		},
	},
	.pma_cmn_vals = {
		[TYPE_PCIE] = {
			[TYPE_NONE] = {
				[NO_SSC] = &pcie_100_no_ssc_cmn_vals,
				[EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
				[INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
			},
			[TYPE_QSGMII] = {
				[NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
				[EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
				[INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
			},
		},
		[TYPE_USB] = {
			[TYPE_NONE] = {
				[EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
			},
		},
		[TYPE_QSGMII] = {
			[TYPE_PCIE] = {
				[NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
				[EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
				[INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
			},
		},
	},
	.pma_ln_vals = {
		[TYPE_PCIE] = {
			[TYPE_NONE] = {
				[NO_SSC] = &pcie_100_no_ssc_ln_vals,
				[EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
				[INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
			},
			[TYPE_QSGMII] = {
				[NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
				[EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
				[INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
			},
		},
		[TYPE_USB] = {
			[TYPE_NONE] = {
				[EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
			},
		},
		[TYPE_QSGMII] = {
			[TYPE_PCIE] = {
				[NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
				[EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
				[INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
			},
		},
	},
};
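
/* Match the devicetree compatibles to the per-SoC data above */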
static const struct udevice_id cdns_sierra_id_table[] = {
	{
		.compatible = "cdns,sierra-phy-t0",
		.data = (ulong)&cdns_map_sierra,
	},
	{
		.compatible = "ti,sierra-phy-t0",
		.data = (ulong)&cdns_ti_map_sierra,
	},
	{}
};

U_BOOT_DRIVER(sierra_phy_provider) = {
	.name = "cdns,sierra",
	.id = UCLASS_MISC,
	.of_match = cdns_sierra_id_table,
	.probe = cdns_sierra_phy_probe,
	.remove = cdns_sierra_phy_remove,
	.bind = cdns_sierra_phy_bind,
	.priv_auto = sizeof(struct cdns_sierra_phy),
};