/* lan743x_main.c — Microchip LAN743x PCIe Gigabit Ethernet driver.
 * (Extraction artifact removed here: the original page header and a long
 * run of concatenated line-number gutter digits from the source rendering.)
 */
  1. /* SPDX-License-Identifier: GPL-2.0+ */
  2. /* Copyright (C) 2018 Microchip Technology Inc. */
  3. #include <linux/module.h>
  4. #include <linux/pci.h>
  5. #include <linux/netdevice.h>
  6. #include <linux/etherdevice.h>
  7. #include <linux/crc32.h>
  8. #include <linux/microchipphy.h>
  9. #include <linux/net_tstamp.h>
  10. #include <linux/phy.h>
  11. #include <linux/rtnetlink.h>
  12. #include <linux/iopoll.h>
  13. #include <linux/crc16.h>
  14. #include "lan743x_main.h"
  15. #include "lan743x_ethtool.h"
  16. static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
  17. {
  18. pci_release_selected_regions(adapter->pdev,
  19. pci_select_bars(adapter->pdev,
  20. IORESOURCE_MEM));
  21. pci_disable_device(adapter->pdev);
  22. }
  23. static int lan743x_pci_init(struct lan743x_adapter *adapter,
  24. struct pci_dev *pdev)
  25. {
  26. unsigned long bars = 0;
  27. int ret;
  28. adapter->pdev = pdev;
  29. ret = pci_enable_device_mem(pdev);
  30. if (ret)
  31. goto return_error;
  32. netif_info(adapter, probe, adapter->netdev,
  33. "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
  34. pdev->vendor, pdev->device);
  35. bars = pci_select_bars(pdev, IORESOURCE_MEM);
  36. if (!test_bit(0, &bars))
  37. goto disable_device;
  38. ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
  39. if (ret)
  40. goto disable_device;
  41. pci_set_master(pdev);
  42. return 0;
  43. disable_device:
  44. pci_disable_device(adapter->pdev);
  45. return_error:
  46. return ret;
  47. }
/* Read a 32-bit CSR at @offset within the mapped BAR 0 window.
 * NOTE(review): @offset indexes csr_address directly, so its units depend
 * on csr_address's element type — presumably a byte offset; confirm
 * against the declaration in lan743x_main.h.
 */
u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
{
	return ioread32(&adapter->csr.csr_address[offset]);
}
/* Write 32-bit @data to the CSR at @offset within the mapped BAR 0
 * window (same offset convention as lan743x_csr_read()).
 */
void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
		       u32 data)
{
	iowrite32(data, &adapter->csr.csr_address[offset]);
}
/* Adapter-relative CSR read shaped for use as the "op" argument of
 * readx_poll_timeout(); relies on a variable named "adapter" being in
 * scope at the expansion site.
 */
#define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset)

/* Request a "light" reset by setting HW_CFG_LRST_, then poll HW_CFG
 * (sleeping 100 ms between reads, 10 s total timeout) until the hardware
 * clears the bit.  Returns 0 on success or the negative error from
 * readx_poll_timeout() (-ETIMEDOUT on expiry).
 */
static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
{
	u32 data;

	data = lan743x_csr_read(adapter, HW_CFG);
	data |= HW_CFG_LRST_;
	lan743x_csr_write(adapter, HW_CFG, data);

	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
				  !(data & HW_CFG_LRST_), 100000, 10000000);
}
/* Poll the CSR at @offset until (data & @bit_mask) matches @target_value
 * (1 = at least one masked bit set, 0 = all masked bits clear).
 *
 * Implemented with readx_poll_timeout(), where the per-iteration sleep
 * is @usleep_max and the total timeout is @usleep_min * @count
 * microseconds — i.e. the parameters do NOT map onto
 * readx_poll_timeout()'s (sleep_us, timeout_us) the way their names
 * suggest; callers size the timeout via usleep_min * count.
 * Returns 0 when the condition is met, -ETIMEDOUT otherwise.
 */
static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
				    int offset, u32 bit_mask,
				    int target_value, int usleep_min,
				    int usleep_max, int count)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
				  target_value == ((data & bit_mask) ? 1 : 0),
				  usleep_max, usleep_min * count);
}
  77. static int lan743x_csr_init(struct lan743x_adapter *adapter)
  78. {
  79. struct lan743x_csr *csr = &adapter->csr;
  80. resource_size_t bar_start, bar_length;
  81. int result;
  82. bar_start = pci_resource_start(adapter->pdev, 0);
  83. bar_length = pci_resource_len(adapter->pdev, 0);
  84. csr->csr_address = devm_ioremap(&adapter->pdev->dev,
  85. bar_start, bar_length);
  86. if (!csr->csr_address) {
  87. result = -ENOMEM;
  88. goto clean_up;
  89. }
  90. csr->id_rev = lan743x_csr_read(adapter, ID_REV);
  91. csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
  92. netif_info(adapter, probe, adapter->netdev,
  93. "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
  94. csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev),
  95. FPGA_REV_GET_MINOR_(csr->fpga_rev));
  96. if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) {
  97. result = -ENODEV;
  98. goto clean_up;
  99. }
  100. csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
  101. switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
  102. case ID_REV_CHIP_REV_A0_:
  103. csr->flags |= LAN743X_CSR_FLAG_IS_A0;
  104. csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
  105. break;
  106. case ID_REV_CHIP_REV_B0_:
  107. csr->flags |= LAN743X_CSR_FLAG_IS_B0;
  108. break;
  109. }
  110. result = lan743x_csr_light_reset(adapter);
  111. if (result)
  112. goto clean_up;
  113. return 0;
  114. clean_up:
  115. return result;
  116. }
/* Handler for the software-triggered test interrupt (INT_BIT_SW_GP_).
 * @context: the adapter pointer.
 *
 * Sets intr->software_isr_flag so lan743x_intr_test_isr() can observe
 * that the interrupt delivery path works.
 */
static void lan743x_intr_software_isr(void *context)
{
	struct lan743x_adapter *adapter = context;
	struct lan743x_intr *intr = &adapter->intr;
	u32 int_sts;

	int_sts = lan743x_csr_read(adapter, INT_STS);
	if (int_sts & INT_BIT_SW_GP_) {
		/* disable the interrupt to prevent repeated re-triggering */
		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
		intr->software_isr_flag = 1;
	}
}
  129. static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
  130. {
  131. struct lan743x_tx *tx = context;
  132. struct lan743x_adapter *adapter = tx->adapter;
  133. bool enable_flag = true;
  134. u32 int_en = 0;
  135. int_en = lan743x_csr_read(adapter, INT_EN_SET);
  136. if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
  137. lan743x_csr_write(adapter, INT_EN_CLR,
  138. INT_BIT_DMA_TX_(tx->channel_number));
  139. }
  140. if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
  141. u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
  142. u32 dmac_int_sts;
  143. u32 dmac_int_en;
  144. if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
  145. dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
  146. else
  147. dmac_int_sts = ioc_bit;
  148. if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
  149. dmac_int_en = lan743x_csr_read(adapter,
  150. DMAC_INT_EN_SET);
  151. else
  152. dmac_int_en = ioc_bit;
  153. dmac_int_en &= ioc_bit;
  154. dmac_int_sts &= dmac_int_en;
  155. if (dmac_int_sts & ioc_bit) {
  156. napi_schedule(&tx->napi);
  157. enable_flag = false;/* poll func will enable later */
  158. }
  159. }
  160. if (enable_flag)
  161. /* enable isr */
  162. lan743x_csr_write(adapter, INT_EN_SET,
  163. INT_BIT_DMA_TX_(tx->channel_number));
  164. }
/* lan743x_rx_isr - per-channel RX interrupt handler.
 * @context: the struct lan743x_rx for this channel
 * @int_sts: interrupt status bits supplied by the caller
 * @flags: vector flags describing how status/enable are managed
 *
 * If an enabled received-frame event is pending for this channel,
 * schedules NAPI and leaves the channel interrupt disabled (the poll
 * function re-enables it); otherwise re-enables the channel interrupt
 * before returning.
 */
static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_rx *rx = context;
	struct lan743x_adapter *adapter = rx->adapter;
	bool enable_flag = true;

	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
		lan743x_csr_write(adapter, INT_EN_CLR,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}
	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
		u32 dmac_int_sts;
		u32 dmac_int_en;

		/* read real DMAC status/enable, or assume the frame bit
		 * when the vector flags say they were consumed upstream
		 */
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
		else
			dmac_int_sts = rx_frame_bit;
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
			dmac_int_en = lan743x_csr_read(adapter,
						       DMAC_INT_EN_SET);
		else
			dmac_int_en = rx_frame_bit;

		/* only act on an enabled, pending frame event */
		dmac_int_en &= rx_frame_bit;
		dmac_int_sts &= dmac_int_en;
		if (dmac_int_sts & rx_frame_bit) {
			napi_schedule(&rx->napi);
			enable_flag = false;/* poll funct will enable later */
		}
	}
	if (enable_flag) {
		/* enable isr */
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}
}
/* lan743x_intr_shared_isr - dispatcher for a vector covering multiple
 * sources (legacy/MSI mode, or whatever remains on vector 0).
 * @context: the adapter pointer
 * @int_sts: pending source bits for this vector
 * @flags: vector flags, forwarded to the per-channel handlers
 *
 * Fans RX and TX DMA bits out to the per-channel handlers, services the
 * software-test and 1588 (PTP) sources, then masks any source bits that
 * no handler claimed so an unexpected source cannot re-fire forever.
 */
static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_adapter *adapter = context;
	unsigned int channel;

	if (int_sts & INT_BIT_ALL_RX_) {
		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
			channel++) {
			u32 int_bit = INT_BIT_DMA_RX_(channel);

			if (int_sts & int_bit) {
				lan743x_rx_isr(&adapter->rx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_TX_) {
		for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
			channel++) {
			u32 int_bit = INT_BIT_DMA_TX_(channel);

			if (int_sts & int_bit) {
				lan743x_tx_isr(&adapter->tx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_OTHER_) {
		if (int_sts & INT_BIT_SW_GP_) {
			lan743x_intr_software_isr(adapter);
			int_sts &= ~INT_BIT_SW_GP_;
		}
		if (int_sts & INT_BIT_1588_) {
			lan743x_ptp_isr(adapter);
			int_sts &= ~INT_BIT_1588_;
		}
	}
	/* disable any source bits left unclaimed by the handlers above */
	if (int_sts)
		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
}
/* lan743x_intr_entry_isr - top-level interrupt handler for one vector.
 * @irq: irq number (unused here; identity comes from @ptr)
 * @ptr: the struct lan743x_vector registered via request_irq()
 *
 * Reads (or infers, depending on vector->flags) the interrupt status,
 * optionally masks the vector and/or master interrupt while dispatching
 * to vector->handler, then re-enables as the flags dictate.
 *
 * Returns IRQ_HANDLED when any enabled source within this vector's mask
 * was pending, IRQ_NONE otherwise (e.g. a shared line firing for another
 * device).
 */
static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
{
	struct lan743x_vector *vector = ptr;
	struct lan743x_adapter *adapter = vector->adapter;
	irqreturn_t result = IRQ_NONE;
	u32 int_enables;
	u32 int_sts;

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
		int_sts = lan743x_csr_read(adapter, INT_STS);
	} else if (vector->flags &
		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
		/* read-to-clear variant: reading acknowledges the sources */
		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
	} else {
		/* use mask as implied status */
		int_sts = vector->int_mask | INT_BIT_MAS_;
	}
	/* no master bit set: nothing here is ours */
	if (!(int_sts & INT_BIT_MAS_))
		goto irq_done;
	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
		/* disable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_CLR,
				  INT_VEC_EN_(vector->vector_index));
	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
		/* disable master interrupt */
		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
	} else {
		/* use vector mask as implied enable mask */
		int_enables = vector->int_mask;
	}
	/* keep only sources that are both enabled and owned by this vector */
	int_sts &= int_enables;
	int_sts &= vector->int_mask;
	if (int_sts) {
		if (vector->handler) {
			vector->handler(vector->context,
					int_sts, vector->flags);
		} else {
			/* disable interrupts on this vector */
			lan743x_csr_write(adapter, INT_EN_CLR,
					  vector->int_mask);
		}
		result = IRQ_HANDLED;
	}
	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
		/* enable master interrupt */
		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
		/* enable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_SET,
				  INT_VEC_EN_(vector->vector_index));
irq_done:
	return result;
}
  296. static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
  297. {
  298. struct lan743x_intr *intr = &adapter->intr;
  299. int result = -ENODEV;
  300. int timeout = 10;
  301. intr->software_isr_flag = 0;
  302. /* enable interrupt */
  303. lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
  304. /* activate interrupt here */
  305. lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
  306. while ((timeout > 0) && (!(intr->software_isr_flag))) {
  307. usleep_range(1000, 20000);
  308. timeout--;
  309. }
  310. if (intr->software_isr_flag)
  311. result = 0;
  312. /* disable interrupts */
  313. lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
  314. return result;
  315. }
  316. static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
  317. int vector_index, u32 flags,
  318. u32 int_mask,
  319. lan743x_vector_handler handler,
  320. void *context)
  321. {
  322. struct lan743x_vector *vector = &adapter->intr.vector_list
  323. [vector_index];
  324. int ret;
  325. vector->adapter = adapter;
  326. vector->flags = flags;
  327. vector->vector_index = vector_index;
  328. vector->int_mask = int_mask;
  329. vector->handler = handler;
  330. vector->context = context;
  331. ret = request_irq(vector->irq,
  332. lan743x_intr_entry_isr,
  333. (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
  334. IRQF_SHARED : 0, DRIVER_NAME, vector);
  335. if (ret) {
  336. vector->handler = NULL;
  337. vector->context = NULL;
  338. vector->int_mask = 0;
  339. vector->flags = 0;
  340. }
  341. return ret;
  342. }
  343. static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
  344. int vector_index)
  345. {
  346. struct lan743x_vector *vector = &adapter->intr.vector_list
  347. [vector_index];
  348. free_irq(vector->irq, vector);
  349. vector->handler = NULL;
  350. vector->context = NULL;
  351. vector->int_mask = 0;
  352. vector->flags = 0;
  353. }
  354. static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
  355. u32 int_mask)
  356. {
  357. int index;
  358. for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
  359. if (adapter->intr.vector_list[index].int_mask & int_mask)
  360. return adapter->intr.vector_list[index].flags;
  361. }
  362. return 0;
  363. }
  364. static void lan743x_intr_close(struct lan743x_adapter *adapter)
  365. {
  366. struct lan743x_intr *intr = &adapter->intr;
  367. int index = 0;
  368. lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
  369. lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
  370. for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
  371. if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
  372. lan743x_intr_unregister_isr(adapter, index);
  373. intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
  374. }
  375. }
  376. if (intr->flags & INTR_FLAG_MSI_ENABLED) {
  377. pci_disable_msi(adapter->pdev);
  378. intr->flags &= ~INTR_FLAG_MSI_ENABLED;
  379. }
  380. if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
  381. pci_disable_msix(adapter->pdev);
  382. intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
  383. }
  384. }
/* lan743x_intr_open - allocate interrupt vectors and install handlers.
 * @adapter: driver private data
 *
 * Tries MSI-X first (up to 1 + TX channels + RX channels vectors), then
 * MSI (skipped on A0 silicon), then falls back to a shared legacy irq.
 * Vector 0 always handles the "other" sources plus any DMA channel that
 * did not receive a dedicated vector; when extra vectors are available,
 * TX channels and then RX channels are remapped to their own vectors.
 * Verifies delivery with a software-triggered test interrupt.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released via lan743x_intr_close().
 */
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
	struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
	struct lan743x_intr *intr = &adapter->intr;
	u32 int_vec_en_auto_clr = 0;
	u32 int_vec_map0 = 0;
	u32 int_vec_map1 = 0;
	int ret = -ENODEV;
	int index = 0;
	u32 flags = 0;

	intr->number_of_vectors = 0;

	/* Try to set up MSIX interrupts */
	memset(&msix_entries[0], 0,
	       sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
		msix_entries[index].entry = index;
	ret = pci_enable_msix_range(adapter->pdev,
				    msix_entries, 1,
				    1 + LAN743X_USED_TX_CHANNELS +
				    LAN743X_USED_RX_CHANNELS);
	if (ret > 0) {
		intr->flags |= INTR_FLAG_MSIX_ENABLED;
		intr->number_of_vectors = ret;
		intr->using_vectors = true;
		for (index = 0; index < intr->number_of_vectors; index++)
			intr->vector_list[index].irq = msix_entries
						       [index].vector;
		netif_info(adapter, ifup, adapter->netdev,
			   "using MSIX interrupts, number of vectors = %d\n",
			   intr->number_of_vectors);
	}

	/* If MSIX failed try to setup using MSI interrupts */
	if (!intr->number_of_vectors) {
		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
			if (!pci_enable_msi(adapter->pdev)) {
				intr->flags |= INTR_FLAG_MSI_ENABLED;
				intr->number_of_vectors = 1;
				intr->using_vectors = true;
				intr->vector_list[0].irq =
					adapter->pdev->irq;
				netif_info(adapter, ifup, adapter->netdev,
					   "using MSI interrupts, number of vectors = %d\n",
					   intr->number_of_vectors);
			}
		}
	}

	/* If MSIX, and MSI failed, setup using legacy interrupt */
	if (!intr->number_of_vectors) {
		intr->number_of_vectors = 1;
		intr->using_vectors = false;
		intr->vector_list[0].irq = intr->irq;
		netif_info(adapter, ifup, adapter->netdev,
			   "using legacy interrupts\n");
	}

	/* At this point we must have at least one irq */
	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);

	/* map all interrupts to vector 0 */
	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);

	/* base flags: explicit status read, write-to-clear, enable checks */
	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
	if (intr->using_vectors) {
		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
	} else {
		/* legacy line may be shared; gate via the master enable */
		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
	}
	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
		/* silicon with auto set/clear: switch to read-to-clear
		 * status and enable handling
		 */
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
	}

	/* vector 0 initially owns all RX, TX and "other" sources */
	ret = lan743x_intr_register_isr(adapter, 0, flags,
					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
					INT_BIT_ALL_OTHER_,
					lan743x_intr_shared_isr, adapter);
	if (ret)
		goto clean_up;
	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);
	if (intr->using_vectors)
		lan743x_csr_write(adapter, INT_VEC_EN_SET,
				  INT_VEC_EN_(0));

	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		/* interrupt moderation configuration (skipped on A0) */
		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
		lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
	}

	/* enable interrupts */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
	ret = lan743x_intr_test_isr(adapter);
	if (ret)
		goto clean_up;

	/* give each TX channel its own vector if enough are available */
	if (intr->number_of_vectors > 1) {
		int number_of_tx_vectors = intr->number_of_vectors - 1;

		if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
			number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}
		for (index = 0; index < number_of_tx_vectors; index++) {
			u32 int_bit = INT_BIT_DMA_TX_(index);
			int vector = index + 1;

			/* map TX interrupt to vector */
			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);

			/* Remove TX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_tx_isr,
							&adapter->tx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
			if (!(flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
				lan743x_csr_write(adapter, INT_VEC_EN_SET,
						  INT_VEC_EN_(vector));
		}
	}

	/* then give each RX channel its own vector from what remains */
	if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
		int number_of_rx_vectors = intr->number_of_vectors -
					   LAN743X_USED_TX_CHANNELS - 1;

		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;

		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}
		for (index = 0; index < number_of_rx_vectors; index++) {
			int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
			u32 int_bit = INT_BIT_DMA_RX_(index);

			/* map RX interrupt to vector */
			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
			if (flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
						  int_vec_en_auto_clr);
			}

			/* Remove RX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_rx_isr,
							&adapter->rx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);

			lan743x_csr_write(adapter, INT_VEC_EN_SET,
					  INT_VEC_EN_(vector));
		}
	}
	return 0;

clean_up:
	lan743x_intr_close(adapter);
	return ret;
}
/* Write @length words from @buf into internal RAM through the data port.
 * @select picks the target RAM (DP_SEL bank), @addr is the starting word
 * address. Returns 0 on success or -EIO if the data port never reports
 * ready. Each word write is a three-register sequence (DP_ADDR, DP_DATA_0,
 * DP_CMD) followed by a ready poll, so the exact order matters.
 */
static int lan743x_dp_write(struct lan743x_adapter *adapter,
			    u32 select, u32 addr, u32 length, u32 *buf)
{
	u32 dp_sel;
	int i;

	/* data port must be idle (DPRDY set) before changing the bank select */
	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
				     1, 40, 100, 100))
		return -EIO;
	dp_sel = lan743x_csr_read(adapter, DP_SEL);
	dp_sel &= ~DP_SEL_MASK_;
	dp_sel |= select;
	lan743x_csr_write(adapter, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		lan743x_csr_write(adapter, DP_ADDR, addr + i);
		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
		/* wait for this word to complete before issuing the next */
		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
					     1, 40, 100, 100))
			return -EIO;
	}
	return 0;
}
  600. static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
  601. {
  602. u32 ret;
  603. ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
  604. MAC_MII_ACC_PHY_ADDR_MASK_;
  605. ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
  606. MAC_MII_ACC_MIIRINDA_MASK_;
  607. if (read)
  608. ret |= MAC_MII_ACC_MII_READ_;
  609. else
  610. ret |= MAC_MII_ACC_MII_WRITE_;
  611. ret |= MAC_MII_ACC_MII_BUSY_;
  612. return ret;
  613. }
/* Poll MAC_MII_ACC until the BUSY bit clears (up to 1 s, no delay between
 * reads). Returns 0 when idle or -ETIMEDOUT from readx_poll_timeout.
 */
static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
}
  620. static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
  621. {
  622. struct lan743x_adapter *adapter = bus->priv;
  623. u32 val, mii_access;
  624. int ret;
  625. /* comfirm MII not busy */
  626. ret = lan743x_mac_mii_wait_till_not_busy(adapter);
  627. if (ret < 0)
  628. return ret;
  629. /* set the address, index & direction (read from PHY) */
  630. mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
  631. lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
  632. ret = lan743x_mac_mii_wait_till_not_busy(adapter);
  633. if (ret < 0)
  634. return ret;
  635. val = lan743x_csr_read(adapter, MAC_MII_DATA);
  636. return (int)(val & 0xFFFF);
  637. }
  638. static int lan743x_mdiobus_write(struct mii_bus *bus,
  639. int phy_id, int index, u16 regval)
  640. {
  641. struct lan743x_adapter *adapter = bus->priv;
  642. u32 val, mii_access;
  643. int ret;
  644. /* confirm MII not busy */
  645. ret = lan743x_mac_mii_wait_till_not_busy(adapter);
  646. if (ret < 0)
  647. return ret;
  648. val = (u32)regval;
  649. lan743x_csr_write(adapter, MAC_MII_DATA, val);
  650. /* set the address, index & direction (write to PHY) */
  651. mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
  652. lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
  653. ret = lan743x_mac_mii_wait_till_not_busy(adapter);
  654. return ret;
  655. }
  656. static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
  657. u8 *addr)
  658. {
  659. u32 addr_lo, addr_hi;
  660. addr_lo = addr[0] |
  661. addr[1] << 8 |
  662. addr[2] << 16 |
  663. addr[3] << 24;
  664. addr_hi = addr[4] |
  665. addr[5] << 8;
  666. lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
  667. lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
  668. ether_addr_copy(adapter->mac_address, addr);
  669. netif_info(adapter, drv, adapter->netdev,
  670. "MAC address set to %pM\n", addr);
  671. }
/* One-time MAC setup: enable automatic duplex/speed detection, reset the
 * statistics counters, then recover the MAC address previously programmed
 * into MAC_RX_ADDRL/H (e.g. by firmware/EEPROM load). If the registers
 * hold all-ones or an otherwise invalid address, fall back to a random
 * locally-administered address. Always returns 0.
 */
static int lan743x_mac_init(struct lan743x_adapter *adapter)
{
	bool mac_address_valid = true;
	struct net_device *netdev;
	u32 mac_addr_hi = 0;
	u32 mac_addr_lo = 0;
	u32 data;

	netdev = adapter->netdev;

	/* setup auto duplex, and speed detection */
	data = lan743x_csr_read(adapter, MAC_CR);
	data |= MAC_CR_ADD_ | MAC_CR_ASD_;
	data |= MAC_CR_CNTR_RST_;
	lan743x_csr_write(adapter, MAC_CR, data);

	/* unpack the little-endian address registers into mac_address[] */
	mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
	mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
	adapter->mac_address[0] = mac_addr_lo & 0xFF;
	adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
	adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
	adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
	adapter->mac_address[4] = mac_addr_hi & 0xFF;
	adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;

	/* all-ones means the registers were never programmed */
	if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
	    mac_addr_lo == 0xFFFFFFFF) {
		mac_address_valid = false;
	} else if (!is_valid_ether_addr(adapter->mac_address)) {
		mac_address_valid = false;
	}

	if (!mac_address_valid)
		eth_random_addr(adapter->mac_address);
	lan743x_mac_set_address(adapter, adapter->mac_address);
	ether_addr_copy(netdev->dev_addr, adapter->mac_address);
	return 0;
}
  705. static int lan743x_mac_open(struct lan743x_adapter *adapter)
  706. {
  707. int ret = 0;
  708. u32 temp;
  709. temp = lan743x_csr_read(adapter, MAC_RX);
  710. lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
  711. temp = lan743x_csr_read(adapter, MAC_TX);
  712. lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
  713. return ret;
  714. }
/* Disable the MAC transmitter then receiver, polling the corresponding
 * "disabled" status bit (TXD/RXD) after each so traffic has fully drained
 * before the caller tears down rings. Poll timeouts are ignored
 * (best-effort shutdown).
 */
static void lan743x_mac_close(struct lan743x_adapter *adapter)
{
	u32 temp;

	temp = lan743x_csr_read(adapter, MAC_TX);
	temp &= ~MAC_TX_TXEN_;
	lan743x_csr_write(adapter, MAC_TX, temp);
	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
				 1, 1000, 20000, 100);

	temp = lan743x_csr_read(adapter, MAC_RX);
	temp &= ~MAC_RX_RXEN_;
	lan743x_csr_write(adapter, MAC_RX, temp);
	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
				 1, 1000, 20000, 100);
}
  729. static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
  730. bool tx_enable, bool rx_enable)
  731. {
  732. u32 flow_setting = 0;
  733. /* set maximum pause time because when fifo space frees
  734. * up a zero value pause frame will be sent to release the pause
  735. */
  736. flow_setting = MAC_FLOW_CR_FCPT_MASK_;
  737. if (tx_enable)
  738. flow_setting |= MAC_FLOW_CR_TX_FCEN_;
  739. if (rx_enable)
  740. flow_setting |= MAC_FLOW_CR_RX_FCEN_;
  741. lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
  742. }
/* Update the MAC's maximum RX frame size for @new_mtu. If the receiver is
 * running it is quiesced first and re-enabled afterwards. Always returns 0.
 */
static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
{
	int enabled = 0;
	u32 mac_rx = 0;

	mac_rx = lan743x_csr_read(adapter, MAC_RX);
	if (mac_rx & MAC_RX_RXEN_) {
		enabled = 1;
		if (mac_rx & MAC_RX_RXD_) {
			/* NOTE(review): RXD looks write-1-to-clear — writing
			 * the value back clears a stale "disabled" flag so
			 * the wait below sees a fresh assertion; confirm
			 * against the datasheet.
			 */
			lan743x_csr_write(adapter, MAC_RX, mac_rx);
			mac_rx &= ~MAC_RX_RXD_;
		}
		/* disable the receiver and wait until it reports disabled */
		mac_rx &= ~MAC_RX_RXEN_;
		lan743x_csr_write(adapter, MAC_RX, mac_rx);
		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
					 1, 1000, 20000, 100);
		/* acknowledge (clear) the disabled flag */
		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
	}

	/* max frame = MTU + ethernet header + 4 (VLAN/FCS allowance) */
	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
	mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
		   MAC_RX_MAX_SIZE_MASK_);
	lan743x_csr_write(adapter, MAC_RX, mac_rx);

	if (enabled) {
		mac_rx |= MAC_RX_RXEN_;
		lan743x_csr_write(adapter, MAC_RX, mac_rx);
	}
	return 0;
}
  770. /* PHY */
/* Assert the internal Ethernet PHY reset via PMT_CTL and poll (50 ms
 * interval, 1 s timeout) until the reset bit self-clears and the device
 * reports ready. Returns 0 or -ETIMEDOUT.
 */
static int lan743x_phy_reset(struct lan743x_adapter *adapter)
{
	u32 data;

	/* Only called with in probe, and before mdiobus_register */
	data = lan743x_csr_read(adapter, PMT_CTL);
	data |= PMT_CTL_ETH_PHY_RST_;
	lan743x_csr_write(adapter, PMT_CTL, data);

	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
				  (data & PMT_CTL_READY_)),
				  50000, 1000000);
}
  783. static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
  784. u8 duplex, u16 local_adv,
  785. u16 remote_adv)
  786. {
  787. struct lan743x_phy *phy = &adapter->phy;
  788. u8 cap;
  789. if (phy->fc_autoneg)
  790. cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
  791. else
  792. cap = phy->fc_request_control;
  793. lan743x_mac_flow_ctrl_set_enables(adapter,
  794. cap & FLOW_CTRL_TX,
  795. cap & FLOW_CTRL_RX);
  796. }
  797. static int lan743x_phy_init(struct lan743x_adapter *adapter)
  798. {
  799. return lan743x_phy_reset(adapter);
  800. }
/* phylib link-change callback: log the new link state, and once the link
 * is up (PHY_RUNNING) re-resolve flow control from the advertisements and
 * retune the PTP latency compensation for the negotiated speed.
 */
static void lan743x_phy_link_status_change(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	phy_print_status(phydev);
	if (phydev->state == PHY_RUNNING) {
		struct ethtool_link_ksettings ksettings;
		int remote_advertisement = 0;
		int local_advertisement = 0;

		memset(&ksettings, 0, sizeof(ksettings));
		phy_ethtool_get_link_ksettings(netdev, &ksettings);
		local_advertisement =
			ethtool_adv_to_mii_adv_t(phydev->advertising);
		remote_advertisement =
			ethtool_adv_to_mii_adv_t(phydev->lp_advertising);

		lan743x_phy_update_flowcontrol(adapter,
					       ksettings.base.duplex,
					       local_advertisement,
					       remote_advertisement);
		lan743x_ptp_update_latency(adapter, ksettings.base.speed);
	}
}
  823. static void lan743x_phy_close(struct lan743x_adapter *adapter)
  824. {
  825. struct net_device *netdev = adapter->netdev;
  826. phy_stop(netdev->phydev);
  827. phy_disconnect(netdev->phydev);
  828. netdev->phydev = NULL;
  829. }
/* Find the first PHY on the MDIO bus, connect it with our link-change
 * callback in GMII mode, constrain/advertise capabilities (no 1000T half;
 * advertise symmetric pause), then start the PHY and autonegotiation.
 * Returns 0 on success or a negative errno (-EIO if no PHY found).
 */
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
	struct lan743x_phy *phy = &adapter->phy;
	struct phy_device *phydev;
	struct net_device *netdev;
	int ret = -EIO;
	u32 mii_adv;

	netdev = adapter->netdev;
	phydev = phy_find_first(adapter->mdiobus);
	if (!phydev)
		goto return_error;

	ret = phy_connect_direct(netdev, phydev,
				 lan743x_phy_link_status_change,
				 PHY_INTERFACE_MODE_GMII);
	if (ret)
		goto return_error;

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;

	/* support both flow controls */
	phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control);
	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
	/* remember the autoneg setting so flow control resolution can
	 * honour it later
	 */
	phy->fc_autoneg = phydev->autoneg;

	phy_start(phydev);
	phy_start_aneg(phydev);
	return 0;

return_error:
	return ret;
}
/* Enable receive-side scaling: hash every supported IPv4/IPv6 TCP/UDP
 * flavour, store the hash in the descriptor, and steer packets to queues
 * by hash. Written as a single RFE_RSS_CFG programming.
 */
static void lan743x_rfe_open(struct lan743x_adapter *adapter)
{
	lan743x_csr_write(adapter, RFE_RSS_CFG,
		RFE_RSS_CFG_UDP_IPV6_EX_ |
		RFE_RSS_CFG_TCP_IPV6_EX_ |
		RFE_RSS_CFG_IPV6_EX_ |
		RFE_RSS_CFG_UDP_IPV6_ |
		RFE_RSS_CFG_TCP_IPV6_ |
		RFE_RSS_CFG_IPV6_ |
		RFE_RSS_CFG_UDP_IPV4_ |
		RFE_RSS_CFG_TCP_IPV4_ |
		RFE_RSS_CFG_IPV4_ |
		RFE_RSS_CFG_VALID_HASH_BITS_ |
		RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
		RFE_RSS_CFG_RSS_HASH_STORE_ |
		RFE_RSS_CFG_RSS_ENABLE_);
}
  877. static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
  878. {
  879. u8 *mac_addr;
  880. u32 mac_addr_hi = 0;
  881. u32 mac_addr_lo = 0;
  882. /* Add mac address to perfect Filter */
  883. mac_addr = adapter->mac_address;
  884. mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
  885. (((u32)(mac_addr[1])) << 8) |
  886. (((u32)(mac_addr[2])) << 16) |
  887. (((u32)(mac_addr[3])) << 24));
  888. mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
  889. (((u32)(mac_addr[5])) << 8));
  890. lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
  891. lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
  892. mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
  893. }
/* Rebuild the receive filter from netdev flags and the multicast list.
 * Broadcast is always accepted; promiscuous enables accept-all; the first
 * 32 multicast addresses go into perfect-filter slots 1..32 (slot 0 holds
 * the unicast address) and any overflow falls back to the 512-bit hash
 * filter, loaded via the data port.
 */
static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 hash_table[DP_SEL_VHF_HASH_LEN];
	u32 rfctl;
	u32 data;

	rfctl = lan743x_csr_read(adapter, RFE_CTL);
	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
		   RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
	rfctl |= RFE_CTL_AB_;
	if (netdev->flags & IFF_PROMISC) {
		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rfctl |= RFE_CTL_AM_;
	}

	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
	if (netdev_mc_count(netdev)) {
		struct netdev_hw_addr *ha;
		int i;

		rfctl |= RFE_CTL_DA_PERFECT_;
		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				/* invalidate the slot before rewriting it */
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_HI(i), 0);
				data = ha->addr[3];
				data = ha->addr[2] | (data << 8);
				data = ha->addr[1] | (data << 8);
				data = ha->addr[0] | (data << 8);
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_LO(i), data);
				data = ha->addr[5];
				data = ha->addr[4] | (data << 8);
				data |= RFE_ADDR_FILT_HI_VALID_;
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_HI(i), data);
			} else {
				/* overflow: 9-bit hash from the top of the
				 * Ethernet CRC selects a bit in the table
				 */
				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
					      23) & 0x1FF;
				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
				rfctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	/* load the hash table into RFE RAM, then commit the control word */
	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
			 DP_SEL_VHF_VLAN_LEN,
			 DP_SEL_VHF_HASH_LEN, hash_table);
	lan743x_csr_write(adapter, RFE_CTL, rfctl);
}
/* Soft-reset the DMA controller and program its global configuration:
 * descriptor spacing, interrupt coalescing (not on A0 silicon), RX-high
 * channel arbitration, read-request sizing, coalescing thresholds/timer
 * and OBFF thresholds. Returns 0, or -EPERM if the compile-time
 * descriptor spacing is not a supported value.
 */
static int lan743x_dmac_init(struct lan743x_adapter *adapter)
{
	u32 data = 0;

	/* soft reset the DMAC and wait for the bit to self-clear */
	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
				 0, 1000, 20000, 100);
	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
	case DMA_DESCRIPTOR_SPACING_16:
		data = DMAC_CFG_MAX_DSPACE_16_;
		break;
	case DMA_DESCRIPTOR_SPACING_32:
		data = DMAC_CFG_MAX_DSPACE_32_;
		break;
	case DMA_DESCRIPTOR_SPACING_64:
		data = DMAC_CFG_MAX_DSPACE_64_;
		break;
	case DMA_DESCRIPTOR_SPACING_128:
		data = DMAC_CFG_MAX_DSPACE_128_;
		break;
	default:
		return -EPERM;
	}
	/* coalescing is broken on A0 silicon, leave it off there */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= DMAC_CFG_COAL_EN_;
	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
	lan743x_csr_write(adapter, DMAC_CFG, data);

	/* coalescing timer/threshold configuration */
	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
	data |= DMAC_COAL_CFG_TIMER_TX_START_;
	data |= DMAC_COAL_CFG_FLUSH_INTS_;
	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);

	data = DMAC_OBFF_TX_THRES_SET_(0x08);
	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
	return 0;
}
  986. static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
  987. int tx_channel)
  988. {
  989. u32 dmac_cmd = 0;
  990. dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
  991. return DMAC_CHANNEL_STATE_SET((dmac_cmd &
  992. DMAC_CMD_START_T_(tx_channel)),
  993. (dmac_cmd &
  994. DMAC_CMD_STOP_T_(tx_channel)));
  995. }
  996. static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
  997. int tx_channel)
  998. {
  999. int timeout = 100;
  1000. int result = 0;
  1001. while (timeout &&
  1002. ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
  1003. DMAC_CHANNEL_STATE_STOP_PENDING)) {
  1004. usleep_range(1000, 20000);
  1005. timeout--;
  1006. }
  1007. if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
  1008. result = -ENODEV;
  1009. return result;
  1010. }
  1011. static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
  1012. int rx_channel)
  1013. {
  1014. u32 dmac_cmd = 0;
  1015. dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
  1016. return DMAC_CHANNEL_STATE_SET((dmac_cmd &
  1017. DMAC_CMD_START_R_(rx_channel)),
  1018. (dmac_cmd &
  1019. DMAC_CMD_STOP_R_(rx_channel)));
  1020. }
  1021. static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
  1022. int rx_channel)
  1023. {
  1024. int timeout = 100;
  1025. int result = 0;
  1026. while (timeout &&
  1027. ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
  1028. DMAC_CHANNEL_STATE_STOP_PENDING)) {
  1029. usleep_range(1000, 20000);
  1030. timeout--;
  1031. }
  1032. if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
  1033. result = -ENODEV;
  1034. return result;
  1035. }
/* Release one TX descriptor slot: unmap its DMA buffer, dispose of the
 * skb (free it, or hand it to the PTP code if a timestamp was requested)
 * and zero both the descriptor and its buffer_info. @cleanup is true when
 * tearing the ring down (timestamps are cancelled instead of delivered).
 */
static void lan743x_tx_release_desc(struct lan743x_tx *tx,
				    int descriptor_index, bool cleanup)
{
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_tx_descriptor *descriptor = NULL;
	u32 descriptor_type = 0;
	bool ignore_sync;

	descriptor = &tx->ring_cpu_ptr[descriptor_index];
	buffer_info = &tx->buffer_info[descriptor_index];
	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
		goto done;

	descriptor_type = (descriptor->data0) &
			  TX_DESC_DATA0_DTYPE_MASK_;
	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
		goto clean_up_data_descriptor;
	else
		/* extension descriptors carry no buffer or skb */
		goto clear_active;

clean_up_data_descriptor:
	if (buffer_info->dma_ptr) {
		/* fragments were mapped with skb_frag_dma_map (page),
		 * the head buffer with dma_map_single
		 */
		if (buffer_info->flags &
		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
			dma_unmap_page(&tx->adapter->pdev->dev,
				       buffer_info->dma_ptr,
				       buffer_info->buffer_length,
				       DMA_TO_DEVICE);
		} else {
			dma_unmap_single(&tx->adapter->pdev->dev,
					 buffer_info->dma_ptr,
					 buffer_info->buffer_length,
					 DMA_TO_DEVICE);
		}
		buffer_info->dma_ptr = 0;
		buffer_info->buffer_length = 0;
	}
	if (!buffer_info->skb)
		goto clear_active;

	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
		dev_kfree_skb_any(buffer_info->skb);
		goto clear_skb;
	}

	if (cleanup) {
		/* ring teardown: give back the timestamp slot, drop skb */
		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
		dev_kfree_skb_any(buffer_info->skb);
	} else {
		/* normal completion: PTP code takes ownership of the skb */
		ignore_sync = (buffer_info->flags &
			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
		lan743x_ptp_tx_timestamp_skb(tx->adapter,
					     buffer_info->skb, ignore_sync);
	}

clear_skb:
	buffer_info->skb = NULL;

clear_active:
	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;

done:
	memset(buffer_info, 0, sizeof(*buffer_info));
	memset(descriptor, 0, sizeof(*descriptor));
}
  1093. static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
  1094. {
  1095. return ((++index) % tx->ring_size);
  1096. }
  1097. static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
  1098. {
  1099. while ((*tx->head_cpu_ptr) != (tx->last_head)) {
  1100. lan743x_tx_release_desc(tx, tx->last_head, false);
  1101. tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
  1102. }
  1103. }
  1104. static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
  1105. {
  1106. u32 original_head = 0;
  1107. original_head = tx->last_head;
  1108. do {
  1109. lan743x_tx_release_desc(tx, tx->last_head, true);
  1110. tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
  1111. } while (tx->last_head != original_head);
  1112. memset(tx->ring_cpu_ptr, 0,
  1113. sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
  1114. memset(tx->buffer_info, 0,
  1115. sizeof(*tx->buffer_info) * (tx->ring_size));
  1116. }
  1117. static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
  1118. struct sk_buff *skb)
  1119. {
  1120. int result = 1; /* 1 for the main skb buffer */
  1121. int nr_frags = 0;
  1122. if (skb_is_gso(skb))
  1123. result++; /* requires an extension descriptor */
  1124. nr_frags = skb_shinfo(skb)->nr_frags;
  1125. result += nr_frags; /* 1 for each fragment buffer */
  1126. return result;
  1127. }
  1128. static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
  1129. {
  1130. int last_head = tx->last_head;
  1131. int last_tail = tx->last_tail;
  1132. if (last_tail >= last_head)
  1133. return tx->ring_size - last_tail + last_head - 1;
  1134. else
  1135. return last_head - last_tail - 1;
  1136. }
  1137. void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
  1138. bool enable_timestamping,
  1139. bool enable_onestep_sync)
  1140. {
  1141. if (enable_timestamping)
  1142. tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
  1143. else
  1144. tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
  1145. if (enable_onestep_sync)
  1146. tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
  1147. else
  1148. tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
  1149. }
/* Begin assembling a TX frame: map the head buffer and fill the first
 * descriptor's address/length fields. The descriptor's data0 word is
 * staged in tx->frame_data0 and only committed by a later assembler step,
 * so the hardware never sees a half-built first descriptor.
 * Returns 0, or -ENOMEM if the DMA mapping fails.
 */
static int lan743x_tx_frame_start(struct lan743x_tx *tx,
				  unsigned char *first_buffer,
				  unsigned int first_buffer_length,
				  unsigned int frame_length,
				  bool time_stamp,
				  bool check_sum)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	dma_addr_t dma_ptr;

	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
	tx->frame_first = tx->last_tail;
	tx->frame_tail = tx->frame_first;

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr))
		return -ENOMEM;

	tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
	tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
	tx_descriptor->data3 = (frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = first_buffer_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	/* stage data0: data descriptor, frame start, append FCS */
	tx->frame_data0 = (first_buffer_length &
			   TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FS_ |
			  TX_DESC_DATA0_FCS_;
	if (time_stamp)
		tx->frame_data0 |= TX_DESC_DATA0_TSE_;
	if (check_sum)
		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
				   TX_DESC_DATA0_IPE_ |
				   TX_DESC_DATA0_TPE_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}
/* Insert the LSO extension descriptor after the head descriptor: commit
 * the previous descriptor's staged data0 (marking it extended, and
 * last-segment if the frame has no fragments), then stage an extension
 * descriptor carrying the total payload length for segmentation offload.
 */
static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
				     unsigned int frame_length,
				     int nr_frags)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;

	/* wrap up previous descriptor */
	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
	if (nr_frags <= 0) {
		/* no fragments follow: previous descriptor ends the frame
		 * and should raise a completion interrupt
		 */
		tx->frame_data0 |= TX_DESC_DATA0_LS_;
		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
	}
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = tx->frame_data0;

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];

	/* add extension descriptor */
	tx_descriptor->data1 = 0;
	tx_descriptor->data2 = 0;
	tx_descriptor->data3 = 0;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = 0;
	buffer_info->buffer_length = 0;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	/* stage data0 for the extension descriptor (committed later) */
	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_EXT_ |
			  TX_DESC_DATA0_EXT_LSO_;

	/* data0 will be programmed in one of other frame assembler functions */
}
/* Append one skb page fragment to the frame under assembly: commit the
 * previous descriptor's staged data0, advance to the next slot, DMA-map
 * the fragment and fill the new descriptor. On mapping failure the whole
 * partially-built frame is unwound and the assembler state reset, so the
 * caller must not call lan743x_tx_frame_end afterwards.
 * Returns 0 on success (including zero-length fragments, which are
 * skipped) or -ENOMEM.
 */
static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
					 const struct skb_frag_struct *fragment,
					 unsigned int frame_length)
{
	/* called only from within lan743x_tx_xmit_frame
	 * assuming tx->ring_lock has already been acquired
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	unsigned int fragment_length = 0;
	dma_addr_t dma_ptr;

	fragment_length = skb_frag_size(fragment);
	if (!fragment_length)
		return 0;

	/* wrap up previous descriptor */
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = tx->frame_data0;

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = skb_frag_dma_map(dev, fragment,
				   0, fragment_length,
				   DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr)) {
		int desc_index;

		/* cleanup all previously setup descriptors */
		desc_index = tx->frame_first;
		while (desc_index != tx->frame_tail) {
			lan743x_tx_release_desc(tx, desc_index, true);
			desc_index = lan743x_tx_next_index(tx, desc_index);
		}
		dma_wmb();
		/* reset the frame assembler state; caller frees the skb */
		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
		tx->frame_first = 0;
		tx->frame_data0 = 0;
		tx->frame_tail = 0;
		return -ENOMEM;
	}

	tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
	tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
	tx_descriptor->data3 = (frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = fragment_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;

	/* stage data0 for this fragment descriptor (committed later) */
	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FCS_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}
/* Finish the frame under assembly: mark the final data descriptor as
 * last-segment with interrupt-on-completion, attach the skb (and any
 * timestamp flags) to its buffer_info, commit data0, then ring the TX
 * tail doorbell. The dma_wmb() guarantees all descriptor writes are
 * visible to the device before the doorbell write.
 */
static void lan743x_tx_frame_end(struct lan743x_tx *tx,
				 struct sk_buff *skb,
				 bool time_stamp,
				 bool ignore_sync)
{
	/* called only from within lan743x_tx_xmit_frame
	 * assuming tx->ring_lock has already been acquired
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	u32 tx_tail_flags = 0;

	/* wrap up previous descriptor */
	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
	    TX_DESC_DATA0_DTYPE_DATA_) {
		tx->frame_data0 |= TX_DESC_DATA0_LS_;
		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
	}

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	/* the skb lives on the last descriptor so it is released only
	 * when the whole frame has completed
	 */
	buffer_info->skb = skb;
	if (time_stamp)
		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
	if (ignore_sync)
		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;

	tx_descriptor->data0 = tx->frame_data0;
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx->last_tail = tx->frame_tail;

	dma_wmb();

	/* piggyback interrupt re-enable hints on the doorbell when the
	 * vector uses auto-set semantics
	 */
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
				 TX_TAIL_SET_TOP_INT_EN_;

	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  tx_tail_flags | tx->frame_tail);
	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
}
/* Transmit @skb on this TX channel. Under the ring lock: if the ring
 * lacks space the skb is parked in tx->overflow_skb and the queue is
 * stopped (or dropped outright if it could never fit); otherwise the
 * frame is assembled via the lan743x_tx_frame_* helpers, with optional
 * hardware timestamping and GSO. Always returns NETDEV_TX_OK — the skb
 * is either queued, parked, or freed here.
 */
static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
					 struct sk_buff *skb)
{
	int required_number_of_descriptors = 0;
	unsigned int start_frame_length = 0;
	unsigned int frame_length = 0;
	unsigned int head_length = 0;
	unsigned long irq_flags = 0;
	bool do_timestamp = false;
	bool ignore_sync = false;
	int nr_frags = 0;
	bool gso = false;
	int j;

	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
	spin_lock_irqsave(&tx->ring_lock, irq_flags);
	if (required_number_of_descriptors >
	    lan743x_tx_get_avail_desc(tx)) {
		if (required_number_of_descriptors > (tx->ring_size - 1)) {
			/* can never fit: drop */
			dev_kfree_skb_irq(skb);
		} else {
			/* save to overflow buffer */
			tx->overflow_skb = skb;
			netif_stop_queue(tx->adapter->netdev);
		}
		goto unlock;
	}

	/* space available, transmit skb */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
	    (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		do_timestamp = true;
		if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
			ignore_sync = true;
	}
	head_length = skb_headlen(skb);
	frame_length = skb_pagelen(skb);
	nr_frags = skb_shinfo(skb)->nr_frags;
	start_frame_length = frame_length;
	gso = skb_is_gso(skb);
	if (gso) {
		/* for GSO the first descriptor carries the MSS (min 8) */
		start_frame_length = max(skb_shinfo(skb)->gso_size,
					 (unsigned short)8);
	}

	if (lan743x_tx_frame_start(tx,
				   skb->data, head_length,
				   start_frame_length,
				   do_timestamp,
				   skb->ip_summed == CHECKSUM_PARTIAL)) {
		dev_kfree_skb_irq(skb);
		goto unlock;
	}

	if (gso)
		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);

	if (nr_frags <= 0)
		goto finish;

	for (j = 0; j < nr_frags; j++) {
		const struct skb_frag_struct *frag;

		frag = &(skb_shinfo(skb)->frags[j]);
		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
			/* upon error no need to call
			 * lan743x_tx_frame_end
			 * frame assembler clean up was performed inside
			 * lan743x_tx_frame_add_fragment
			 */
			dev_kfree_skb_irq(skb);
			goto unlock;
		}
	}

finish:
	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);

unlock:
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
	return NETDEV_TX_OK;
}
/* NAPI poll handler for a TX channel.
 *
 * Reclaims completed descriptors under ring_lock, restarts a stopped
 * netdev queue once space is available (or retransmits the saved
 * overflow skb), then re-enables the channel's interrupt on completion.
 * Always reports 0 work done so the NAPI budget is consumed by RX only.
 */
static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
	struct lan743x_adapter *adapter = tx->adapter;
	bool start_transmitter = false;
	unsigned long irq_flags = 0;
	u32 ioc_bit = 0;
	u32 int_sts = 0;

	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
	/* NOTE(review): int_sts is read but not otherwise used below —
	 * presumably a read needed before the W2C clear; confirm.
	 */
	int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
	spin_lock_irqsave(&tx->ring_lock, irq_flags);

	/* clean up tx ring */
	lan743x_tx_release_completed_descriptors(tx);
	if (netif_queue_stopped(adapter->netdev)) {
		if (tx->overflow_skb) {
			/* only restart once the deferred skb fits */
			if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
				lan743x_tx_get_avail_desc(tx))
				start_transmitter = true;
		} else {
			netif_wake_queue(adapter->netdev);
		}
	}
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);

	if (start_transmitter) {
		/* space is now available, transmit overflow skb.
		 * lan743x_tx_xmit_frame re-acquires ring_lock itself,
		 * so the overflow skb is handed off outside the lock.
		 */
		lan743x_tx_xmit_frame(tx, tx->overflow_skb);
		tx->overflow_skb = NULL;
		netif_wake_queue(adapter->netdev);
	}

	if (!napi_complete(napi))
		goto done;

	/* enable isr; the trailing read flushes the posted write */
	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_read(adapter, INT_STS);

done:
	return 0;
}
  1439. static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
  1440. {
  1441. if (tx->head_cpu_ptr) {
  1442. pci_free_consistent(tx->adapter->pdev,
  1443. sizeof(*tx->head_cpu_ptr),
  1444. (void *)(tx->head_cpu_ptr),
  1445. tx->head_dma_ptr);
  1446. tx->head_cpu_ptr = NULL;
  1447. tx->head_dma_ptr = 0;
  1448. }
  1449. kfree(tx->buffer_info);
  1450. tx->buffer_info = NULL;
  1451. if (tx->ring_cpu_ptr) {
  1452. pci_free_consistent(tx->adapter->pdev,
  1453. tx->ring_allocation_size,
  1454. tx->ring_cpu_ptr,
  1455. tx->ring_dma_ptr);
  1456. tx->ring_allocation_size = 0;
  1457. tx->ring_cpu_ptr = NULL;
  1458. tx->ring_dma_ptr = 0;
  1459. }
  1460. tx->ring_size = 0;
  1461. }
/* lan743x_tx_ring_init - allocate DMA resources for one TX channel
 * @tx: TX channel context
 *
 * Allocates, in order: the page-aligned coherent descriptor ring, the
 * per-descriptor buffer_info array, and the coherent head-pointer
 * write-back word. On any failure every partially acquired resource is
 * released via lan743x_tx_ring_cleanup before returning a negative errno.
 */
static int lan743x_tx_ring_init(struct lan743x_tx *tx)
{
	size_t ring_allocation_size = 0;
	void *cpu_ptr = NULL;
	dma_addr_t dma_ptr;
	int ret = -ENOMEM;

	tx->ring_size = LAN743X_TX_RING_SIZE;
	/* ring length must fit in the TX_CFG_B ring-length field */
	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	ring_allocation_size = ALIGN(tx->ring_size *
				     sizeof(struct lan743x_tx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev,
					ring_allocation_size, &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	tx->ring_allocation_size = ring_allocation_size;
	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
	tx->ring_dma_ptr = dma_ptr;
	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev,
					sizeof(*tx->head_cpu_ptr), &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	tx->head_cpu_ptr = cpu_ptr;
	tx->head_dma_ptr = dma_ptr;
	/* write-back address must be 4-byte aligned for the hardware */
	if (tx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}
	return 0;

cleanup:
	lan743x_tx_ring_cleanup(tx);
	return ret;
}
/* Shut down one TX channel: stop the DMAC channel, mask its interrupts,
 * tear down NAPI, disable the TX FIFO, release all descriptors (freeing
 * any overflow skb), and free the ring's DMA resources.
 * The ordering (stop DMA -> mask IRQs -> disable NAPI -> disable FIFO)
 * mirrors the reverse of lan743x_tx_open.
 */
static void lan743x_tx_close(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = tx->adapter;

	/* stop the DMA channel and wait for it to actually halt */
	lan743x_csr_write(adapter,
			  DMAC_CMD,
			  DMAC_CMD_STOP_T_(tx->channel_number));
	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);

	/* mask both DMAC- and top-level interrupt sources */
	lan743x_csr_write(adapter,
			  DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_TX_(tx->channel_number));
	napi_disable(&tx->napi);
	netif_napi_del(&tx->napi);

	/* disable the TX FIFO and wait for the enable bit to clear */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_DIS_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_EN_(tx->channel_number),
				 0, 1000, 20000, 100);

	lan743x_tx_release_all_descriptors(tx);
	if (tx->overflow_skb) {
		dev_kfree_skb(tx->overflow_skb);
		tx->overflow_skb = NULL;
	}
	lan743x_tx_ring_cleanup(tx);
}
/* Bring up one TX channel: allocate the ring, reset/enable the TX FIFO,
 * soft-reset the DMA channel, program base/write-back addresses and the
 * TX_CFG_A/B/C registers, register and enable NAPI, unmask interrupts,
 * and finally start the DMAC channel.
 * Returns 0 on success or the negative errno from ring initialization.
 */
static int lan743x_tx_open(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = NULL;
	u32 data = 0;
	int ret;

	adapter = tx->adapter;
	ret = lan743x_tx_ring_init(tx);
	if (ret)
		return ret;
	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_RESET_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_RESET_(tx->channel_number),
				 0, 1000, 20000, 100);
	/* enable fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_EN_(tx->channel_number));
	/* reset tx channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_TX_SWR_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_TX_SWR_(tx->channel_number),
				 0, 1000, 20000, 100);
	/* Write TX_BASE_ADDR */
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->ring_dma_ptr));
	/* Write TX_CFG_B: ring length, plus 512-byte DMA burst on rev > A0 */
	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= TX_CFG_B_TDMABL_512_;
	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);
	/* Write TX_CFG_A: head-pointer write-back on IOC; extra
	 * prefetch/write-back thresholds only on silicon newer than A0
	 */
	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
	}
	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);
	/* Write TX_HEAD_WRITEBACK_ADDR */
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->head_dma_ptr));
	/* set last head from the hardware's current position */
	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));
	/* write TX_TAIL */
	tx->last_tail = 0;
	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  (u32)(tx->last_tail));
	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_TX_
							 (tx->channel_number));
	netif_tx_napi_add(adapter->netdev,
			  &tx->napi, lan743x_tx_napi_poll,
			  tx->ring_size - 1);
	napi_enable(&tx->napi);
	/* TX_CFG_C: translate the vector's auto-clear/R2C capabilities */
	data = 0;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= TX_CFG_C_TX_INT_EN_R2C_;
	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);
	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
	/* start dmac channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_T_(tx->channel_number));
	return 0;
}
  1623. static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
  1624. {
  1625. return ((++index) % rx->ring_size);
  1626. }
  1627. static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
  1628. {
  1629. int length = 0;
  1630. length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
  1631. return __netdev_alloc_skb(rx->adapter->netdev,
  1632. length, GFP_ATOMIC | GFP_DMA);
  1633. }
/* Attach @skb to ring slot @index: DMA-map its data buffer, fill the
 * descriptor's address/length fields and hand the descriptor to the
 * hardware by setting the OWN bit.
 * Returns 0 on success, -ENOMEM if @skb is NULL or the DMA mapping
 * fails (a NULL skb stored in buffer_info is harmless — release frees
 * only non-NULL entries).
 */
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
					struct sk_buff *skb)
{
	struct lan743x_rx_buffer_info *buffer_info;
	struct lan743x_rx_descriptor *descriptor;
	int length = 0;

	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
	descriptor = &rx->ring_cpu_ptr[index];
	buffer_info = &rx->buffer_info[index];
	buffer_info->skb = skb;
	if (!(buffer_info->skb))
		return -ENOMEM;
	buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
					      buffer_info->skb->data,
					      length,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&rx->adapter->pdev->dev,
			      buffer_info->dma_ptr)) {
		buffer_info->dma_ptr = 0;
		return -ENOMEM;
	}
	buffer_info->buffer_length = length;
	descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
	descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
	descriptor->data3 = 0;
	/* data0 is written last: setting OWN hands the slot to hardware.
	 * NOTE(review): no explicit dma_wmb() before the OWN write —
	 * confirm ordering guarantees for this platform.
	 */
	descriptor->data0 = (RX_DESC_DATA0_OWN_ |
			    (length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
	skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
	return 0;
}
/* Re-arm ring slot @index with its existing (already mapped) buffer by
 * rewriting the descriptor and returning ownership to the hardware.
 * Used when a packet is dropped or an extension descriptor is consumed.
 */
static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
{
	struct lan743x_rx_buffer_info *buffer_info;
	struct lan743x_rx_descriptor *descriptor;

	descriptor = &rx->ring_cpu_ptr[index];
	buffer_info = &rx->buffer_info[index];
	descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
	descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
	descriptor->data3 = 0;
	/* OWN bit written last to hand the slot back to hardware.
	 * NOTE(review): no explicit dma_wmb() precedes this — confirm.
	 */
	descriptor->data0 = (RX_DESC_DATA0_OWN_ |
			    ((buffer_info->buffer_length) &
			    RX_DESC_DATA0_BUF_LENGTH_MASK_));
}
  1677. static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
  1678. {
  1679. struct lan743x_rx_buffer_info *buffer_info;
  1680. struct lan743x_rx_descriptor *descriptor;
  1681. descriptor = &rx->ring_cpu_ptr[index];
  1682. buffer_info = &rx->buffer_info[index];
  1683. memset(descriptor, 0, sizeof(*descriptor));
  1684. if (buffer_info->dma_ptr) {
  1685. dma_unmap_single(&rx->adapter->pdev->dev,
  1686. buffer_info->dma_ptr,
  1687. buffer_info->buffer_length,
  1688. DMA_FROM_DEVICE);
  1689. buffer_info->dma_ptr = 0;
  1690. }
  1691. if (buffer_info->skb) {
  1692. dev_kfree_skb(buffer_info->skb);
  1693. buffer_info->skb = NULL;
  1694. }
  1695. memset(buffer_info, 0, sizeof(*buffer_info));
  1696. }
/* Process at most one received frame from the RX ring.
 *
 * Walks the ring from rx->last_head looking for a complete frame:
 * a descriptor with FS (first segment) set, through one with LS (last
 * segment) set, optionally followed by an EXT (timestamp extension)
 * descriptor. Single-buffer frames are passed to the stack via
 * napi_gro_receive and their slot refilled with a fresh skb; multi-
 * buffer frames are dropped (buffers are sized >= jumbo, so this is
 * not expected). rx->last_tail/last_head advance past everything
 * consumed.
 *
 * Returns RX_PROCESS_RESULT_PACKET_RECEIVED, _PACKET_DROPPED, or
 * _NOTHING_TO_DO when no complete frame is available.
 */
static int lan743x_rx_process_packet(struct lan743x_rx *rx)
{
	struct skb_shared_hwtstamps *hwtstamps = NULL;
	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
	struct lan743x_rx_buffer_info *buffer_info;
	struct lan743x_rx_descriptor *descriptor;
	int current_head_index = -1;
	int extension_index = -1;
	int first_index = -1;
	int last_index = -1;

	/* hardware head position comes from the write-back word */
	current_head_index = *rx->head_cpu_ptr;
	/* sanity-bound both indices before touching the ring */
	if (current_head_index < 0 || current_head_index >= rx->ring_size)
		goto done;
	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
		goto done;
	if (rx->last_head != current_head_index) {
		descriptor = &rx->ring_cpu_ptr[rx->last_head];
		/* still owned by hardware -> not ready yet */
		if (descriptor->data0 & RX_DESC_DATA0_OWN_)
			goto done;
		/* frame must start with an FS descriptor */
		if (!(descriptor->data0 & RX_DESC_DATA0_FS_))
			goto done;
		first_index = rx->last_head;
		if (descriptor->data0 & RX_DESC_DATA0_LS_) {
			last_index = rx->last_head;
		} else {
			int index;

			/* scan forward for the LS descriptor */
			index = lan743x_rx_next_index(rx, first_index);
			while (index != current_head_index) {
				descriptor = &rx->ring_cpu_ptr[index];
				if (descriptor->data0 & RX_DESC_DATA0_OWN_)
					goto done;
				if (descriptor->data0 & RX_DESC_DATA0_LS_) {
					last_index = index;
					break;
				}
				index = lan743x_rx_next_index(rx, index);
			}
		}
		if (last_index >= 0) {
			descriptor = &rx->ring_cpu_ptr[last_index];
			if (descriptor->data0 & RX_DESC_DATA0_EXT_) {
				/* extension is expected to follow */
				int index = lan743x_rx_next_index(rx,
								  last_index);
				if (index != current_head_index) {
					descriptor = &rx->ring_cpu_ptr[index];
					if (descriptor->data0 &
					    RX_DESC_DATA0_OWN_) {
						goto done;
					}
					if (descriptor->data0 &
					    RX_DESC_DATA0_EXT_) {
						extension_index = index;
					} else {
						goto done;
					}
				} else {
					/* extension is not yet available */
					/* prevent processing of this packet */
					first_index = -1;
					last_index = -1;
				}
			}
		}
	}
	if (first_index >= 0 && last_index >= 0) {
		int real_last_index = last_index;
		struct sk_buff *skb = NULL;
		u32 ts_sec = 0;
		u32 ts_nsec = 0;

		/* packet is available */
		if (first_index == last_index) {
			/* single buffer packet */
			struct sk_buff *new_skb = NULL;
			int packet_length;

			new_skb = lan743x_rx_allocate_skb(rx);
			if (!new_skb) {
				/* failed to allocate next skb.
				 * Memory is very low.
				 * Drop this packet and reuse buffer.
				 */
				lan743x_rx_reuse_ring_element(rx, first_index);
				goto process_extension;
			}
			buffer_info = &rx->buffer_info[first_index];
			skb = buffer_info->skb;
			descriptor = &rx->ring_cpu_ptr[first_index];
			/* unmap from dma */
			if (buffer_info->dma_ptr) {
				dma_unmap_single(&rx->adapter->pdev->dev,
						 buffer_info->dma_ptr,
						 buffer_info->buffer_length,
						 DMA_FROM_DEVICE);
				buffer_info->dma_ptr = 0;
				buffer_info->buffer_length = 0;
			}
			buffer_info->skb = NULL;
			packet_length = RX_DESC_DATA0_FRAME_LENGTH_GET_
					(descriptor->data0);
			/* trim the 4-byte FCS off the reported length */
			skb_put(skb, packet_length - 4);
			skb->protocol = eth_type_trans(skb,
						       rx->adapter->netdev);
			/* refill the slot with the freshly allocated skb */
			lan743x_rx_init_ring_element(rx, first_index, new_skb);
		} else {
			int index = first_index;

			/* multi buffer packet not supported */
			/* this should not happen since
			 * buffers are allocated to be at least jumbo size
			 */
			/* clean up buffers */
			if (first_index <= last_index) {
				while ((index >= first_index) &&
				       (index <= last_index)) {
					lan743x_rx_reuse_ring_element(rx,
								      index);
					index = lan743x_rx_next_index(rx,
								      index);
				}
			} else {
				/* range wraps around the end of the ring */
				while ((index >= first_index) ||
				       (index <= last_index)) {
					lan743x_rx_reuse_ring_element(rx,
								      index);
					index = lan743x_rx_next_index(rx,
								      index);
				}
			}
		}
process_extension:
		if (extension_index >= 0) {
			/* extension descriptor carries the RX timestamp */
			descriptor = &rx->ring_cpu_ptr[extension_index];
			buffer_info = &rx->buffer_info[extension_index];
			ts_sec = descriptor->data1;
			ts_nsec = (descriptor->data2 &
				  RX_DESC_DATA2_TS_NS_MASK_);
			lan743x_rx_reuse_ring_element(rx, extension_index);
			real_last_index = extension_index;
		}
		if (!skb) {
			result = RX_PROCESS_RESULT_PACKET_DROPPED;
			goto move_forward;
		}
		if (extension_index < 0)
			goto pass_packet_to_os;
		hwtstamps = skb_hwtstamps(skb);
		if (hwtstamps)
			hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec);
pass_packet_to_os:
		/* pass packet to OS */
		napi_gro_receive(&rx->napi, skb);
		result = RX_PROCESS_RESULT_PACKET_RECEIVED;
move_forward:
		/* push tail and head forward */
		rx->last_tail = real_last_index;
		rx->last_head = lan743x_rx_next_index(rx, real_last_index);
	}
done:
	return result;
}
/* NAPI poll handler for an RX channel.
 *
 * Processes up to @weight received frames; dropped frames do not count
 * against the budget. When the budget is not exhausted and NAPI
 * completes, re-arms the interrupt — either via flag bits folded into
 * the RX_TAIL write or via an explicit INT_EN_SET write, depending on
 * the vector's capabilities.
 * Returns the number of frames delivered to the stack.
 */
static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
	struct lan743x_adapter *adapter = rx->adapter;
	u32 rx_tail_flags = 0;
	int count;

	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
		/* clear int status bit before reading packet */
		lan743x_csr_write(adapter, DMAC_INT_STS,
				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	}
	count = 0;
	while (count < weight) {
		int rx_process_result = -1;

		rx_process_result = lan743x_rx_process_packet(rx);
		if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) {
			count++;
		} else if (rx_process_result ==
			RX_PROCESS_RESULT_NOTHING_TO_DO) {
			break;
		} else if (rx_process_result ==
			RX_PROCESS_RESULT_PACKET_DROPPED) {
			/* drops consume a ring slot but no budget */
			continue;
		}
	}
	rx->frame_count += count;
	/* budget exhausted: stay scheduled, leave interrupts masked */
	if (count == weight)
		goto done;
	if (!napi_complete_done(napi, count))
		goto done;
	/* fold interrupt re-enable bits into the tail write if supported */
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
	} else {
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}

	/* update RX_TAIL */
	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
			  rx_tail_flags | rx->last_tail);
done:
	return count;
}
  1900. static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
  1901. {
  1902. if (rx->buffer_info && rx->ring_cpu_ptr) {
  1903. int index;
  1904. for (index = 0; index < rx->ring_size; index++)
  1905. lan743x_rx_release_ring_element(rx, index);
  1906. }
  1907. if (rx->head_cpu_ptr) {
  1908. pci_free_consistent(rx->adapter->pdev,
  1909. sizeof(*rx->head_cpu_ptr),
  1910. rx->head_cpu_ptr,
  1911. rx->head_dma_ptr);
  1912. rx->head_cpu_ptr = NULL;
  1913. rx->head_dma_ptr = 0;
  1914. }
  1915. kfree(rx->buffer_info);
  1916. rx->buffer_info = NULL;
  1917. if (rx->ring_cpu_ptr) {
  1918. pci_free_consistent(rx->adapter->pdev,
  1919. rx->ring_allocation_size,
  1920. rx->ring_cpu_ptr,
  1921. rx->ring_dma_ptr);
  1922. rx->ring_allocation_size = 0;
  1923. rx->ring_cpu_ptr = NULL;
  1924. rx->ring_dma_ptr = 0;
  1925. }
  1926. rx->ring_size = 0;
  1927. rx->last_head = 0;
  1928. }
/* lan743x_rx_ring_init - allocate DMA resources for one RX channel
 * @rx: RX channel context
 *
 * Allocates the page-aligned coherent descriptor ring, the buffer_info
 * array, and the coherent head write-back word, then populates every
 * ring slot with a mapped skb. On any failure all partially acquired
 * resources are released via lan743x_rx_ring_cleanup.
 * Returns 0 on success or a negative errno.
 */
static int lan743x_rx_ring_init(struct lan743x_rx *rx)
{
	size_t ring_allocation_size = 0;
	dma_addr_t dma_ptr = 0;
	void *cpu_ptr = NULL;
	int ret = -ENOMEM;
	int index = 0;

	rx->ring_size = LAN743X_RX_RING_SIZE;
	if (rx->ring_size <= 1) {
		ret = -EINVAL;
		goto cleanup;
	}
	/* ring length must fit in the RX_CFG_B ring-length field */
	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	ring_allocation_size = ALIGN(rx->ring_size *
				     sizeof(struct lan743x_rx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev,
					ring_allocation_size, &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->ring_allocation_size = ring_allocation_size;
	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
	rx->ring_dma_ptr = dma_ptr;
	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
			  GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev,
					sizeof(*rx->head_cpu_ptr), &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->head_cpu_ptr = cpu_ptr;
	rx->head_dma_ptr = dma_ptr;
	/* hardware requires a 4-byte-aligned write-back address */
	if (rx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->last_head = 0;
	/* arm every slot with a freshly allocated, DMA-mapped skb;
	 * a NULL allocation is caught inside init_ring_element
	 */
	for (index = 0; index < rx->ring_size; index++) {
		struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);

		ret = lan743x_rx_init_ring_element(rx, index, new_skb);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	lan743x_rx_ring_cleanup(rx);
	return ret;
}
/* Shut down one RX channel: disable the RX FIFO, stop the DMAC channel,
 * mask its interrupts, tear down NAPI, and free the ring resources.
 * Note the FIFO is disabled first (opposite of TX close) so no new
 * frames enter the DMA path while it is being stopped.
 */
static void lan743x_rx_close(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;

	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_DIS_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_EN_(rx->channel_number),
				 0, 1000, 20000, 100);
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_STOP_R_(rx->channel_number));
	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);
	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_RX_(rx->channel_number));
	napi_disable(&rx->napi);
	netif_napi_del(&rx->napi);
	lan743x_rx_ring_cleanup(rx);
}
/* Bring up one RX channel: allocate the ring, register NAPI, soft-reset
 * the DMA channel, program base/write-back addresses and RX_CFG_A/B/C,
 * prime RX_TAIL, enable NAPI and interrupts, start the DMAC channel,
 * then reset/configure/enable the RX FIFO (flow control thresholds
 * included).
 * Returns 0 on success or a negative errno, unwinding NAPI and the
 * ring on failure.
 */
static int lan743x_rx_open(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;
	u32 data = 0;
	int ret;

	rx->frame_count = 0;
	ret = lan743x_rx_ring_init(rx);
	if (ret)
		goto return_error;
	netif_napi_add(adapter->netdev,
		       &rx->napi, lan743x_rx_napi_poll,
		       rx->ring_size - 1);
	/* soft-reset the rx dma channel and wait for completion */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_RX_SWR_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_RX_SWR_(rx->channel_number),
				 0, 1000, 20000, 100);
	/* set ring base address */
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->ring_dma_ptr));
	/* set rx write back address */
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->head_dma_ptr));
	data = RX_CFG_A_RX_HP_WB_EN_;
	/* extra write-back/prefetch thresholds on silicon newer than A0 */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
			RX_CFG_A_RX_PF_THRES_SET_(16) |
			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
	}
	/* set RX_CFG_A */
	lan743x_csr_write(adapter,
			  RX_CFG_A(rx->channel_number), data);
	/* set RX_CFG_B: head padding, ring length, timestamp-all-frames */
	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
	data &= ~RX_CFG_B_RX_PAD_MASK_;
	if (!RX_HEAD_PADDING)
		data |= RX_CFG_B_RX_PAD_0_;
	else
		data |= RX_CFG_B_RX_PAD_2_;
	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
	data |= RX_CFG_B_TS_ALL_RX_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= RX_CFG_B_RDMABL_512_;
	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_RX_
							 (rx->channel_number));
	/* set RX_CFG_C: translate vector auto-clear/R2C capabilities */
	data = 0;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= RX_CFG_C_RX_INT_EN_R2C_;
	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);
	/* all slots are pre-armed, so the tail starts one behind head */
	rx->last_tail = ((u32)(rx->ring_size - 1));
	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
			  rx->last_tail);
	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
	/* a freshly reset channel must report head == 0 */
	if (rx->last_head) {
		ret = -EIO;
		goto napi_delete;
	}
	napi_enable(&rx->napi);
	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_RX_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_STS,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_R_(rx->channel_number));
	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_RESET_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_RESET_(rx->channel_number),
				 0, 1000, 20000, 100);
	/* program pause-frame on/off thresholds */
	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
			  FCT_FLOW_CTL_REQ_EN_ |
			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));
	/* enable fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_EN_(rx->channel_number));
	return 0;

napi_delete:
	netif_napi_del(&rx->napi);
	lan743x_rx_ring_cleanup(rx);

return_error:
	return ret;
}
  2114. static int lan743x_netdev_close(struct net_device *netdev)
  2115. {
  2116. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2117. int index;
  2118. lan743x_tx_close(&adapter->tx[0]);
  2119. for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
  2120. lan743x_rx_close(&adapter->rx[index]);
  2121. lan743x_ptp_close(adapter);
  2122. lan743x_phy_close(adapter);
  2123. lan743x_mac_close(adapter);
  2124. lan743x_intr_close(adapter);
  2125. return 0;
  2126. }
/* ndo_open handler: bring up interrupts, MAC, PHY, PTP, the receive
 * filter engine, every RX channel, and finally TX.
 * On failure, unwinds in reverse through the goto ladder; the close_rx
 * label also closes PTP because both RX-open and TX-open failures land
 * there after PTP is already open.
 * Returns 0 on success or the first failing step's negative errno.
 */
static int lan743x_netdev_open(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int index;
	int ret;

	ret = lan743x_intr_open(adapter);
	if (ret)
		goto return_error;
	ret = lan743x_mac_open(adapter);
	if (ret)
		goto close_intr;
	ret = lan743x_phy_open(adapter);
	if (ret)
		goto close_mac;
	ret = lan743x_ptp_open(adapter);
	if (ret)
		goto close_phy;
	lan743x_rfe_open(adapter);
	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		ret = lan743x_rx_open(&adapter->rx[index]);
		if (ret)
			goto close_rx;
	}
	ret = lan743x_tx_open(&adapter->tx[0]);
	if (ret)
		goto close_rx;
	return 0;

close_rx:
	/* only channels that actually opened have a ring allocated */
	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		if (adapter->rx[index].ring_cpu_ptr)
			lan743x_rx_close(&adapter->rx[index]);
	}
	lan743x_ptp_close(adapter);

close_phy:
	lan743x_phy_close(adapter);

close_mac:
	lan743x_mac_close(adapter);

close_intr:
	lan743x_intr_close(adapter);

return_error:
	netif_warn(adapter, ifup, adapter->netdev,
		   "Error opening LAN743x\n");
	return ret;
}
  2171. static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
  2172. struct net_device *netdev)
  2173. {
  2174. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2175. return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
  2176. }
  2177. static int lan743x_netdev_ioctl(struct net_device *netdev,
  2178. struct ifreq *ifr, int cmd)
  2179. {
  2180. if (!netif_running(netdev))
  2181. return -EINVAL;
  2182. if (cmd == SIOCSHWTSTAMP)
  2183. return lan743x_ptp_ioctl(netdev, ifr, cmd);
  2184. return phy_mii_ioctl(netdev->phydev, ifr, cmd);
  2185. }
  2186. static void lan743x_netdev_set_multicast(struct net_device *netdev)
  2187. {
  2188. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2189. lan743x_rfe_set_multicast(adapter);
  2190. }
  2191. static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
  2192. {
  2193. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2194. int ret = 0;
  2195. ret = lan743x_mac_set_mtu(adapter, new_mtu);
  2196. if (!ret)
  2197. netdev->mtu = new_mtu;
  2198. return ret;
  2199. }
  2200. static void lan743x_netdev_get_stats64(struct net_device *netdev,
  2201. struct rtnl_link_stats64 *stats)
  2202. {
  2203. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2204. stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
  2205. stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
  2206. stats->rx_bytes = lan743x_csr_read(adapter,
  2207. STAT_RX_UNICAST_BYTE_COUNT) +
  2208. lan743x_csr_read(adapter,
  2209. STAT_RX_BROADCAST_BYTE_COUNT) +
  2210. lan743x_csr_read(adapter,
  2211. STAT_RX_MULTICAST_BYTE_COUNT);
  2212. stats->tx_bytes = lan743x_csr_read(adapter,
  2213. STAT_TX_UNICAST_BYTE_COUNT) +
  2214. lan743x_csr_read(adapter,
  2215. STAT_TX_BROADCAST_BYTE_COUNT) +
  2216. lan743x_csr_read(adapter,
  2217. STAT_TX_MULTICAST_BYTE_COUNT);
  2218. stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
  2219. lan743x_csr_read(adapter,
  2220. STAT_RX_ALIGNMENT_ERRORS) +
  2221. lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
  2222. lan743x_csr_read(adapter,
  2223. STAT_RX_UNDERSIZE_FRAME_ERRORS) +
  2224. lan743x_csr_read(adapter,
  2225. STAT_RX_OVERSIZE_FRAME_ERRORS);
  2226. stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
  2227. lan743x_csr_read(adapter,
  2228. STAT_TX_EXCESS_DEFERRAL_ERRORS) +
  2229. lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
  2230. stats->rx_dropped = lan743x_csr_read(adapter,
  2231. STAT_RX_DROPPED_FRAMES);
  2232. stats->tx_dropped = lan743x_csr_read(adapter,
  2233. STAT_TX_EXCESSIVE_COLLISION);
  2234. stats->multicast = lan743x_csr_read(adapter,
  2235. STAT_RX_MULTICAST_FRAMES) +
  2236. lan743x_csr_read(adapter,
  2237. STAT_TX_MULTICAST_FRAMES);
  2238. stats->collisions = lan743x_csr_read(adapter,
  2239. STAT_TX_SINGLE_COLLISIONS) +
  2240. lan743x_csr_read(adapter,
  2241. STAT_TX_MULTIPLE_COLLISIONS) +
  2242. lan743x_csr_read(adapter,
  2243. STAT_TX_LATE_COLLISIONS);
  2244. }
  2245. static int lan743x_netdev_set_mac_address(struct net_device *netdev,
  2246. void *addr)
  2247. {
  2248. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2249. struct sockaddr *sock_addr = addr;
  2250. int ret;
  2251. ret = eth_prepare_mac_addr_change(netdev, sock_addr);
  2252. if (ret)
  2253. return ret;
  2254. ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
  2255. lan743x_mac_set_address(adapter, sock_addr->sa_data);
  2256. lan743x_rfe_update_mac_address(adapter);
  2257. return 0;
  2258. }
/* net_device callbacks for the lan743x interface; all handlers are
 * defined earlier in this file.
 */
static const struct net_device_ops lan743x_netdev_ops = {
	.ndo_open = lan743x_netdev_open,
	.ndo_stop = lan743x_netdev_close,
	.ndo_start_xmit = lan743x_netdev_xmit_frame,
	.ndo_do_ioctl = lan743x_netdev_ioctl,
	.ndo_set_rx_mode = lan743x_netdev_set_multicast,
	.ndo_change_mtu = lan743x_netdev_change_mtu,
	.ndo_get_stats64 = lan743x_netdev_get_stats64,
	.ndo_set_mac_address = lan743x_netdev_set_mac_address,
};
/* lan743x_hardware_cleanup - quiesce the chip by clearing every
 * interrupt-enable bit (INT_EN_CLR), so no further interrupts fire
 * after teardown begins.
 */
static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
{
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
}
/* lan743x_mdiobus_cleanup - unregister the MDIO bus registered in
 * lan743x_mdiobus_init (bus memory itself is devm-managed).
 */
static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
{
	mdiobus_unregister(adapter->mdiobus);
}
/* lan743x_full_cleanup - complete teardown used by the remove path.
 * Steps run in reverse order of probe: detach from the network stack
 * first, then MDIO, then silence the hardware, then release PCI
 * resources.
 */
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
	unregister_netdev(adapter->netdev);
	lan743x_mdiobus_cleanup(adapter);
	lan743x_hardware_cleanup(adapter);
	lan743x_pci_cleanup(adapter);
}
  2284. static int lan743x_hardware_init(struct lan743x_adapter *adapter,
  2285. struct pci_dev *pdev)
  2286. {
  2287. struct lan743x_tx *tx;
  2288. int index;
  2289. int ret;
  2290. adapter->intr.irq = adapter->pdev->irq;
  2291. lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
  2292. ret = lan743x_gpio_init(adapter);
  2293. if (ret)
  2294. return ret;
  2295. ret = lan743x_mac_init(adapter);
  2296. if (ret)
  2297. return ret;
  2298. ret = lan743x_phy_init(adapter);
  2299. if (ret)
  2300. return ret;
  2301. ret = lan743x_ptp_init(adapter);
  2302. if (ret)
  2303. return ret;
  2304. lan743x_rfe_update_mac_address(adapter);
  2305. ret = lan743x_dmac_init(adapter);
  2306. if (ret)
  2307. return ret;
  2308. for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
  2309. adapter->rx[index].adapter = adapter;
  2310. adapter->rx[index].channel_number = index;
  2311. }
  2312. tx = &adapter->tx[0];
  2313. tx->adapter = adapter;
  2314. tx->channel_number = 0;
  2315. spin_lock_init(&tx->ring_lock);
  2316. return 0;
  2317. }
  2318. static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
  2319. {
  2320. int ret;
  2321. adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
  2322. if (!(adapter->mdiobus)) {
  2323. ret = -ENOMEM;
  2324. goto return_error;
  2325. }
  2326. adapter->mdiobus->priv = (void *)adapter;
  2327. adapter->mdiobus->read = lan743x_mdiobus_read;
  2328. adapter->mdiobus->write = lan743x_mdiobus_write;
  2329. adapter->mdiobus->name = "lan743x-mdiobus";
  2330. snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
  2331. "pci-%s", pci_name(adapter->pdev));
  2332. if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
  2333. /* LAN7430 uses internal phy at address 1 */
  2334. adapter->mdiobus->phy_mask = ~(u32)BIT(1);
  2335. /* register mdiobus */
  2336. ret = mdiobus_register(adapter->mdiobus);
  2337. if (ret < 0)
  2338. goto return_error;
  2339. return 0;
  2340. return_error:
  2341. return ret;
  2342. }
/**
 * lan743x_pcidev_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @id: entry in lan743x_pcidev_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
  2353. static int lan743x_pcidev_probe(struct pci_dev *pdev,
  2354. const struct pci_device_id *id)
  2355. {
  2356. struct lan743x_adapter *adapter = NULL;
  2357. struct net_device *netdev = NULL;
  2358. int ret = -ENODEV;
  2359. netdev = devm_alloc_etherdev(&pdev->dev,
  2360. sizeof(struct lan743x_adapter));
  2361. if (!netdev)
  2362. goto return_error;
  2363. SET_NETDEV_DEV(netdev, &pdev->dev);
  2364. pci_set_drvdata(pdev, netdev);
  2365. adapter = netdev_priv(netdev);
  2366. adapter->netdev = netdev;
  2367. adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
  2368. NETIF_MSG_LINK | NETIF_MSG_IFUP |
  2369. NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
  2370. netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
  2371. ret = lan743x_pci_init(adapter, pdev);
  2372. if (ret)
  2373. goto return_error;
  2374. ret = lan743x_csr_init(adapter);
  2375. if (ret)
  2376. goto cleanup_pci;
  2377. ret = lan743x_hardware_init(adapter, pdev);
  2378. if (ret)
  2379. goto cleanup_pci;
  2380. ret = lan743x_mdiobus_init(adapter);
  2381. if (ret)
  2382. goto cleanup_hardware;
  2383. adapter->netdev->netdev_ops = &lan743x_netdev_ops;
  2384. adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
  2385. adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
  2386. adapter->netdev->hw_features = adapter->netdev->features;
  2387. /* carrier off reporting is important to ethtool even BEFORE open */
  2388. netif_carrier_off(netdev);
  2389. ret = register_netdev(adapter->netdev);
  2390. if (ret < 0)
  2391. goto cleanup_mdiobus;
  2392. return 0;
  2393. cleanup_mdiobus:
  2394. lan743x_mdiobus_cleanup(adapter);
  2395. cleanup_hardware:
  2396. lan743x_hardware_cleanup(adapter);
  2397. cleanup_pci:
  2398. lan743x_pci_cleanup(adapter);
  2399. return_error:
  2400. pr_warn("Initialization failed\n");
  2401. return ret;
  2402. }
/**
 * lan743x_pcidev_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * this is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void lan743x_pcidev_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	/* full teardown: netdev, mdiobus, hardware quiesce, PCI */
	lan743x_full_cleanup(adapter);
}
/* lan743x_pcidev_shutdown - stop the interface for system shutdown or
 * (via lan743x_pm_suspend) for system sleep.  Detaches the device from
 * the stack under the RTNL lock, closes it if it was running, then
 * disables all interrupts.
 */
static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	netif_device_detach(netdev);

	/* close netdev when netdev is at running state.
	 * For instance, it is true when system goes to sleep by pm-suspend
	 * However, it is false when system goes to sleep by suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_close(netdev);
	rtnl_unlock();

#ifdef CONFIG_PM
	/* preserve config space so resume can restore it */
	pci_save_state(pdev);
#endif

	/* clean up lan743x portion */
	lan743x_hardware_cleanup(adapter);
}
  2437. #ifdef CONFIG_PM_SLEEP
  2438. static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
  2439. {
  2440. return bitrev16(crc16(0xFFFF, buf, len));
  2441. }
/* lan743x_pm_set_wol - program Wake-on-LAN hardware filters and power
 * controls according to adapter->wolopts.
 *
 * Clears all wake-frame filter slots and previous wake settings, then
 * enables, per requested wake option: PHY energy-detect wake, magic
 * packet, unicast/broadcast receive wake, multicast pattern filters
 * (IPv4 and IPv6 multicast prefixes), and an ARP ethertype pattern.
 * Finally commits the accumulated WUCSR / PMT_CTL / MAC_RX values.
 */
static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
{
	/* first bytes of an IPv4 multicast destination MAC (01:00:5E) */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	/* first bytes of an IPv6 multicast destination MAC (33:33) */
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	/* EtherType for ARP (0x0806), matched at frame offset 12,13 */
	const u8 arp_type[2] = { 0x08, 0x06 };
	int mask_index;
	u32 pmtctl;
	u32 wucsr;
	u32 macrx;
	u16 crc;

	/* disable every wake-frame filter slot before reprogramming */
	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);

	/* clear wake settings */
	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
	pmtctl |= PMT_CTL_WUPS_MASK_;
	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);

	macrx = lan743x_csr_read(adapter, MAC_RX);

	wucsr = 0;
	/* filter slots are consumed in order by the options below */
	mask_index = 0;

	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;

	if (adapter->wolopts & WAKE_PHY) {
		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
	}
	if (adapter->wolopts & WAKE_MAGIC) {
		wucsr |= MAC_WUCSR_MPEN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_UCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_BCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_MCAST) {
		/* IPv4 multicast */
		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* mask 0x7: compare the first 3 bytes of the frame */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		/* IPv6 multicast */
		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* mask 0x3: compare the first 2 bytes of the frame */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_ARP) {
		/* set MAC_WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* mask 0x3000: compare bytes 12 and 13 (EtherType) */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}

	/* commit the accumulated settings to the hardware */
	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
	lan743x_csr_write(adapter, MAC_RX, macrx);
}
  2536. static int lan743x_pm_suspend(struct device *dev)
  2537. {
  2538. struct pci_dev *pdev = to_pci_dev(dev);
  2539. struct net_device *netdev = pci_get_drvdata(pdev);
  2540. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2541. int ret;
  2542. lan743x_pcidev_shutdown(pdev);
  2543. /* clear all wakes */
  2544. lan743x_csr_write(adapter, MAC_WUCSR, 0);
  2545. lan743x_csr_write(adapter, MAC_WUCSR2, 0);
  2546. lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
  2547. if (adapter->wolopts)
  2548. lan743x_pm_set_wol(adapter);
  2549. /* Host sets PME_En, put D3hot */
  2550. ret = pci_prepare_to_sleep(pdev);
  2551. return 0;
  2552. }
  2553. static int lan743x_pm_resume(struct device *dev)
  2554. {
  2555. struct pci_dev *pdev = to_pci_dev(dev);
  2556. struct net_device *netdev = pci_get_drvdata(pdev);
  2557. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2558. int ret;
  2559. pci_set_power_state(pdev, PCI_D0);
  2560. pci_restore_state(pdev);
  2561. pci_save_state(pdev);
  2562. ret = lan743x_hardware_init(adapter, pdev);
  2563. if (ret) {
  2564. netif_err(adapter, probe, adapter->netdev,
  2565. "lan743x_hardware_init returned %d\n", ret);
  2566. }
  2567. /* open netdev when netdev is at running state while resume.
  2568. * For instance, it is true when system wakesup after pm-suspend
  2569. * However, it is false when system wakes up after suspend GUI menu
  2570. */
  2571. if (netif_running(netdev))
  2572. lan743x_netdev_open(netdev);
  2573. netif_device_attach(netdev);
  2574. return 0;
  2575. }
/* system-sleep callbacks; compiled only under CONFIG_PM_SLEEP */
static const struct dev_pm_ops lan743x_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
};
  2579. #endif /* CONFIG_PM_SLEEP */
/* PCI IDs this driver binds to: LAN7430 (internal PHY) and LAN7431 */
static const struct pci_device_id lan743x_pcidev_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
	{ 0, }
};
/* PCI driver registration block tying the table and callbacks together */
static struct pci_driver lan743x_pcidev_driver = {
	.name = DRIVER_NAME,
	.id_table = lan743x_pcidev_tbl,
	.probe = lan743x_pcidev_probe,
	.remove = lan743x_pcidev_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &lan743x_pm_ops,
#endif
	.shutdown = lan743x_pcidev_shutdown,
};
/* generate module init/exit that register/unregister the PCI driver */
module_pci_driver(lan743x_pcidev_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");