ixgbe_ethtool.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};
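
/* The macros below fill in the size and offset of each backing counter
 * using the usual sizeof-on-a-NULL-pointer and offsetof() idiom:
 * IXGBE_STAT() points into struct ixgbe_adapter, IXGBE_NETDEV_STAT()
 * into struct rtnl_link_stats64, so ixgbe_get_ethtool_stats() can fetch
 * either kind of counter generically from the table entry alone.
 */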
#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)

static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so we
 * define IXGBE_NUM_RX_QUEUES to evaluate to num_tx_queues. This is used
 * because we do not have a good way to get the max number of rx queues
 * with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
	(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
	/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
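
/* IXGBE_STATS_LEN is the total ETH_SS_STATS count reported to user space
 * (e.g. via `ethtool -S <dev>`): the global table above, then per-queue
 * packet/byte pairs, then per-TC XON/XOFF pause counters. This ordering
 * must match ixgbe_get_strings() and ixgbe_get_ethtool_stats() below.
 */

/* Self-test names reported for `ethtool -t <dev>`; the "(offline)" tests
 * require taking the link down while they run.
 */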
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};

#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
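
/* Driver-private flags, listed by `ethtool --show-priv-flags <dev>` and
 * toggled with `ethtool --set-priv-flags <dev> legacy-rx on|off`;
 * legacy-rx selects the driver's older Rx buffer handling path.
 */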
static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)

/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
			 SUPPORTED_10000baseKX4_Full | \
			 SUPPORTED_10000baseKR_Full)

#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)

static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
{
	if (!ixgbe_isbackplane(hw->phy.media_type))
		return SUPPORTED_10000baseT_Full;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		return SUPPORTED_10000baseKX4_Full;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		return SUPPORTED_10000baseKR_Full;
	default:
		return SUPPORTED_10000baseKX4_Full |
		       SUPPORTED_10000baseKR_Full;
	}
}
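
/* Backs `ethtool <dev>`: reports supported/advertised link modes, the
 * current speed/duplex, port type, autoneg state and pause advertising,
 * derived from the MAC/PHY capabilities queried below.
 */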
static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_10_FULL)
		supported |= SUPPORTED_10baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	advertising = supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			advertising |= ADVERTISED_10baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			advertising |= supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (supported & SUPPORTED_1000baseKX_Full)
				advertising |= ADVERTISED_1000baseKX_Full;
			else
				advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		supported |= SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			supported |= SUPPORTED_TP;
			advertising |= ADVERTISED_TP;
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	supported |= SUPPORTED_Pause;

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		advertising |= ADVERTISED_Pause;
		break;
	case ixgbe_fc_rx_pause:
		advertising |= ADVERTISED_Pause |
			       ADVERTISED_Asym_Pause;
		break;
	case ixgbe_fc_tx_pause:
		advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		advertising &= ~(ADVERTISED_Pause |
				 ADVERTISED_Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
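
/* Backs `ethtool -s <dev> speed ... autoneg ...`: for copper and
 * multispeed-fiber ports this narrows the advertised speed mask and
 * restarts link setup; all other ports only accept the fixed
 * 10G/full/no-autoneg combination.
 */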
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (advertising & ~supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;
		if (advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;
		if (advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;
		if (advertising & ADVERTISED_10baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
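
/* Backs `ethtool -a <dev>`: reports flow-control autoneg and the
 * currently negotiated Rx/Tx pause state.
 */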
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
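
/* Backs `ethtool -A <dev>`: requests a new flow-control mode, which is
 * applied by reinitializing (if running) or resetting the device when
 * anything actually changed.
 */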
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if anything changed, update hw->fc and apply the new settings */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
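
/* msg_enable is the NETIF_MSG_* bitmap controlling driver log verbosity;
 * user space reads it via `ethtool <dev>` and sets it with
 * `ethtool -s <dev> msglvl <bitmap>`.
 */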
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
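
/* Backs `ethtool -d <dev>`: dumps IXGBE_REGS_LEN (1145) 32-bit words of
 * register and statistics state at fixed offsets in the user buffer.
 */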
static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN	1145
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
					/* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
					/* same as RTTQCNRR */

	/* X540 specific DCB registers */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);

	/* Security config registers */
	regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
	regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
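
/* Backs `ethtool -e <dev>` and `ethtool -E <dev>`: read/write the NVM.
 * The device EEPROM is word (16-bit) addressable and little-endian, so
 * byte-granular requests are rounded out to whole words and partial words
 * are handled by read-modify-write. ethtool -E must supply the magic
 * value (vendor id | device id << 16) that the read path reports.
 */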
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}

static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
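
/* Backs `ethtool -i <dev>`: driver name/version, the NVM image id as the
 * firmware version, and the PCI bus address.
 */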
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}
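
/* Backs `ethtool -g <dev>` / `ethtool -G <dev>`: report and resize the
 * Tx/Rx descriptor rings. Resizing while the interface is up allocates
 * the new rings before freeing the old ones, so a failed allocation
 * leaves the old rings in place.
 */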
  910. static void ixgbe_get_ringparam(struct net_device *netdev,
  911. struct ethtool_ringparam *ring)
  912. {
  913. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  914. struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
  915. struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
  916. ring->rx_max_pending = IXGBE_MAX_RXD;
  917. ring->tx_max_pending = IXGBE_MAX_TXD;
  918. ring->rx_pending = rx_ring->count;
  919. ring->tx_pending = tx_ring->count;
  920. }
  921. static int ixgbe_set_ringparam(struct net_device *netdev,
  922. struct ethtool_ringparam *ring)
  923. {
  924. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  925. struct ixgbe_ring *temp_ring;
  926. int i, j, err = 0;
  927. u32 new_rx_count, new_tx_count;
  928. if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
  929. return -EINVAL;
  930. new_tx_count = clamp_t(u32, ring->tx_pending,
  931. IXGBE_MIN_TXD, IXGBE_MAX_TXD);
  932. new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Set up the new Tx resources and then free the old Tx resources,
	 * in that order.  We can then assign the new resources to the rings
	 * via a memcpy.  The advantage of this approach is that we are
	 * guaranteed to still have valid resources even in the case of an
	 * allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
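
/*
 * Illustrative usage (not part of the driver): the ring resizing path
 * above backs ethtool's ring-size controls.  Assuming a hypothetical
 * interface named eth0, it would be exercised from user space with:
 *
 *	ethtool -g eth0				# query current ring sizes
 *	ethtool -G eth0 rx 4096 tx 4096		# request new sizes
 *
 * Requested counts are clamped to the IXGBE_MIN/MAX_TXD and RXD bounds
 * and rounded to the required descriptor multiple before being applied.
 */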

static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
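
/*
 * Illustrative usage (not part of the driver): ixgbe_get_sset_count(),
 * ixgbe_get_ethtool_stats() above and ixgbe_get_strings() below
 * together implement the statistics interface that user space reads
 * with (eth0 being a hypothetical interface name):
 *
 *	ethtool -S eth0		# dump all named counters
 *
 * The string table and the value array are filled in the same order,
 * so the two callbacks must stay in sync.
 */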

static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbe_priv_flags_strings,
		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
	}
}

static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	if (ixgbe_removed(hw->hw_addr)) {
		*data = 1;
		return 1;
	}
	*data = 0;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}

/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};

/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};

static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = ixgbe_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
		val = ixgbe_read_reg(&adapter->hw, reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}
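
/*
 * Worked example (illustrative, not part of the driver): for the FCRTH
 * entries above, mask == write == 0x8007FFF0.  With the first pattern,
 * 0x5A5A5A5A, the value written is
 *
 *	0x5A5A5A5A & 0x8007FFF0 = 0x00025A50
 *
 * and the read-back is compared against
 *
 *	0x5A5A5A5A & 0x8007FFF0 & 0x8007FFF0 = 0x00025A50
 *
 * so only the bits the register actually implements participate in the
 * comparison; reserved bits are masked out of both sides.
 */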

static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbe_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbe_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}
	*data = 0;
	return 0;
}

static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}

static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}

static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = BIT(i);

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
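
/*
 * Worked example (illustrative, not part of the driver): for i == 3 in
 * the loop above, mask == BIT(3) == 0x00000008, so the EIMC/EICS writes
 * of ~mask & 0x00007FFF == 0x00007FF7 mask off and then force every
 * test cause except the one under test.  If the cause under test then
 * shows up in test_icr anyway, an interrupt was posted that should not
 * have been, and the test fails with *data = 3.
 */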

static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	/* Shut down the DMA engines now so they can be reinitialized later.
	 * Since the test rings and the normally used rings should overlap
	 * on queue 0, we can just use the standard disable Rx/Tx calls and
	 * they will take care of disabling the test rings for us.
	 */

	/* first Rx */
	ixgbe_disable_rx(adapter);

	/* now Tx */
	ixgbe_disable_tx(adapter);

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}

static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(adapter, rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	hw->mac.ops.disable_rx(hw);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	rctl |= IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	hw->mac.ops.enable_rx(hw);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}

static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 and X550 need to set the MACC.FLU bit to force link up */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
		break;
	default:
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}

static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}

static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}
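
/*
 * Illustrative layout (not part of the driver): with the 1024-byte
 * frame used by ixgbe_run_loopback_test() below, the function above
 * leaves the buffer as
 *
 *	bytes   0..511   0xFF	(first half of initial fill)
 *	bytes 512..766   0xAA	(second-half run, length 512/2 - 1)
 *	byte  522        0xBE	(check byte at frame_size/2 + 10)
 *	byte  524        0xAF	(check byte at frame_size/2 + 12)
 *	bytes 767..1023  0xFF	(remainder of initial fill)
 *
 * ixgbe_check_lbtest_frame() verifies only byte 3 and the two check
 * bytes, which is enough to detect truncated or corrupted frames.
 */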

static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}

static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (tx_ntc != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *tx_desc;
		struct ixgbe_tx_buffer *tx_buffer;

		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);

		/* if DD is not set transmit has not completed */
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			return count;

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		/* increment Tx next to clean counter */
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;
	}

	while (rx_desc->wb.upper.length) {
		struct ixgbe_rx_buffer *rx_buffer;

		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of received frame */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;
		else
			break;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* increment Rx next to clean counter */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}

static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
	u32 flags_orig = adapter->flags;

	/* DCB can modify the frames on Tx */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring.
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop.
	 */
	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue */
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;

	return ret_val;
}

static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}

static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(hw, "Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		data[2] = 1;
		data[3] = 1;
		data[4] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		struct ixgbe_hw *hw = &adapter->hw;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;

			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					data[4] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		/* Offline tests */
		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbe_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic.
		 */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			ixgbe_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
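
/*
 * Illustrative usage (not part of the driver): the self-test above is
 * driven from user space via ethtool.  Assuming a hypothetical
 * interface named eth0:
 *
 *	ethtool -t eth0 online	# link test only, non-disruptive
 *	ethtool -t eth0 offline	# register, eeprom, interrupt and
 *				# loopback tests; link is disrupted
 *
 * The five result words map to data[0]..data[4] above: register,
 * eeprom, interrupt, loopback and link, where 0 means pass.
 */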

static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}

static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
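
/*
 * Illustrative usage (not part of the driver): assuming a hypothetical
 * interface named eth0, Wake-on-LAN is queried and configured with:
 *
 *	ethtool eth0		# shows "Supports Wake-on" / "Wake-on"
 *	ethtool -s eth0 wol g	# wake on magic packet only
 *	ethtool -s eth0 wol d	# disable wake-up
 *
 * The 'g' flag corresponds to WAKE_MAGIC/IXGBE_WUFC_MAG above; 'u',
 * 'm' and 'b' select unicast, multicast and broadcast wake-up.
 */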

static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
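
/*
 * Illustrative usage (not part of the driver): the identify handler
 * above blinks the port LED so a physical port can be located.
 * Assuming a hypothetical interface named eth0:
 *
 *	ethtool -p eth0 5	# blink the LED for 5 seconds
 *
 * Returning 2 from ETHTOOL_ID_ACTIVE asks the ethtool core to drive
 * the blinking itself, at two on/off cycles per second, via the
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF callbacks.
 */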

static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}
	return false;
}

static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * Do the reset here, at the end, to make sure the EITR==0 case is
	 * handled correctly w.r.t. stopping Tx and changing TXDCTL.WTHRESH
	 * settings; the reset also locks in the RSC enable/disable change,
	 * which requires a reset to take effect.
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
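
/*
 * Worked example (illustrative, not part of the driver): the ITR value
 * is stored left-shifted by 2, so a request such as
 *
 *	ethtool -C eth0 rx-usecs 50	# eth0 is a hypothetical name
 *
 * stores rx_itr_setting = 200, and ixgbe_get_coalesce() reports it
 * back as 200 >> 2 = 50.  The special value rx-usecs 1 keeps dynamic
 * moderation (starting from IXGBE_20K_ITR above), while 0 requests no
 * throttling at all (the EITR==0 case the comment above refers to).
 */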

static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}

static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}

int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input was given this was a delete request: err is 0 if a
	 * rule was successfully found and removed from the list, else
	 * -EINVAL.
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}

static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}

static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	u8 queue;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/* The ring_cookie is either masked into a queue index plus an
	 * ixgbe pool (VF), or it is the special drop index.
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && (ring >= adapter->num_rx_queues))
			return -EINVAL;
		else if (vf &&
			 ((vf > adapter->num_vfs) ||
			   ring >= adapter->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = adapter->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) *
				adapter->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
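
/*
 * Illustrative usage (not part of the driver): assuming a hypothetical
 * interface named eth0, a Flow Director "perfect" filter could be
 * installed and listed from user space with something like:
 *
 *	ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.1 \
 *		dst-port 80 action 6		# steer to Rx queue 6
 *	ethtool -N eth0 flow-type udp4 dst-port 319 action -1	# drop
 *	ethtool -n eth0				# list installed rules
 *
 * Note the one-mask-per-port restriction enforced above: every rule on
 * a port must mask the same set of fields.
 */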

static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}

#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc;
		unsigned int pf_pool = adapter->num_vfs;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
		else
			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
		else
			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
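
/*
 * Illustrative usage (not part of the driver): the handler above backs
 * ethtool's RSS hash-field control.  Assuming a hypothetical interface
 * named eth0:
 *
 *	ethtool -n eth0 rx-flow-hash udp4	# query UDP/IPv4 fields
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn	# hash on IPs + ports
 *	ethtool -N eth0 rx-flow-hash udp4 sd	# hash on IPs only
 *
 * "sdfn" maps to RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3.
 */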
  2546. static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
  2547. {
  2548. struct ixgbe_adapter *adapter = netdev_priv(dev);
  2549. int ret = -EOPNOTSUPP;
  2550. switch (cmd->cmd) {
  2551. case ETHTOOL_SRXCLSRLINS:
  2552. ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
  2553. break;
  2554. case ETHTOOL_SRXCLSRLDEL:
  2555. ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
  2556. break;
  2557. case ETHTOOL_SRXFH:
  2558. ret = ixgbe_set_rss_hash_opt(adapter, cmd);
  2559. break;
  2560. default:
  2561. break;
  2562. }
  2563. return ret;
  2564. }
  2565. static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
  2566. {
  2567. if (adapter->hw.mac.type < ixgbe_mac_X550)
  2568. return 16;
  2569. else
  2570. return 64;
  2571. }
  2572. static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
  2573. {
  2574. return IXGBE_RSS_KEY_SIZE;
  2575. }
static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_rss_indir_tbl_entries(adapter);
}
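/* Copy the cached redirection table to @indir, masking each entry down
 * to the number of RSS queues actually in use.
 */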
static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;

	for (i = 0; i < reta_size; i++)
		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (indir)
		ixgbe_get_reta(adapter, indir);

	if (key)
		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));

	return 0;
}
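/* Update the RSS indirection table and/or hash key from user space.
 * Only the default Toeplitz hash is supported, so any request to change
 * the hash function is rejected.
 */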
static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (hfunc)
		return -EINVAL;

	/* Fill out the redirection table */
	if (indir) {
		int max_queues = min_t(int, adapter->num_rx_queues,
				       ixgbe_rss_indir_tbl_max(adapter));

		/* Allow at least 2 queues w/ SR-IOV. */
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;

		/* Verify user input. */
		for (i = 0; i < reta_entries; i++)
			if (indir[i] >= max_queues)
				return -EINVAL;

		for (i = 0; i < reta_entries; i++)
			adapter->rss_indir_tbl[i] = indir[i];

		ixgbe_store_reta(adapter);
	}

	/* Fill out the rss hash key */
	if (key) {
		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
		ixgbe_store_key(adapter);
	}

	return 0;
}
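/* Report hardware timestamping capabilities. X550-class MACs can
 * timestamp all inbound packets, while X540/82599 are limited to PTP
 * event filters; older parts fall back to the generic software-only
 * timestamp report.
 */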
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* we always support timestamping disabled */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	if (adapter->ptp_clock)
		info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	return 0;
}
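/* Compute the upper bound on combined channels, which depends on the
 * interrupt mode (MSI-X), SR-IOV, DCB traffic class count, and whether
 * ATR (Flow Director sampling) is active.
 */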
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
	unsigned int max_combined;
	u8 tcs = adapter->hw_tcs;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* Limit value based on the queue mask */
		max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = ixgbe_max_rss_indices(adapter);
	}

	return max_combined;
}
static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (adapter->hw_tcs > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}
static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit */
	if (count > max_rss_indices)
		count = max_rss_indices;
	adapter->ring_feature[RING_F_RSS].limit = count;

#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;
#endif

	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, adapter->hw_tcs);
}
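/* Identify the pluggable module's EEPROM layout (SFF-8079 vs SFF-8472)
 * by probing the compliance and addressing-mode bytes over I2C.
 */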
static int ixgbe_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status;
	u8 sff8472_rev, addr_mode;
	bool page_swap = false;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
	    !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
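/* Read the requested window of the module EEPROM one byte at a time,
 * switching to SFF-8472 diagnostic reads past the SFF-8079 range.
 */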
static int ixgbe_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u8 databyte = 0xFF;
	int i = 0;

	if (ee->len == 0)
		return -EINVAL;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	for (i = ee->offset; i < ee->offset + ee->len; i++) {
		/* I2C reads can take a long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}
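/* Translation tables between MAC link-speed bits, firmware EEE
 * link-partner bits, and the legacy ethtool SUPPORTED_* flags.
 */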
static const struct {
	ixgbe_link_speed mac_speed;
	u32 supported;
} ixgbe_ls_map[] = {
	{ IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
	{ IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
	{ IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
	{ IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
};

static const struct {
	u32 lp_advertised;
	u32 mac_speed;
} ixgbe_lp_map[] = {
	{ FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
	{ FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full },
};
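/* Query EEE state from the PHY firmware and translate the result into
 * ethtool_eee fields; EEE is reported active only when an advertised
 * speed overlaps with what the link partner advertises.
 */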
static int
ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
{
	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	s32 rc;
	u16 i;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
	if (rc)
		return rc;

	edata->lp_advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
		if (info[0] & ixgbe_lp_map[i].lp_advertised)
			edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
	}

	edata->supported = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
			edata->supported |= ixgbe_ls_map[i].supported;
	}

	edata->advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
			edata->advertised |= ixgbe_ls_map[i].supported;
	}

	edata->eee_enabled = !!edata->advertised;
	edata->tx_lpi_enabled = edata->eee_enabled;

	if (edata->advertised & edata->lp_advertised)
		edata->eee_active = true;

	return 0;
}
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
		return ixgbe_get_eee_fw(adapter, edata);

	return -EOPNOTSUPP;
}
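/* Only a global EEE on/off toggle is supported; per-speed advertising,
 * tx-lpi, and the LPI timer cannot be changed. Toggling EEE triggers a
 * link reset so the new advertisement takes effect.
 */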
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ethtool_eee eee_data;
	s32 ret_val;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	memset(&eee_data, 0, sizeof(struct ethtool_eee));

	ret_val = ixgbe_get_eee(netdev, &eee_data);
	if (ret_val)
		return ret_val;

	if (eee_data.eee_enabled && !edata->eee_enabled) {
		if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
			e_err(drv, "Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
			e_err(drv,
			      "Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}

		if (eee_data.advertised != edata->advertised) {
			e_err(drv,
			      "Setting EEE advertised speeds is not supported\n");
			return -EINVAL;
		}
	}

	if (eee_data.eee_enabled != edata->eee_enabled) {
		if (edata->eee_enabled) {
			adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised =
						   hw->phy.eee_speeds_supported;
		} else {
			adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised = 0;
		}

		/* reset link */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
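/* The only private flag maps to IXGBE_FLAG2_RX_LEGACY, which selects
 * the legacy RX buffer path; changing it reinits the interface so the
 * RX rings are rebuilt.
 */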
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned int flags2 = adapter->flags2;

	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
		flags2 |= IXGBE_FLAG2_RX_LEGACY;

	if (flags2 != adapter->flags2) {
		adapter->flags2 = flags2;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
	}

	return 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_drvinfo = ixgbe_get_drvinfo,
	.get_regs_len = ixgbe_get_regs_len,
	.get_regs = ixgbe_get_regs,
	.get_wol = ixgbe_get_wol,
	.set_wol = ixgbe_set_wol,
	.nway_reset = ixgbe_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = ixgbe_get_eeprom_len,
	.get_eeprom = ixgbe_get_eeprom,
	.set_eeprom = ixgbe_set_eeprom,
	.get_ringparam = ixgbe_get_ringparam,
	.set_ringparam = ixgbe_set_ringparam,
	.get_pauseparam = ixgbe_get_pauseparam,
	.set_pauseparam = ixgbe_set_pauseparam,
	.get_msglevel = ixgbe_get_msglevel,
	.set_msglevel = ixgbe_set_msglevel,
	.self_test = ixgbe_diag_test,
	.get_strings = ixgbe_get_strings,
	.set_phys_id = ixgbe_set_phys_id,
	.get_sset_count = ixgbe_get_sset_count,
	.get_ethtool_stats = ixgbe_get_ethtool_stats,
	.get_coalesce = ixgbe_get_coalesce,
	.set_coalesce = ixgbe_set_coalesce,
	.get_rxnfc = ixgbe_get_rxnfc,
	.set_rxnfc = ixgbe_set_rxnfc,
	.get_rxfh_indir_size = ixgbe_rss_indir_size,
	.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
	.get_rxfh = ixgbe_get_rxfh,
	.set_rxfh = ixgbe_set_rxfh,
	.get_eee = ixgbe_get_eee,
	.set_eee = ixgbe_set_eee,
	.get_channels = ixgbe_get_channels,
	.set_channels = ixgbe_set_channels,
	.get_priv_flags = ixgbe_get_priv_flags,
	.set_priv_flags = ixgbe_set_priv_flags,
	.get_ts_info = ixgbe_get_ts_info,
	.get_module_info = ixgbe_get_module_info,
	.get_module_eeprom = ixgbe_get_module_eeprom,
	.get_link_ksettings = ixgbe_get_link_ksettings,
	.set_link_ksettings = ixgbe_set_link_ksettings,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}