ixgb_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2008 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/prefetch.h>
#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";

#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgb_pci_tbl[] = {
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);

/* Local Function Prototypes */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(struct timer_list *t);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid);
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
	.name = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe = ixgb_probe,
	.remove = ixgb_remove,
	.err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init
ixgb_init_module(void)
{
	pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
	pr_info("%s\n", ixgb_copyright);

	return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
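	/* Setting every bit in the Interrupt Mask Clear register masks all
	 * interrupt sources; the flush pushes the posted write out to the
	 * device, and synchronize_irq() then waits out any handler that is
	 * already running.
	 */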
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
		  IXGB_INT_TXDW | IXGB_INT_LSC;

	if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
		val |= IXGB_INT_GPI0;
	IXGB_WRITE_REG(&adapter->hw, IMS, val);
	IXGB_WRITE_FLUSH(&adapter->hw);
}

int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err, irq_flags = IRQF_SHARED;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */

	ixgb_rar_set(hw, netdev->dev_addr, 0);
	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/* only enable MSI if bus is in PCI-X mode */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->have_msi = true;
			irq_flags = 0;
		}
		/* proceed to try to request regular interrupt */
	}

	err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
			  netdev->name, netdev);
	if (err) {
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate interrupt Error: %d\n", err);
		return err;
	}
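
	/* The MFS register holds the maximum frame size, left-shifted by
	 * IXGB_MFS_SHIFT; frames larger than the standard Ethernet maximum
	 * additionally require the jumbo frame enable (JFE) bit in CTRL0.
	 */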
	if ((hw->max_frame_size != max_frame) ||
	    (hw->max_frame_size !=
	     (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		if (hw->max_frame_size >
		    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}

	clear_bit(__IXGB_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);
	ixgb_irq_enable(adapter);

	netif_wake_queue(netdev);

	mod_timer(&adapter->watchdog_timer, jiffies);

	return 0;
}

void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	/* prevent the interrupt handler from restarting watchdog */
	set_bit(__IXGB_DOWN, &adapter->flags);

	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);
	/* waiting for NAPI to complete can re-enable interrupts */
	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);

	if (kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;

	ixgb_adapter_stop(hw);
	if (!ixgb_init_hw(hw))
		netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

	/* restore frame size information */
	IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
	if (hw->max_frame_size >
	    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
		u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

		if (!(ctrl0 & IXGB_CTRL0_JFE)) {
			ctrl0 |= IXGB_CTRL0_JFE;
			IXGB_WRITE_REG(hw, CTRL0, ctrl0);
		}
	}
}

static netdev_features_t
ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
	/*
	 * Tx VLAN insertion does not work per HW design when Rx stripping is
	 * disabled.
	 */
	if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int
ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;
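
	/* changed holds only the feature bits that differ from the current
	 * configuration; only Rx checksum and VLAN Rx stripping changes
	 * require the device to be reconfigured below.
	 */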
	if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
		return 0;

	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev)) {
		ixgb_down(adapter, true);
		ixgb_up(adapter);
		ixgb_set_speed_duplex(netdev);
	} else
		ixgb_reset(adapter);

	return 0;
}

static const struct net_device_ops ixgb_netdev_ops = {
	.ndo_open		= ixgb_open,
	.ndo_stop		= ixgb_close,
	.ndo_start_xmit		= ixgb_xmit_frame,
	.ndo_set_rx_mode	= ixgb_set_multi,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgb_set_mac,
	.ndo_change_mtu		= ixgb_change_mtu,
	.ndo_tx_timeout		= ixgb_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgb_vlan_rx_kill_vid,
	.ndo_fix_features	= ixgb_fix_features,
	.ndo_set_features	= ixgb_set_features,
};

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	int pci_using_dac;
	int i;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_dma_mask;
		}
	}

	err = pci_request_regions(pdev, ixgb_driver_name);
	if (err)
		goto err_request_regions;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
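
	/* BAR 0 provides the memory-mapped registers; scan the remaining
	 * BARs for an optional I/O port mapping (recorded in hw.io_base).
	 */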
	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->netdev_ops = &ixgb_netdev_ops;
	ixgb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */

	err = ixgb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_HW_CSUM |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_RX;
	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features |= NETIF_F_RXCSUM;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* MTU range: 68 - 16114 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;

	/* make sure the EEPROM is good */

	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);

	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	netif_info(adapter, probe, adapter->netdev,
		   "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */

	ixgb_reset(adapter);

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
	pci_disable_device(pdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */

	if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	set_bit(__IXGB_DOWN, &adapter->flags);
	return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = ixgb_setup_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	netif_carrier_off(netdev);

	/* allocate receive descriptors */

	err = ixgb_setup_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	err = ixgb_up(adapter);
	if (err)
		goto err_up;

	netif_start_queue(netdev);

	return 0;

err_up:
	ixgb_free_rx_resources(adapter);
err_setup_rx:
	ixgb_free_tx_resources(adapter);
err_setup_tx:
	ixgb_reset(adapter);

	return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	ixgb_down(adapter, true);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * txdr->count;
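
	/* buffer_info is host-side bookkeeping only, so vmalloc'd memory is
	 * fine here; the descriptor ring itself is allocated DMA-coherent
	 * below because the hardware reads it directly.
	 */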
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					 GFP_KERNEL);
	if (!txdr->desc) {
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	u64 tdba = adapter->tx_ring.dma;
	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	u32 tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE |
		(adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					 GFP_KERNEL);

	if (!rxdr->desc) {
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/
static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	u32 rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
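
	/* SECRC: have the hardware strip the Ethernet CRC from received
	 * frames (as the register bit name suggests) */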
	rctl |= IXGB_RCTL_SECRC;

	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
		rctl |= IXGB_RCTL_BSIZE_2048;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
		rctl |= IXGB_RCTL_BSIZE_4096;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
		rctl |= IXGB_RCTL_BSIZE_8192;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
		rctl |= IXGB_RCTL_BSIZE_16384;

	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	u64 rdba = adapter->rx_ring.dma;
	u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	u32 rctl;
	u32 rxcsum;

	/* make sure receives are disabled while setting up the descriptors */

	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */

	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */

	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* due to the hardware errata with RXDCTL, we are unable to use any of
	 * the performance enhancing features of it without causing other
	 * subtle bugs, some of the bugs could include receive length
	 * corruption at high data rates (WTHRESH > 0) and/or receive
	 * descriptor ring irregularities (particularly in hardware cache) */
	IXGB_WRITE_REG(hw, RXDCTL, 0);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if (adapter->rx_csum) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */

	IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
			  adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}

static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
				struct ixgb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}

	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* these fields must always be initialized in tx
	 * buffer_info->length = 0;
	 * buffer_info->next_to_watch = 0; */
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct ixgb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/
static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct ixgb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(&pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			buffer_info->length = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct ixgb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 rctl;

	/* Check for Promiscuous and All Multicast modes */

	rctl = IXGB_READ_REG(hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		/* disable VLAN filtering */
		rctl &= ~IXGB_RCTL_CFIEN;
		rctl &= ~IXGB_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IXGB_RCTL_MPE;
			rctl &= ~IXGB_RCTL_UPE;
		} else {
			rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		}
		/* enable VLAN filtering */
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
	}

	if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		u8 *mta = kmalloc_array(ETH_ALEN,
					IXGB_MAX_NUM_MULTICAST_ADDRESSES,
					GFP_ATOMIC);
		u8 *addr;

		if (!mta)
			goto alloc_failed;

		IXGB_WRITE_REG(hw, RCTL, rctl);

		addr = mta;
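		/* flatten the multicast list into consecutive ETH_ALEN-byte
		 * entries as expected by ixgb_mc_addr_list_update() */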
		netdev_for_each_mc_addr(ha, netdev) {
			memcpy(addr, ha->addr, ETH_ALEN);
			addr += ETH_ALEN;
		}

		ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
		kfree(mta);
	}

alloc_failed:
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		ixgb_vlan_strip_enable(adapter);
	else
		ixgb_vlan_strip_disable(adapter);
}

/**
 * ixgb_watchdog - Timer Call-back
 * @t: pointer to the timer_list embedded in our adapter structure
 **/
static void
ixgb_watchdog(struct timer_list *t)
{
	struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if (adapter->hw.link_up) {
		if (!netif_carrier_ok(netdev)) {
			netdev_info(netdev,
				    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
				    (adapter->hw.fc.type == ixgb_fc_full) ?
				    "RX/TX" :
				    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
				    "RX" :
				    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
				    "TX" : "None");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			netdev_info(netdev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
		}
	}

	ixgb_update_stats(adapter);

	if (!netif_carrier_ok(netdev)) {
		if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM	0x00000001
#define IXGB_TX_FLAGS_VLAN	0x00000002
#define IXGB_TX_FLAGS_TSO	0x00000004
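
/* ixgb_tso - prepare a TSO context descriptor
 *
 * Fills in the IP/TCP checksum offsets, header length and MSS so the
 * hardware can segment the payload itself. Returns 1 if a context
 * descriptor was queued, 0 if the skb is not GSO, or a negative errno.
 */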
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	u16 ipcse, tucse, mss;

	if (likely(skb_is_gso(skb))) {
		struct ixgb_buffer *buffer_info;
		struct iphdr *iph;
		int err;

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(iph->check) - (void *)skb->data;
		ipcse = skb_transport_offset(skb) - 1;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(
						  IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}

	return 0;
}

static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;
		css = skb_checksum_start_offset(skb);
		cso = css + skb->csum_offset;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(u32 *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return true;
	}

	return false;
}

#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)

static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_buffer *buffer_info;
	int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_DATA_PER_TXD);
		/* Workaround for premature desc write-backs
		 * in TSO mode. Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;

		buffer_info->length = size;
		WARN_ON(buffer_info->dma != 0);
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = 0;

		len -= size;
		offset += size;
		count++;
		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_DATA_PER_TXD);

			/* Workaround for premature desc write-backs
			 * in TSO mode. Append 4-byte sentinel desc */
			if (unlikely(mss && (f == (nr_frags - 1))
				     && size == len && size > 8))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma =
				skb_frag_dma_map(&pdev->dev, frag, offset, size,
						 DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = 0;

			len -= size;
			offset += size;
			count++;
		}
	}
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;
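
	/* unwind: walk backwards from the failing descriptor and release
	 * every mapping made so far */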
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}

static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,
	      int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	u32 cmd_type_len = adapter->tx_cmd_type;
	u8 status = 0;
	u8 popts = 0;
	unsigned int i;

	if (tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if (tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if (tx_flags & IXGB_TX_FLAGS_VLAN)
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if (++i == tx_ring->count) i = 0;
	}

	tx_desc->cmd_type_len |=
		cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
			      struct ixgb_desc_ring *tx_ring, int size)
{
	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgb_maybe_stop_tx(netdev, size);
}

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			  (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
	+ 1 /* one more needed for sentinel TSO workaround */
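
/* DESC_NEEDED is the worst case for a single skb: the linear data and each
 * page-sized fragment may need several descriptors of at most
 * IXGB_MAX_DATA_PER_TXD bytes, plus one for the context descriptor and one
 * for the TSO sentinel workaround above.
 */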
static netdev_tx_t
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	int vlan_id = 0;
	int count = 0;
	int tso;

	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
					DESC_NEEDED)))
		return NETDEV_TX_BUSY;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = skb_vlan_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso))
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if (ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	count = ixgb_tx_map(adapter, skb, first);

	if (count) {
		ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
		/* Make sure there is space in the ring for the next send. */
		ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
	} else {
		dev_kfree_skb_any(skb);
		adapter->tx_ring.buffer_info[first].time_stamp = 0;
		adapter->tx_ring.next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void
ixgb_tx_timeout(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

static void
ixgb_tx_timeout_task(struct work_struct *work)
{
	struct ixgb_adapter *adapter =
		container_of(work, struct ixgb_adapter, tx_timeout_task);

	adapter->tx_timeout_count++;
	ixgb_down(adapter, true);
	ixgb_up(adapter);
}

/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */

	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgb_up(adapter);

	return 0;
}
  1273. /**
  1274. * ixgb_update_stats - Update the board statistics counters.
  1275. * @adapter: board private structure
  1276. **/
  1277. void
  1278. ixgb_update_stats(struct ixgb_adapter *adapter)
  1279. {
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset */
	if (pci_channel_offline(pdev))
		return;

	if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);

		/* fix up multicast stats by removing broadcasts */
		if (multi >= bcast)
			multi -= bcast;

		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
		adapter->stats.mprch += (multi >> 32);
		adapter->stats.bprcl += bcast_l;
		adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	}
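	/* The remaining statistics registers are assumed to clear on read,
	 * which is why every read below is accumulated (+=) into the
	 * software copy; 64-bit quantities are split across low ("L") /
	 * high ("H") register pairs.
	 */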
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
	/* Fill out the OS statistics structure */
	netdev->stats.rx_packets = adapter->stats.gprcl;
	netdev->stats.tx_packets = adapter->stats.gptcl;
	netdev->stats.rx_bytes = adapter->stats.gorcl;
	netdev->stats.tx_bytes = adapter->stats.gotcl;
	netdev->stats.multicast = adapter->stats.mprcl;
	netdev->stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	netdev->stats.rx_errors =
		/* adapter->stats.rnbc + */ adapter->stats.crcerrs +
		adapter->stats.ruc +
		adapter->stats.roc /*+ adapter->stats.rlec */ +
		adapter->stats.icbc +
		adapter->stats.ecbc + adapter->stats.mpc;

	/* see above
	 * netdev->stats.rx_length_errors = adapter->stats.rlec;
	 */

	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_fifo_errors = adapter->stats.mpc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;
	netdev->stats.rx_over_errors = adapter->stats.mpc;

	netdev->stats.tx_errors = 0;
	netdev->stats.rx_frame_errors = 0;
	netdev->stats.tx_aborted_errors = 0;
	netdev->stats.tx_carrier_errors = 0;
	netdev->stats.tx_fifo_errors = 0;
	netdev->stats.tx_heartbeat_errors = 0;
	netdev->stats.tx_window_errors = 0;
}
#define IXGB_MAX_INTR 10

/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/

static irqreturn_t
ixgb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
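	/* the ICR read below is assumed to be clear-on-read, acknowledging
	 * all currently asserted interrupt causes in a single access */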
	u32 icr = IXGB_READ_REG(hw, ICR);

	if (unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies);

	if (napi_schedule_prep(&adapter->napi)) {
		/* Disable interrupts and register for poll. The flush
		 * of the posted write is intentionally left out. */
		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
/**
 * ixgb_clean - NAPI Rx polling callback
 * @napi: napi struct pointer
 * @budget: max number of receives to clean
 **/

static int
ixgb_clean(struct napi_struct *napi, int budget)
{
	struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
	int work_done = 0;

	ixgb_clean_tx_irq(adapter);
	ixgb_clean_rx_irq(adapter, &work_done, budget);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			ixgb_irq_enable(adapter);
	}

	return work_done;
}
/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static bool
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);
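	/* next_to_watch records the index of the last descriptor of each
	 * packet, so completion is detected per packet: once the EOP
	 * descriptor reports Descriptor Done (DD), every descriptor up to
	 * and including it can be reclaimed.
	 */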
	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		rmb(); /* read buffer_info after eop_desc */

		for (cleaned = false; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			if (tx_desc->popts &
			    (IXGB_TX_DESC_POPTS_TXSM |
			     IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			*(u32 *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if (++i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}
	tx_ring->next_to_clean = i;

	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean. */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
		    && !(IXGB_READ_REG(&adapter->hw, STATUS) &
			 IXGB_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			netif_err(adapter, drv, adapter->netdev,
				  "Detected Tx Unit Hang\n"
				  "  TDH                  <%x>\n"
				  "  TDT                  <%x>\n"
				  "  next_to_use          <%x>\n"
				  "  next_to_clean        <%x>\n"
				  "buffer_info[next_to_clean]\n"
				  "  time_stamp           <%lx>\n"
				  "  next_to_watch        <%x>\n"
				  "  jiffies              <%lx>\n"
				  "  next_to_watch.status <%x>\n",
				  IXGB_READ_REG(&adapter->hw, TDH),
				  IXGB_READ_REG(&adapter->hw, TDT),
				  tx_ring->next_to_use,
				  tx_ring->next_to_clean,
				  tx_ring->buffer_info[eop].time_stamp,
				  eop,
				  jiffies,
				  eop_desc->status);
			netif_stop_queue(netdev);
		}
	}

	return cleaned;
}
/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
 **/

static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
		 struct ixgb_rx_desc *rx_desc,
		 struct sk_buff *skb)
{
	/* Ignore Checksum bit is set OR
	 * TCP Checksum has not been calculated
	 */
	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
	    (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
		skb_checksum_none_assert(skb);
		return;
	}

	/* At this point we know the hardware did the TCP checksum */
	/* now look at the TCP checksum error bit */
	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
		/* let the stack verify checksum errors */
		skb_checksum_none_assert(skb);
		adapter->hw_csum_rx_error++;
	} else {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_rx_good++;
	}
}
/*
 * this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void ixgb_check_copybreak(struct napi_struct *napi,
				 struct ixgb_buffer *buffer_info,
				 u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	if (length > copybreak)
		return;

	new_skb = napi_alloc_skb(napi, length);
	if (!new_skb)
		return;
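	/* Copy including the NET_IP_ALIGN headroom so the IP header keeps
	 * the same alignment in the replacement skb; the original
	 * full-size skb is handed back to buffer_info below so the Rx
	 * ring can recycle it.
	 */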
	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}
/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 * @work_done: output parameter, number of packets cleaned
 * @work_to_do: maximum amount of work allowed (NAPI budget)
 **/

static bool
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc, *next_rxd;
	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
	u32 length;
	unsigned int i, j;
	int cleaned_count = 0;
	bool cleaned = false;

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];
	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = IXGB_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);
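		/* also warm the cache for the buffer_info two entries
		 * ahead; the entry immediately ahead is picked up at the
		 * bottom of the loop as next_buffer */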
		j = i + 1;
		if (j == rx_ring->count)
			j = 0;
		next2_buffer = &rx_ring->buffer_info[j];
		prefetch(next2_buffer);

		next_buffer = &rx_ring->buffer_info[i];
		cleaned = true;
		cleaned_count++;

		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 buffer_info->length,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		rx_desc->length = 0;

		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
			/* All receives must fit into a single buffer */
			pr_debug("Receive packet consumed multiple buffers length<%x>\n",
				 length);
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		if (unlikely(rx_desc->errors &
			     (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
			      IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);

		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		if (status & IXGB_RX_DESC_STATUS_VP)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rx_desc->special));

		netif_receive_skb(skb);

rxdesc_done:
		/* clean up descriptor, might be written over by hw */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
			ixgb_alloc_rx_buffers(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = IXGB_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		ixgb_alloc_rx_buffers(adapter, cleaned_count);

	return cleaned;
}
/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: how many buffers to allocate
 **/

static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct ixgb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	long cleancount;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	cleancount = IXGB_DESC_UNUSED(rx_ring);
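	/* A few slots are always left unfilled (see the comment below),
	 * presumably so that next_to_use can never catch up with
	 * next_to_clean; a completely full ring would otherwise look
	 * identical to an empty one.
	 */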
	/* leave three descriptors unused */
	while (--cleancount > 2 && cleaned_count--) {
		/* recycle! its good for you */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		rx_desc = IXGB_RX_DESC(*rx_ring, i);
		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		/* guarantee DD bit not set now before h/w gets descriptor
		 * this is the rest of the workaround for h/w double
		 * writeback. */
		rx_desc->status = 0;

		if (++i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);
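		/* After the decrement, i is the index of the last filled
		 * descriptor; RDT is written with that index below, which
		 * appears to be the tail semantic this hardware expects.
		 */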
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();

		IXGB_WRITE_REG(&adapter->hw, RDT, i);
	}
}
static void
ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
{
	u32 ctrl;

	/* enable VLAN tag insert/strip */
	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl |= IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}

static void
ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
{
	u32 ctrl;

	/* disable VLAN tag insert/strip */
	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl &= ~IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}
static int
ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	/* add VID to filter table */
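	/* The VLAN filter table is a 4096-bit bitmap stored as 128 32-bit
	 * registers: bits 11:5 of the VID select the register and bits
	 * 4:0 select the bit within it.
	 */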
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
	set_bit(vid, adapter->active_vlans);

	return 0;
}
static int
ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
	clear_bit(vid, adapter->active_vlans);

	return 0;
}
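/* Re-program the hardware VLAN filter table from the software bitmap;
 * needed because the VFTA registers are assumed not to survive a chip
 * reset.
 */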
static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
/**
 * ixgb_io_error_detected - called when PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
 *
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the ixgb_probe() routine.
 */
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform card reset only on one instance of the card */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	pci_set_master(pdev);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	ixgb_reset(adapter);

	/* Make sure the EEPROM is good */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "After reset, the EEPROM checksum is not valid\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		netif_err(adapter, probe, adapter->netdev,
			  "After reset, invalid MAC address\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgb_io_resume - called when it's OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it's OK to resume
 * normal operation. Implementation resembles the second-half
 * of the ixgb_probe() routine.
 */
static void ixgb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		if (ixgb_up(adapter)) {
			pr_err("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
	mod_timer(&adapter->watchdog_timer, jiffies);
}
/* ixgb_main.c */