fjes_main.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * FUJITSU Extended Socket Network Device driver
  4. * Copyright (c) 2015 FUJITSU LIMITED
  5. */
  6. #include <linux/module.h>
  7. #include <linux/types.h>
  8. #include <linux/nls.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/netdevice.h>
  11. #include <linux/interrupt.h>
  12. #include "fjes.h"
  13. #include "fjes_trace.h"
  14. #define DRV_VERSION "1.2"
  15. #define DRV_NAME "fjes"
  16. char fjes_driver_name[] = DRV_NAME;
  17. char fjes_driver_version[] = DRV_VERSION;
  18. static const char fjes_driver_string[] =
  19. "FUJITSU Extended Socket Network Device Driver";
  20. static const char fjes_copyright[] =
  21. "Copyright (c) 2015 FUJITSU LIMITED";
  22. MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
  23. MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
  24. MODULE_LICENSE("GPL");
  25. MODULE_VERSION(DRV_VERSION);
  26. #define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
  27. static const struct acpi_device_id fjes_acpi_ids[] = {
  28. {ACPI_MOTHERBOARD_RESOURCE_HID, 0},
  29. {"", 0},
  30. };
  31. MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
  32. static bool is_extended_socket_device(struct acpi_device *device)
  33. {
  34. struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
  35. char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
  36. union acpi_object *str;
  37. acpi_status status;
  38. int result;
  39. status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
  40. if (ACPI_FAILURE(status))
  41. return false;
  42. str = buffer.pointer;
  43. result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
  44. str->string.length, UTF16_LITTLE_ENDIAN,
  45. str_buf, sizeof(str_buf) - 1);
  46. str_buf[result] = 0;
  47. if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
  48. kfree(buffer.pointer);
  49. return false;
  50. }
  51. kfree(buffer.pointer);
  52. return true;
  53. }
  54. static int acpi_check_extended_socket_status(struct acpi_device *device)
  55. {
  56. unsigned long long sta;
  57. acpi_status status;
  58. status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
  59. if (ACPI_FAILURE(status))
  60. return -ENODEV;
  61. if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
  62. (sta & ACPI_STA_DEVICE_ENABLED) &&
  63. (sta & ACPI_STA_DEVICE_UI) &&
  64. (sta & ACPI_STA_DEVICE_FUNCTIONING)))
  65. return -ENODEV;
  66. return 0;
  67. }
  68. static acpi_status
  69. fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
  70. {
  71. struct acpi_resource_address32 *addr;
  72. struct acpi_resource_irq *irq;
  73. struct resource *res = data;
  74. switch (acpi_res->type) {
  75. case ACPI_RESOURCE_TYPE_ADDRESS32:
  76. addr = &acpi_res->data.address32;
  77. res[0].start = addr->address.minimum;
  78. res[0].end = addr->address.minimum +
  79. addr->address.address_length - 1;
  80. break;
  81. case ACPI_RESOURCE_TYPE_IRQ:
  82. irq = &acpi_res->data.irq;
  83. if (irq->interrupt_count != 1)
  84. return AE_ERROR;
  85. res[1].start = irq->interrupts[0];
  86. res[1].end = irq->interrupts[0];
  87. break;
  88. default:
  89. break;
  90. }
  91. return AE_OK;
  92. }
  93. static struct resource fjes_resource[] = {
  94. DEFINE_RES_MEM(0, 1),
  95. DEFINE_RES_IRQ(0)
  96. };
  97. static int fjes_acpi_add(struct acpi_device *device)
  98. {
  99. struct platform_device *plat_dev;
  100. acpi_status status;
  101. if (!is_extended_socket_device(device))
  102. return -ENODEV;
  103. if (acpi_check_extended_socket_status(device))
  104. return -ENODEV;
  105. status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
  106. fjes_get_acpi_resource, fjes_resource);
  107. if (ACPI_FAILURE(status))
  108. return -ENODEV;
  109. /* create platform_device */
  110. plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
  111. ARRAY_SIZE(fjes_resource));
  112. if (IS_ERR(plat_dev))
  113. return PTR_ERR(plat_dev);
  114. device->driver_data = plat_dev;
  115. return 0;
  116. }
  117. static void fjes_acpi_remove(struct acpi_device *device)
  118. {
  119. struct platform_device *plat_dev;
  120. plat_dev = (struct platform_device *)acpi_driver_data(device);
  121. platform_device_unregister(plat_dev);
  122. }
  123. static struct acpi_driver fjes_acpi_driver = {
  124. .name = DRV_NAME,
  125. .class = DRV_NAME,
  126. .ids = fjes_acpi_ids,
  127. .ops = {
  128. .add = fjes_acpi_add,
  129. .remove = fjes_acpi_remove,
  130. },
  131. };
  132. static int fjes_setup_resources(struct fjes_adapter *adapter)
  133. {
  134. struct net_device *netdev = adapter->netdev;
  135. struct ep_share_mem_info *buf_pair;
  136. struct fjes_hw *hw = &adapter->hw;
  137. unsigned long flags;
  138. int result;
  139. int epidx;
  140. mutex_lock(&hw->hw_info.lock);
  141. result = fjes_hw_request_info(hw);
  142. switch (result) {
  143. case 0:
  144. for (epidx = 0; epidx < hw->max_epid; epidx++) {
  145. hw->ep_shm_info[epidx].es_status =
  146. hw->hw_info.res_buf->info.info[epidx].es_status;
  147. hw->ep_shm_info[epidx].zone =
  148. hw->hw_info.res_buf->info.info[epidx].zone;
  149. }
  150. break;
  151. default:
  152. case -ENOMSG:
  153. case -EBUSY:
  154. adapter->force_reset = true;
  155. mutex_unlock(&hw->hw_info.lock);
  156. return result;
  157. }
  158. mutex_unlock(&hw->hw_info.lock);
  159. for (epidx = 0; epidx < (hw->max_epid); epidx++) {
  160. if ((epidx != hw->my_epid) &&
  161. (hw->ep_shm_info[epidx].es_status ==
  162. FJES_ZONING_STATUS_ENABLE)) {
  163. fjes_hw_raise_interrupt(hw, epidx,
  164. REG_ICTL_MASK_INFO_UPDATE);
  165. hw->ep_shm_info[epidx].ep_stats
  166. .send_intr_zoneupdate += 1;
  167. }
  168. }
  169. msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
  170. for (epidx = 0; epidx < (hw->max_epid); epidx++) {
  171. if (epidx == hw->my_epid)
  172. continue;
  173. buf_pair = &hw->ep_shm_info[epidx];
  174. spin_lock_irqsave(&hw->rx_status_lock, flags);
  175. fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
  176. netdev->mtu);
  177. spin_unlock_irqrestore(&hw->rx_status_lock, flags);
  178. if (fjes_hw_epid_is_same_zone(hw, epidx)) {
  179. mutex_lock(&hw->hw_info.lock);
  180. result =
  181. fjes_hw_register_buff_addr(hw, epidx, buf_pair);
  182. mutex_unlock(&hw->hw_info.lock);
  183. switch (result) {
  184. case 0:
  185. break;
  186. case -ENOMSG:
  187. case -EBUSY:
  188. default:
  189. adapter->force_reset = true;
  190. return result;
  191. }
  192. hw->ep_shm_info[epidx].ep_stats
  193. .com_regist_buf_exec += 1;
  194. }
  195. }
  196. return 0;
  197. }
  198. static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
  199. {
  200. struct fjes_hw *hw = &adapter->hw;
  201. fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
  202. adapter->unset_rx_last = true;
  203. napi_schedule(&adapter->napi);
  204. }
  205. static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
  206. {
  207. struct fjes_hw *hw = &adapter->hw;
  208. enum ep_partner_status status;
  209. unsigned long flags;
  210. set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
  211. status = fjes_hw_get_partner_ep_status(hw, src_epid);
  212. trace_fjes_stop_req_irq_pre(hw, src_epid, status);
  213. switch (status) {
  214. case EP_PARTNER_WAITING:
  215. spin_lock_irqsave(&hw->rx_status_lock, flags);
  216. hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
  217. FJES_RX_STOP_REQ_DONE;
  218. spin_unlock_irqrestore(&hw->rx_status_lock, flags);
  219. clear_bit(src_epid, &hw->txrx_stop_req_bit);
  220. fallthrough;
  221. case EP_PARTNER_UNSHARE:
  222. case EP_PARTNER_COMPLETE:
  223. default:
  224. set_bit(src_epid, &adapter->unshare_watch_bitmask);
  225. if (!work_pending(&adapter->unshare_watch_task))
  226. queue_work(adapter->control_wq,
  227. &adapter->unshare_watch_task);
  228. break;
  229. case EP_PARTNER_SHARED:
  230. set_bit(src_epid, &hw->epstop_req_bit);
  231. if (!work_pending(&hw->epstop_task))
  232. queue_work(adapter->control_wq, &hw->epstop_task);
  233. break;
  234. }
  235. trace_fjes_stop_req_irq_post(hw, src_epid);
  236. }
  237. static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
  238. int src_epid)
  239. {
  240. struct fjes_hw *hw = &adapter->hw;
  241. enum ep_partner_status status;
  242. unsigned long flags;
  243. status = fjes_hw_get_partner_ep_status(hw, src_epid);
  244. trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
  245. switch (status) {
  246. case EP_PARTNER_UNSHARE:
  247. case EP_PARTNER_COMPLETE:
  248. default:
  249. break;
  250. case EP_PARTNER_WAITING:
  251. if (src_epid < hw->my_epid) {
  252. spin_lock_irqsave(&hw->rx_status_lock, flags);
  253. hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
  254. FJES_RX_STOP_REQ_DONE;
  255. spin_unlock_irqrestore(&hw->rx_status_lock, flags);
  256. clear_bit(src_epid, &hw->txrx_stop_req_bit);
  257. set_bit(src_epid, &adapter->unshare_watch_bitmask);
  258. if (!work_pending(&adapter->unshare_watch_task))
  259. queue_work(adapter->control_wq,
  260. &adapter->unshare_watch_task);
  261. }
  262. break;
  263. case EP_PARTNER_SHARED:
  264. if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
  265. FJES_RX_STOP_REQ_REQUEST) {
  266. set_bit(src_epid, &hw->epstop_req_bit);
  267. if (!work_pending(&hw->epstop_task))
  268. queue_work(adapter->control_wq,
  269. &hw->epstop_task);
  270. }
  271. break;
  272. }
  273. trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
  274. }
  275. static void fjes_update_zone_irq(struct fjes_adapter *adapter,
  276. int src_epid)
  277. {
  278. struct fjes_hw *hw = &adapter->hw;
  279. if (!work_pending(&hw->update_zone_task))
  280. queue_work(adapter->control_wq, &hw->update_zone_task);
  281. }
  282. static irqreturn_t fjes_intr(int irq, void *data)
  283. {
  284. struct fjes_adapter *adapter = data;
  285. struct fjes_hw *hw = &adapter->hw;
  286. irqreturn_t ret;
  287. u32 icr;
  288. icr = fjes_hw_capture_interrupt_status(hw);
  289. if (icr & REG_IS_MASK_IS_ASSERT) {
  290. if (icr & REG_ICTL_MASK_RX_DATA) {
  291. fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
  292. hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
  293. .recv_intr_rx += 1;
  294. }
  295. if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
  296. fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
  297. hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
  298. .recv_intr_stop += 1;
  299. }
  300. if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
  301. fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
  302. hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
  303. .recv_intr_unshare += 1;
  304. }
  305. if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
  306. fjes_hw_set_irqmask(hw,
  307. REG_ICTL_MASK_TXRX_STOP_DONE, true);
  308. if (icr & REG_ICTL_MASK_INFO_UPDATE) {
  309. fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
  310. hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
  311. .recv_intr_zoneupdate += 1;
  312. }
  313. ret = IRQ_HANDLED;
  314. } else {
  315. ret = IRQ_NONE;
  316. }
  317. return ret;
  318. }
  319. static int fjes_request_irq(struct fjes_adapter *adapter)
  320. {
  321. struct net_device *netdev = adapter->netdev;
  322. int result = -1;
  323. adapter->interrupt_watch_enable = true;
  324. if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
  325. queue_delayed_work(adapter->control_wq,
  326. &adapter->interrupt_watch_task,
  327. FJES_IRQ_WATCH_DELAY);
  328. }
  329. if (!adapter->irq_registered) {
  330. result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
  331. IRQF_SHARED, netdev->name, adapter);
  332. if (result)
  333. adapter->irq_registered = false;
  334. else
  335. adapter->irq_registered = true;
  336. }
  337. return result;
  338. }
  339. static void fjes_free_irq(struct fjes_adapter *adapter)
  340. {
  341. struct fjes_hw *hw = &adapter->hw;
  342. adapter->interrupt_watch_enable = false;
  343. cancel_delayed_work_sync(&adapter->interrupt_watch_task);
  344. fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
  345. if (adapter->irq_registered) {
  346. free_irq(adapter->hw.hw_res.irq, adapter);
  347. adapter->irq_registered = false;
  348. }
  349. }
  350. static void fjes_free_resources(struct fjes_adapter *adapter)
  351. {
  352. struct net_device *netdev = adapter->netdev;
  353. struct fjes_device_command_param param;
  354. struct ep_share_mem_info *buf_pair;
  355. struct fjes_hw *hw = &adapter->hw;
  356. bool reset_flag = false;
  357. unsigned long flags;
  358. int result;
  359. int epidx;
  360. for (epidx = 0; epidx < hw->max_epid; epidx++) {
  361. if (epidx == hw->my_epid)
  362. continue;
  363. mutex_lock(&hw->hw_info.lock);
  364. result = fjes_hw_unregister_buff_addr(hw, epidx);
  365. mutex_unlock(&hw->hw_info.lock);
  366. hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;
  367. if (result)
  368. reset_flag = true;
  369. buf_pair = &hw->ep_shm_info[epidx];
  370. spin_lock_irqsave(&hw->rx_status_lock, flags);
  371. fjes_hw_setup_epbuf(&buf_pair->tx,
  372. netdev->dev_addr, netdev->mtu);
  373. spin_unlock_irqrestore(&hw->rx_status_lock, flags);
  374. clear_bit(epidx, &hw->txrx_stop_req_bit);
  375. }
  376. if (reset_flag || adapter->force_reset) {
  377. result = fjes_hw_reset(hw);
  378. adapter->force_reset = false;
  379. if (result)
  380. adapter->open_guard = true;
  381. hw->hw_info.buffer_share_bit = 0;
  382. memset((void *)&param, 0, sizeof(param));
  383. param.req_len = hw->hw_info.req_buf_size;
  384. param.req_start = __pa(hw->hw_info.req_buf);
  385. param.res_len = hw->hw_info.res_buf_size;
  386. param.res_start = __pa(hw->hw_info.res_buf);
  387. param.share_start = __pa(hw->hw_info.share->ep_status);
  388. fjes_hw_init_command_registers(hw, &param);
  389. }
  390. }
  391. /* fjes_open - Called when a network interface is made active */
  392. static int fjes_open(struct net_device *netdev)
  393. {
  394. struct fjes_adapter *adapter = netdev_priv(netdev);
  395. struct fjes_hw *hw = &adapter->hw;
  396. int result;
  397. if (adapter->open_guard)
  398. return -ENXIO;
  399. result = fjes_setup_resources(adapter);
  400. if (result)
  401. goto err_setup_res;
  402. hw->txrx_stop_req_bit = 0;
  403. hw->epstop_req_bit = 0;
  404. napi_enable(&adapter->napi);
  405. fjes_hw_capture_interrupt_status(hw);
  406. result = fjes_request_irq(adapter);
  407. if (result)
  408. goto err_req_irq;
  409. fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
  410. netif_tx_start_all_queues(netdev);
  411. netif_carrier_on(netdev);
  412. return 0;
  413. err_req_irq:
  414. fjes_free_irq(adapter);
  415. napi_disable(&adapter->napi);
  416. err_setup_res:
  417. fjes_free_resources(adapter);
  418. return result;
  419. }
  420. /* fjes_close - Disables a network interface */
  421. static int fjes_close(struct net_device *netdev)
  422. {
  423. struct fjes_adapter *adapter = netdev_priv(netdev);
  424. struct fjes_hw *hw = &adapter->hw;
  425. unsigned long flags;
  426. int epidx;
  427. netif_tx_stop_all_queues(netdev);
  428. netif_carrier_off(netdev);
  429. fjes_hw_raise_epstop(hw);
  430. napi_disable(&adapter->napi);
  431. spin_lock_irqsave(&hw->rx_status_lock, flags);
  432. for (epidx = 0; epidx < hw->max_epid; epidx++) {
  433. if (epidx == hw->my_epid)
  434. continue;
  435. if (fjes_hw_get_partner_ep_status(hw, epidx) ==
  436. EP_PARTNER_SHARED)
  437. adapter->hw.ep_shm_info[epidx]
  438. .tx.info->v1i.rx_status &=
  439. ~FJES_RX_POLL_WORK;
  440. }
  441. spin_unlock_irqrestore(&hw->rx_status_lock, flags);
  442. fjes_free_irq(adapter);
  443. cancel_delayed_work_sync(&adapter->interrupt_watch_task);
  444. cancel_work_sync(&adapter->unshare_watch_task);
  445. adapter->unshare_watch_bitmask = 0;
  446. cancel_work_sync(&adapter->raise_intr_rxdata_task);
  447. cancel_work_sync(&adapter->tx_stall_task);
  448. cancel_work_sync(&hw->update_zone_task);
  449. cancel_work_sync(&hw->epstop_task);
  450. fjes_hw_wait_epstop(hw);
  451. fjes_free_resources(adapter);
  452. return 0;
  453. }
  454. static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
  455. void *data, size_t len)
  456. {
  457. int retval;
  458. retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
  459. data, len);
  460. if (retval)
  461. return retval;
  462. adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
  463. FJES_TX_DELAY_SEND_PENDING;
  464. if (!work_pending(&adapter->raise_intr_rxdata_task))
  465. queue_work(adapter->txrx_wq,
  466. &adapter->raise_intr_rxdata_task);
  467. retval = 0;
  468. return retval;
  469. }
  470. static netdev_tx_t
  471. fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
  472. {
  473. struct fjes_adapter *adapter = netdev_priv(netdev);
  474. struct fjes_hw *hw = &adapter->hw;
  475. int max_epid, my_epid, dest_epid;
  476. enum ep_partner_status pstatus;
  477. struct netdev_queue *cur_queue;
  478. char shortpkt[VLAN_ETH_HLEN];
  479. bool is_multi, vlan;
  480. struct ethhdr *eth;
  481. u16 queue_no = 0;
  482. u16 vlan_id = 0;
  483. netdev_tx_t ret;
  484. char *data;
  485. int len;
  486. ret = NETDEV_TX_OK;
  487. is_multi = false;
  488. cur_queue = netdev_get_tx_queue(netdev, queue_no);
  489. eth = (struct ethhdr *)skb->data;
  490. my_epid = hw->my_epid;
  491. vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
  492. data = skb->data;
  493. len = skb->len;
  494. if (is_multicast_ether_addr(eth->h_dest)) {
  495. dest_epid = 0;
  496. max_epid = hw->max_epid;
  497. is_multi = true;
  498. } else if (is_local_ether_addr(eth->h_dest)) {
  499. dest_epid = eth->h_dest[ETH_ALEN - 1];
  500. max_epid = dest_epid + 1;
  501. if ((eth->h_dest[0] == 0x02) &&
  502. (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
  503. eth->h_dest[3] | eth->h_dest[4])) &&
  504. (dest_epid < hw->max_epid)) {
  505. ;
  506. } else {
  507. dest_epid = 0;
  508. max_epid = 0;
  509. ret = NETDEV_TX_OK;
  510. adapter->stats64.tx_packets += 1;
  511. hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
  512. adapter->stats64.tx_bytes += len;
  513. hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
  514. }
  515. } else {
  516. dest_epid = 0;
  517. max_epid = 0;
  518. ret = NETDEV_TX_OK;
  519. adapter->stats64.tx_packets += 1;
  520. hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
  521. adapter->stats64.tx_bytes += len;
  522. hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
  523. }
  524. for (; dest_epid < max_epid; dest_epid++) {
  525. if (my_epid == dest_epid)
  526. continue;
  527. pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
  528. if (pstatus != EP_PARTNER_SHARED) {
  529. if (!is_multi)
  530. hw->ep_shm_info[dest_epid].ep_stats
  531. .tx_dropped_not_shared += 1;
  532. ret = NETDEV_TX_OK;
  533. } else if (!fjes_hw_check_epbuf_version(
  534. &adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
  535. /* version is NOT 0 */
  536. adapter->stats64.tx_carrier_errors += 1;
  537. hw->ep_shm_info[dest_epid].net_stats
  538. .tx_carrier_errors += 1;
  539. hw->ep_shm_info[dest_epid].ep_stats
  540. .tx_dropped_ver_mismatch += 1;
  541. ret = NETDEV_TX_OK;
  542. } else if (!fjes_hw_check_mtu(
  543. &adapter->hw.ep_shm_info[dest_epid].rx,
  544. netdev->mtu)) {
  545. adapter->stats64.tx_dropped += 1;
  546. hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
  547. adapter->stats64.tx_errors += 1;
  548. hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
  549. hw->ep_shm_info[dest_epid].ep_stats
  550. .tx_dropped_buf_size_mismatch += 1;
  551. ret = NETDEV_TX_OK;
  552. } else if (vlan &&
  553. !fjes_hw_check_vlan_id(
  554. &adapter->hw.ep_shm_info[dest_epid].rx,
  555. vlan_id)) {
  556. hw->ep_shm_info[dest_epid].ep_stats
  557. .tx_dropped_vlanid_mismatch += 1;
  558. ret = NETDEV_TX_OK;
  559. } else {
  560. if (len < VLAN_ETH_HLEN) {
  561. memset(shortpkt, 0, VLAN_ETH_HLEN);
  562. memcpy(shortpkt, skb->data, skb->len);
  563. len = VLAN_ETH_HLEN;
  564. data = shortpkt;
  565. }
  566. if (adapter->tx_retry_count == 0) {
  567. adapter->tx_start_jiffies = jiffies;
  568. adapter->tx_retry_count = 1;
  569. } else {
  570. adapter->tx_retry_count++;
  571. }
  572. if (fjes_tx_send(adapter, dest_epid, data, len)) {
  573. if (is_multi) {
  574. ret = NETDEV_TX_OK;
  575. } else if (
  576. ((long)jiffies -
  577. (long)adapter->tx_start_jiffies) >=
  578. FJES_TX_RETRY_TIMEOUT) {
  579. adapter->stats64.tx_fifo_errors += 1;
  580. hw->ep_shm_info[dest_epid].net_stats
  581. .tx_fifo_errors += 1;
  582. adapter->stats64.tx_errors += 1;
  583. hw->ep_shm_info[dest_epid].net_stats
  584. .tx_errors += 1;
  585. ret = NETDEV_TX_OK;
  586. } else {
  587. netif_trans_update(netdev);
  588. hw->ep_shm_info[dest_epid].ep_stats
  589. .tx_buffer_full += 1;
  590. netif_tx_stop_queue(cur_queue);
  591. if (!work_pending(&adapter->tx_stall_task))
  592. queue_work(adapter->txrx_wq,
  593. &adapter->tx_stall_task);
  594. ret = NETDEV_TX_BUSY;
  595. }
  596. } else {
  597. if (!is_multi) {
  598. adapter->stats64.tx_packets += 1;
  599. hw->ep_shm_info[dest_epid].net_stats
  600. .tx_packets += 1;
  601. adapter->stats64.tx_bytes += len;
  602. hw->ep_shm_info[dest_epid].net_stats
  603. .tx_bytes += len;
  604. }
  605. adapter->tx_retry_count = 0;
  606. ret = NETDEV_TX_OK;
  607. }
  608. }
  609. }
  610. if (ret == NETDEV_TX_OK) {
  611. dev_kfree_skb(skb);
  612. if (is_multi) {
  613. adapter->stats64.tx_packets += 1;
  614. hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
  615. adapter->stats64.tx_bytes += 1;
  616. hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
  617. }
  618. }
  619. return ret;
  620. }
  621. static void
  622. fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
  623. {
  624. struct fjes_adapter *adapter = netdev_priv(netdev);
  625. memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
  626. }
  627. static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
  628. {
  629. struct fjes_adapter *adapter = netdev_priv(netdev);
  630. bool running = netif_running(netdev);
  631. struct fjes_hw *hw = &adapter->hw;
  632. unsigned long flags;
  633. int ret = -EINVAL;
  634. int idx, epidx;
  635. for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
  636. if (new_mtu <= fjes_support_mtu[idx]) {
  637. new_mtu = fjes_support_mtu[idx];
  638. if (new_mtu == netdev->mtu)
  639. return 0;
  640. ret = 0;
  641. break;
  642. }
  643. }
  644. if (ret)
  645. return ret;
  646. if (running) {
  647. spin_lock_irqsave(&hw->rx_status_lock, flags);
  648. for (epidx = 0; epidx < hw->max_epid; epidx++) {
  649. if (epidx == hw->my_epid)
  650. continue;
  651. hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
  652. ~FJES_RX_MTU_CHANGING_DONE;
  653. }
  654. spin_unlock_irqrestore(&hw->rx_status_lock, flags);
  655. netif_tx_stop_all_queues(netdev);
  656. netif_carrier_off(netdev);
  657. cancel_work_sync(&adapter->tx_stall_task);
  658. napi_disable(&adapter->napi);
  659. msleep(1000);
  660. netif_tx_stop_all_queues(netdev);
  661. }
  662. WRITE_ONCE(netdev->mtu, new_mtu);
  663. if (running) {
  664. for (epidx = 0; epidx < hw->max_epid; epidx++) {
  665. if (epidx == hw->my_epid)
  666. continue;
  667. spin_lock_irqsave(&hw->rx_status_lock, flags);
  668. fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
  669. netdev->dev_addr,
  670. netdev->mtu);
  671. hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
  672. FJES_RX_MTU_CHANGING_DONE;
  673. spin_unlock_irqrestore(&hw->rx_status_lock, flags);
  674. }
  675. netif_tx_wake_all_queues(netdev);
  676. netif_carrier_on(netdev);
  677. napi_enable(&adapter->napi);
  678. napi_schedule(&adapter->napi);
  679. }
  680. return ret;
  681. }
  682. static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
  683. {
  684. struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
  685. netif_tx_wake_queue(queue);
  686. }
  687. static int fjes_vlan_rx_add_vid(struct net_device *netdev,
  688. __be16 proto, u16 vid)
  689. {
  690. struct fjes_adapter *adapter = netdev_priv(netdev);
  691. bool ret = true;
  692. int epid;
  693. for (epid = 0; epid < adapter->hw.max_epid; epid++) {
  694. if (epid == adapter->hw.my_epid)
  695. continue;
  696. if (!fjes_hw_check_vlan_id(
  697. &adapter->hw.ep_shm_info[epid].tx, vid))
  698. ret = fjes_hw_set_vlan_id(
  699. &adapter->hw.ep_shm_info[epid].tx, vid);
  700. }
  701. return ret ? 0 : -ENOSPC;
  702. }
  703. static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
  704. __be16 proto, u16 vid)
  705. {
  706. struct fjes_adapter *adapter = netdev_priv(netdev);
  707. int epid;
  708. for (epid = 0; epid < adapter->hw.max_epid; epid++) {
  709. if (epid == adapter->hw.my_epid)
  710. continue;
  711. fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
  712. }
  713. return 0;
  714. }
  715. static const struct net_device_ops fjes_netdev_ops = {
  716. .ndo_open = fjes_open,
  717. .ndo_stop = fjes_close,
  718. .ndo_start_xmit = fjes_xmit_frame,
  719. .ndo_get_stats64 = fjes_get_stats64,
  720. .ndo_change_mtu = fjes_change_mtu,
  721. .ndo_tx_timeout = fjes_tx_retry,
  722. .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
  723. .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
  724. };
  725. /* fjes_netdev_setup - netdevice initialization routine */
  726. static void fjes_netdev_setup(struct net_device *netdev)
  727. {
  728. ether_setup(netdev);
  729. netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
  730. netdev->netdev_ops = &fjes_netdev_ops;
  731. fjes_set_ethtool_ops(netdev);
  732. netdev->mtu = fjes_support_mtu[3];
  733. netdev->min_mtu = fjes_support_mtu[0];
  734. netdev->max_mtu = fjes_support_mtu[3];
  735. netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
  736. }
  737. static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
  738. int start_epid)
  739. {
  740. struct fjes_hw *hw = &adapter->hw;
  741. enum ep_partner_status pstatus;
  742. int max_epid, cur_epid;
  743. int i;
  744. max_epid = hw->max_epid;
  745. start_epid = (start_epid + 1 + max_epid) % max_epid;
  746. for (i = 0; i < max_epid; i++) {
  747. cur_epid = (start_epid + i) % max_epid;
  748. if (cur_epid == hw->my_epid)
  749. continue;
  750. pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
  751. if (pstatus == EP_PARTNER_SHARED) {
  752. if (!fjes_hw_epbuf_rx_is_empty(
  753. &hw->ep_shm_info[cur_epid].rx))
  754. return cur_epid;
  755. }
  756. }
  757. return -1;
  758. }
  759. static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
  760. int *cur_epid)
  761. {
  762. void *frame;
  763. *cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
  764. if (*cur_epid < 0)
  765. return NULL;
  766. frame =
  767. fjes_hw_epbuf_rx_curpkt_get_addr(
  768. &adapter->hw.ep_shm_info[*cur_epid].rx, psize);
  769. return frame;
  770. }
/* Drop the current packet at the head of @cur_epid's shared RX buffer,
 * consuming the slot returned by the last fjes_rxframe_get().
 */
static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}
/* NAPI poll handler: drain pending frames from all shared partner EPs'
 * RX rings into the stack, at most @budget packets per invocation.
 * Returns the number of packets processed.
 */
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
		container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	/* Advertise to every shared partner that we are actively polling,
	 * so fjes_raise_intr_rxdata_task() can skip raising RX interrupts
	 * toward us while FJES_RX_POLL_WORK is set.
	 */
	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				.tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				/* skb allocation failed: account the drop
				 * in both the adapter totals and the
				 * per-EP statistics.
				 */
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_errors += 1;
			} else {
				skb_put_data(skb, frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
						.multicast += 1;
				}
			}

			/* Consume the ring slot whether or not the skb
			 * allocation succeeded, so we never re-read it.
			 */
			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		/* Keep polling for a short grace period (< 3 jiffies since
		 * the last received frame) in case more traffic follows;
		 * otherwise withdraw FJES_RX_POLL_WORK and unmask the RX
		 * data interrupt (mask = false).
		 */
		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_schedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						.info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}
  859. static int fjes_sw_init(struct fjes_adapter *adapter)
  860. {
  861. struct net_device *netdev = adapter->netdev;
  862. netif_napi_add(netdev, &adapter->napi, fjes_poll);
  863. return 0;
  864. }
  865. static void fjes_force_close_task(struct work_struct *work)
  866. {
  867. struct fjes_adapter *adapter = container_of(work,
  868. struct fjes_adapter, force_close_task);
  869. struct net_device *netdev = adapter->netdev;
  870. rtnl_lock();
  871. dev_close(netdev);
  872. rtnl_unlock();
  873. }
/* Work item run after a TX stall: wait for every shared partner's TX
 * ring to have room again, then wake the queue; requeues itself while
 * some ring stays full.
 */
static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	/* Stall has outlasted the timeout: stop waiting for the rings and
	 * simply restart the queue.
	 */
	if (((long)jiffies -
		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	/* Sample the rings a few times before giving up this pass. */
	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			/* Partner is in the middle of an MTU change:
			 * abort without waking or requeueing.
			 */
			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	usleep_range(50, 100);

	/* Some ring is still full: retry from the workqueue. */
	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}
/* Work item that raises an RX-data interrupt toward each shared partner
 * EP with a pending delayed send, unless that partner is already polling
 * (its FJES_RX_POLL_WORK bit is set by its fjes_poll()).
 */
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	/* Phase 1: snapshot each shared partner's tx_status into
	 * tx_status_work, and clear SEND_PENDING in the shared buffer so
	 * a send racing in after this point is picked up on the next run.
	 */
	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
			    FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	/* Phase 2: raise the interrupt for every snapshotted PENDING
	 * partner that is still shared and is not currently polling.
	 */
	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
		}
	}

	/* NOTE(review): unconditional delay before the work item returns;
	 * presumably rate-limits interrupt raising — confirm intent.
	 */
	usleep_range(500, 1000);
}
/* Work item that watches partner EPs leaving the shared state: waits up
 * to ~3 s for each EP flagged in unshare_watch_bitmask (or with a
 * pending TX/RX stop request) to finish, unregisters its shared buffer,
 * and reinitializes the local TX buffer. A second pass force-unregisters
 * anything still marked in buffer_unshare_reserve_bit.
 */
static void fjes_watch_unshare_task(struct work_struct *work)
{
	struct fjes_adapter *adapter =
	container_of(work, struct fjes_adapter, unshare_watch_task);

	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;

	int unshare_watch, unshare_reserve;
	int max_epid, my_epid, epidx;
	int stop_req, stop_req_done;
	ulong unshare_watch_bitmask;
	unsigned long flags;
	int wait_time = 0;
	int is_shared;
	int ret;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	/* Take ownership of the watch bitmask; new requests accumulate in
	 * the adapter field for a later run.
	 */
	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
	adapter->unshare_watch_bitmask = 0;

	/* Poll in 100 ms steps, for at most 3000 ms total. */
	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
	       (wait_time < 3000)) {
		for (epidx = 0; epidx < max_epid; epidx++) {
			if (epidx == my_epid)
				continue;

			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
							   epidx);

			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);

			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
					FJES_RX_STOP_REQ_DONE;

			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);

			unshare_reserve = test_bit(epidx,
						   &hw->hw_info.buffer_unshare_reserve_bit);

			/* Skip EPs that are not yet ready to unregister.
			 * NOTE(review): "(is_shared && (!is_shared || ...))"
			 * — the inner !is_shared is always false here, so
			 * this reduces to (is_shared && !stop_req_done);
			 * confirm against the intended condition.
			 */
			if ((!stop_req ||
			     (is_shared && (!is_shared || !stop_req_done))) &&
			    (is_shared || !unshare_watch || !unshare_reserve))
				continue;

			mutex_lock(&hw->hw_info.lock);
			ret = fjes_hw_unregister_buff_addr(hw, epidx);
			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				/* Unregister failed: force a reset by
				 * closing the device (unless a forced
				 * close is already queued).
				 */
				if (!work_pending(
					&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);
			hw->ep_shm_info[epidx].ep_stats
				.com_unregist_buf_exec += 1;

			/* Reset the local TX buffer for this EP. */
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(epidx, &hw->txrx_stop_req_bit);
			clear_bit(epidx, &unshare_watch_bitmask);
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
		}

		msleep(100);
		wait_time += 100;
	}

	/* Second pass: force-unregister any EP whose unshare reservation
	 * is still outstanding after the wait loop timed out.
	 */
	if (hw->hw_info.buffer_unshare_reserve_bit) {
		for (epidx = 0; epidx < max_epid; epidx++) {
			if (epidx == my_epid)
				continue;

			if (test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				mutex_lock(&hw->hw_info.lock);

				ret = fjes_hw_unregister_buff_addr(hw, epidx);
				switch (ret) {
				case 0:
					break;
				case -ENOMSG:
				case -EBUSY:
				default:
					if (!work_pending(
						&adapter->force_close_task)) {
						adapter->force_reset = true;
						schedule_work(
							&adapter->force_close_task);
					}
					break;
				}
				mutex_unlock(&hw->hw_info.lock);

				hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epidx, &hw->txrx_stop_req_bit);
				clear_bit(epidx, &unshare_watch_bitmask);
				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
			}

			/* EP was watched but not reserved: just withdraw
			 * the STOP_REQ_DONE status bit.
			 */
			if (test_bit(epidx, &unshare_watch_bitmask)) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
					~FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}
	}
}
  1070. static void fjes_irq_watch_task(struct work_struct *work)
  1071. {
  1072. struct fjes_adapter *adapter = container_of(to_delayed_work(work),
  1073. struct fjes_adapter, interrupt_watch_task);
  1074. local_irq_disable();
  1075. fjes_intr(adapter->hw.hw_res.irq, adapter);
  1076. local_irq_enable();
  1077. if (fjes_rxframe_search_exist(adapter, 0) >= 0)
  1078. napi_schedule(&adapter->napi);
  1079. if (adapter->interrupt_watch_enable) {
  1080. if (!delayed_work_pending(&adapter->interrupt_watch_task))
  1081. queue_delayed_work(adapter->control_wq,
  1082. &adapter->interrupt_watch_task,
  1083. FJES_IRQ_WATCH_DELAY);
  1084. }
  1085. }
/* fjes_probe - Device Initialization Routine
 *
 * Allocates the netdevice and adapter, creates the txrx/control
 * workqueues, maps the platform MEM/IRQ resources into the hw struct,
 * initializes the hardware, and registers the netdevice. On failure the
 * error labels unwind in reverse order of acquisition.
 */
static int fjes_probe(struct platform_device *plat_dev)
{
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;
	struct fjes_hw *hw;
	u8 addr[ETH_ALEN];
	int err;

	err = -ENOMEM;
	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,
				 FJES_MAX_QUEUES);
	if (!netdev)
		goto err_out;

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;
	hw = &adapter->hw;
	hw->back = adapter;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
	if (err)
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	/* WQ_MEM_RECLAIM: these queues service the network data path. */
	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->txrx_wq)) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
					      WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->control_wq)) {
		err = -ENOMEM;
		goto err_free_txrx_wq;
	}

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	/* Shared-memory window and interrupt come from the platform
	 * device resources.
	 */
	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -EINVAL;
		goto err_free_control_wq;
	}
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	if (hw->hw_res.irq < 0) {
		err = hw->hw_res.irq;
		goto err_free_control_wq;
	}

	err = fjes_hw_init(&adapter->hw);
	if (err)
		goto err_free_control_wq;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	addr[0] = 2;
	addr[1] = 0;
	addr[2] = 0;
	addr[3] = 0;
	addr[4] = 0;
	addr[5] = hw->my_epid; /* EPID */
	eth_hw_addr_set(netdev, addr);

	err = register_netdev(netdev);
	if (err)
		goto err_hw_exit;

	netif_carrier_off(netdev);

	fjes_dbg_adapter_init(adapter);

	return 0;

err_hw_exit:
	fjes_hw_exit(&adapter->hw);
err_free_control_wq:
	destroy_workqueue(adapter->control_wq);
err_free_txrx_wq:
	destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
	free_netdev(netdev);
err_out:
	return err;
}
/* fjes_remove - Device Removal Routine
 *
 * Tears down in reverse of fjes_probe(): debugfs, pending work items,
 * workqueues, netdevice, hardware, NAPI, and finally the netdev memory.
 */
static void fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	fjes_dbg_adapter_exit(adapter);

	/* Cancel all work before destroying the queues they run on. */
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);
}
/* Platform driver bound to the fjes platform device created by the ACPI
 * side of this driver.
 */
static struct platform_driver fjes_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = fjes_probe,
	.remove_new = fjes_remove,
};
  1200. static acpi_status
  1201. acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
  1202. void *context, void **return_value)
  1203. {
  1204. struct acpi_device *device;
  1205. bool *found = context;
  1206. device = acpi_fetch_acpi_dev(obj_handle);
  1207. if (!device)
  1208. return AE_OK;
  1209. if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
  1210. return AE_OK;
  1211. if (!is_extended_socket_device(device))
  1212. return AE_OK;
  1213. if (acpi_check_extended_socket_status(device))
  1214. return AE_OK;
  1215. *found = true;
  1216. return AE_CTRL_TERMINATE;
  1217. }
  1218. /* fjes_init_module - Driver Registration Routine */
  1219. static int __init fjes_init_module(void)
  1220. {
  1221. bool found = false;
  1222. int result;
  1223. acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
  1224. acpi_find_extended_socket_device, NULL, &found,
  1225. NULL);
  1226. if (!found)
  1227. return -ENODEV;
  1228. pr_info("%s - version %s - %s\n",
  1229. fjes_driver_string, fjes_driver_version, fjes_copyright);
  1230. fjes_dbg_init();
  1231. result = platform_driver_register(&fjes_driver);
  1232. if (result < 0) {
  1233. fjes_dbg_exit();
  1234. return result;
  1235. }
  1236. result = acpi_bus_register_driver(&fjes_acpi_driver);
  1237. if (result < 0)
  1238. goto fail_acpi_driver;
  1239. return 0;
  1240. fail_acpi_driver:
  1241. platform_driver_unregister(&fjes_driver);
  1242. fjes_dbg_exit();
  1243. return result;
  1244. }
  1245. module_init(fjes_init_module);
/* fjes_exit_module - Driver Exit Cleanup Routine
 *
 * Unregisters in reverse order of fjes_init_module(): ACPI driver,
 * platform driver, then debugfs.
 */
static void __exit fjes_exit_module(void)
{
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
}

module_exit(fjes_exit_module);