  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * sh_eth.c - Driver for Renesas ethernet controller.
  4. *
  5. * Copyright (C) 2008, 2011 Renesas Solutions Corp.
  6. * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
  7. * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
  8. * Copyright (C) 2013, 2014 Renesas Electronics Corporation
  9. */
  10. #include <config.h>
  11. #include <common.h>
  12. #include <cpu_func.h>
  13. #include <env.h>
  14. #include <log.h>
  15. #include <malloc.h>
  16. #include <net.h>
  17. #include <netdev.h>
  18. #include <miiphy.h>
  19. #include <asm/cache.h>
  20. #include <linux/delay.h>
  21. #include <linux/errno.h>
  22. #include <asm/global_data.h>
  23. #include <asm/io.h>
  24. #include <clk.h>
  25. #include <dm.h>
  26. #include <linux/mii.h>
  27. #include <asm/gpio.h>
  28. #include "sh_eth.h"
  29. #ifndef CFG_SH_ETHER_USE_PORT
  30. # error "Please define CFG_SH_ETHER_USE_PORT"
  31. #endif
  32. #ifndef CFG_SH_ETHER_PHY_ADDR
  33. # error "Please define CFG_SH_ETHER_PHY_ADDR"
  34. #endif
  35. #if defined(CFG_SH_ETHER_CACHE_WRITEBACK) && \
  36. !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
  37. #define flush_cache_wback(addr, len) \
  38. flush_dcache_range((unsigned long)addr, \
  39. (unsigned long)(addr + ALIGN(len, CFG_SH_ETHER_ALIGNE_SIZE)))
  40. #else
  41. #define flush_cache_wback(...)
  42. #endif
  43. #if defined(CFG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
  44. #define invalidate_cache(addr, len) \
  45. { \
  46. unsigned long line_size = CFG_SH_ETHER_ALIGNE_SIZE; \
  47. unsigned long start, end; \
  48. \
  49. start = (unsigned long)addr; \
  50. end = start + len; \
  51. start &= ~(line_size - 1); \
  52. end = ((end + line_size - 1) & ~(line_size - 1)); \
  53. \
  54. invalidate_dcache_range(start, end); \
  55. }
  56. #else
  57. #define invalidate_cache(...)
  58. #endif
  59. #define TIMEOUT_CNT 1000
/*
 * Queue one frame on the current tx descriptor and busy-wait until the
 * controller has sent it.
 *
 * @eth:    driver state; the active port's descriptor ring is used
 * @packet: frame buffer, must be 4-byte aligned
 * @len:    frame length in bytes (must fit in 16 bits)
 * Return: 0 on success, -EINVAL/-EFAULT on bad arguments,
 *         -ETIMEDOUT if the controller never clears TD_TACT.
 */
static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* td1 carries the length in its upper 16 bits, so len must fit */
	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* packet must be a 4 byte boundary */
	if ((uintptr_t)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
		       , __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor: buffer address, length, then ownership */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted (controller clears TD_TACT) */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	/* Advance to the next descriptor, wrapping at the end of the ring */
	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}
/*
 * Check whether the current rx descriptor holds a completed frame.
 *
 * Return: received frame length in bytes, 0 if the descriptor reported
 * a receive error (RD_RFE), or -EAGAIN while the descriptor is still
 * owned by the controller (nothing received yet).
 */
static int sh_eth_recv_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (port_info->rx_desc_cur->rd0 & RD_RACT)
		return -EAGAIN;

	/* Check for errors */
	if (port_info->rx_desc_cur->rd0 & RD_RFE)
		return 0;

	/* Low 16 bits of rd1 hold the received frame length */
	return port_info->rx_desc_cur->rd1 & 0xffff;
}
/*
 * Hand the current rx descriptor back to the controller after the
 * caller has consumed the frame, then advance to the next descriptor.
 */
static void sh_eth_recv_finish(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Discard any stale cached view of the DMA buffer */
	invalidate_cache(ADDR_TO_P2(port_info->rx_desc_cur->rd2), MAX_BUF_SIZE);

	/* Make current descriptor available again, preserving ring-end flag */
	if (port_info->rx_desc_cur->rd0 & RD_RDLE)
		port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
	else
		port_info->rx_desc_cur->rd0 = RD_RACT;

	flush_cache_wback(port_info->rx_desc_cur,
			  sizeof(struct rx_desc_s));

	/* Point to the next descriptor, wrapping at the end of the ring */
	port_info->rx_desc_cur++;
	if (port_info->rx_desc_cur >=
	    port_info->rx_desc_base + NUM_RX_DESC)
		port_info->rx_desc_cur = port_info->rx_desc_base;
}
/*
 * Reset the E-DMAC/E-MAC block.  GETHER/RZ parts have a self-clearing
 * software-reset bit that must be polled; other parts only need the
 * reset bit pulsed with a short delay.
 * Return: 0 on success, -EIO if the GETHER/RZ reset never completes.
 */
static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	/* Pulse the software-reset bit, preserving the other EDMR bits */
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}
/*
 * Allocate and initialize the tx descriptor ring and point the
 * controller at it.
 * Return: 0 on success, -ENOMEM if the ring cannot be allocated.
 */
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR);/* Last descriptor bit */
#endif

err:
	return ret;
}
/*
 * Allocate and initialize the rx descriptor ring and its data buffers,
 * then point the controller at the ring.
 * Return: 0 on success, -ENOMEM on allocation failure (partial
 * allocations are released before returning).
 */
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);
	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);

	/* Initialize all descriptors: owned by controller, one buffer each */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	invalidate_cache(port_info->rx_buf_alloc, NUM_RX_DESC * MAX_BUF_SIZE);
	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}
  269. static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
  270. {
  271. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  272. if (port_info->tx_desc_alloc) {
  273. free(port_info->tx_desc_alloc);
  274. port_info->tx_desc_alloc = NULL;
  275. }
  276. }
  277. static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
  278. {
  279. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  280. if (port_info->rx_desc_alloc) {
  281. free(port_info->rx_desc_alloc);
  282. port_info->rx_desc_alloc = NULL;
  283. }
  284. if (port_info->rx_buf_alloc) {
  285. free(port_info->rx_buf_alloc);
  286. port_info->rx_buf_alloc = NULL;
  287. }
  288. }
  289. static int sh_eth_desc_init(struct sh_eth_dev *eth)
  290. {
  291. int ret = 0;
  292. ret = sh_eth_tx_desc_init(eth);
  293. if (ret)
  294. goto err_tx_init;
  295. ret = sh_eth_rx_desc_init(eth);
  296. if (ret)
  297. goto err_rx_init;
  298. return ret;
  299. err_rx_init:
  300. sh_eth_tx_desc_free(eth);
  301. err_tx_init:
  302. return ret;
  303. }
  304. static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
  305. unsigned char *mac)
  306. {
  307. u32 val;
  308. val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
  309. sh_eth_write(port_info, val, MAHR);
  310. val = (mac[4] << 8) | mac[5];
  311. sh_eth_write(port_info, val, MALR);
  312. }
/*
 * Program the E-DMAC and E-MAC registers for normal operation: endian
 * and descriptor format, FIFO thresholds, MAC address and frame length
 * limit, plus SoC-specific MII/RMII mode selection.
 */
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	unsigned long edmr;

	/* Configure e-dmac registers */
	edmr = sh_eth_read(port_info, EDMR);
	edmr &= ~EMDR_DESC_R;
	edmr |= EMDR_DESC | EDMR_EL;
#if defined(CONFIG_R8A77980)
	/* R-Car V3H wants non-burst mode as well */
	edmr |= EDMR_NBST;
#endif
	sh_eth_write(port_info, edmr, EDMR);

	/* Mask all interrupts; this driver only polls */
	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set Mac address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

	/* SoC-specific PHY interface mode (MII vs RMII) */
#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}
/*
 * Mirror the negotiated PHY speed and duplex into the MAC registers and
 * enable the transmitter/receiver via ECMR.
 * Return: always 0.
 */
static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;

	/* Set the transfer speed; the register used varies per SoC family */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	/* Only the GETHER variant supports gigabit */
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}
/* Kick the E-DMAC receiver; the transmitter is enabled on demand. */
static void sh_eth_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled when
	 * we have something to transmit
	 */
	sh_eth_write(port_info, EDRRR_R, EDRRR);
}
/*
 * Disable the E-DMAC receiver.
 * NOTE(review): this writes ~EDRRR_R (all bits set except bit 0) rather
 * than 0; it does clear the receive-request bit, but also sets every
 * other bit in EDRRR — confirm against the hardware manual.
 */
static void sh_eth_stop(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}
  411. static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
  412. {
  413. int ret = 0;
  414. ret = sh_eth_reset(eth);
  415. if (ret)
  416. return ret;
  417. ret = sh_eth_desc_init(eth);
  418. if (ret)
  419. return ret;
  420. sh_eth_mac_regs_config(eth, mac);
  421. return 0;
  422. }
/*
 * Bring up the PHY, propagate the negotiated speed/duplex into the MAC
 * registers, then enable the receiver.
 * Return: 0 on success, negative error code otherwise.
 */
static int sh_eth_start_common(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	int ret;

	ret = phy_startup(port_info->phydev);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	ret = sh_eth_phy_regs_config(eth);
	if (ret)
		return ret;

	sh_eth_start(eth);

	return 0;
}
/* Per-device private state for the DM Ethernet driver */
struct sh_ether_priv {
	struct sh_eth_dev shdev;	/* common controller/port state */
	struct mii_dev *bus;		/* MDIO bus registered in probe() */
	phys_addr_t iobase;		/* register base from platform data */
	struct clk clk;			/* module clock (when CLK is enabled) */
};
  444. static int sh_ether_send(struct udevice *dev, void *packet, int len)
  445. {
  446. struct sh_ether_priv *priv = dev_get_priv(dev);
  447. struct sh_eth_dev *eth = &priv->shdev;
  448. return sh_eth_send_common(eth, packet, len);
  449. }
/*
 * DM .recv hook: hand out a pointer into the rx ring for the next
 * completed frame; the buffer is recycled later via sh_ether_free_pkt().
 * Return: frame length, 0 on rx error, or -EAGAIN when nothing pending.
 */
static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	/* rd2 holds the DMA buffer address; map it to the P2 view */
	uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
	int len;

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		*packetp = packet;

		return len;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return len;
}
/*
 * DM .free_pkt hook: give the rx descriptor back to the controller and
 * re-arm the receiver if it stopped.
 * Return: always 0.
 */
static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_recv_finish(eth);

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return 0;
}
  479. static int sh_ether_write_hwaddr(struct udevice *dev)
  480. {
  481. struct sh_ether_priv *priv = dev_get_priv(dev);
  482. struct sh_eth_dev *eth = &priv->shdev;
  483. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  484. struct eth_pdata *pdata = dev_get_plat(dev);
  485. sh_eth_write_hwaddr(port_info, pdata->enetaddr);
  486. return 0;
  487. }
  488. static int sh_eth_phy_config(struct udevice *dev)
  489. {
  490. struct sh_ether_priv *priv = dev_get_priv(dev);
  491. struct eth_pdata *pdata = dev_get_plat(dev);
  492. struct sh_eth_dev *eth = &priv->shdev;
  493. int ret = 0;
  494. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  495. struct phy_device *phydev;
  496. phydev = phy_connect(priv->bus, -1, dev, pdata->phy_interface);
  497. if (!phydev)
  498. return -ENODEV;
  499. port_info->phydev = phydev;
  500. phy_config(phydev);
  501. return ret;
  502. }
/*
 * DM .start hook: full reinit (reset, descriptor rings, MAC registers)
 * followed by PHY startup; frees the rings again on failure.
 * NOTE(review): each call to sh_eth_init_common() allocates fresh
 * descriptor rings without freeing the previous ones — confirm the
 * uclass never calls .start twice without an intervening free.
 */
static int sh_ether_start(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret;

	ret = sh_eth_init_common(eth, pdata->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}
  521. static void sh_ether_stop(struct udevice *dev)
  522. {
  523. struct sh_ether_priv *priv = dev_get_priv(dev);
  524. struct sh_eth_dev *eth = &priv->shdev;
  525. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  526. phy_shutdown(port_info->phydev);
  527. sh_eth_stop(&priv->shdev);
  528. }
  529. static int sh_ether_probe(struct udevice *udev)
  530. {
  531. struct eth_pdata *pdata = dev_get_plat(udev);
  532. struct sh_ether_priv *priv = dev_get_priv(udev);
  533. struct sh_eth_dev *eth = &priv->shdev;
  534. struct mii_dev *mdiodev;
  535. int ret;
  536. priv->iobase = pdata->iobase;
  537. #if CONFIG_IS_ENABLED(CLK)
  538. ret = clk_get_by_index(udev, 0, &priv->clk);
  539. if (ret < 0)
  540. return ret;
  541. #endif
  542. mdiodev = mdio_alloc();
  543. if (!mdiodev) {
  544. ret = -ENOMEM;
  545. return ret;
  546. }
  547. mdiodev->read = bb_miiphy_read;
  548. mdiodev->write = bb_miiphy_write;
  549. bb_miiphy_buses[0].priv = eth;
  550. snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);
  551. ret = mdio_register(mdiodev);
  552. if (ret < 0)
  553. goto err_mdio_register;
  554. priv->bus = miiphy_get_dev_by_name(udev->name);
  555. eth->port = CFG_SH_ETHER_USE_PORT;
  556. eth->port_info[eth->port].phy_addr = CFG_SH_ETHER_PHY_ADDR;
  557. eth->port_info[eth->port].iobase =
  558. (void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);
  559. #if CONFIG_IS_ENABLED(CLK)
  560. ret = clk_enable(&priv->clk);
  561. if (ret)
  562. goto err_mdio_register;
  563. #endif
  564. ret = sh_eth_init_common(eth, pdata->enetaddr);
  565. if (ret)
  566. goto err_phy_config;
  567. ret = sh_eth_phy_config(udev);
  568. if (ret) {
  569. printf(SHETHER_NAME ": phy config timeout\n");
  570. goto err_phy_config;
  571. }
  572. return 0;
  573. err_phy_config:
  574. #if CONFIG_IS_ENABLED(CLK)
  575. clk_disable(&priv->clk);
  576. #endif
  577. err_mdio_register:
  578. mdio_free(mdiodev);
  579. return ret;
  580. }
/*
 * DM .remove hook: release the clock, the PHY device and the MDIO bus.
 * Return: always 0.
 */
static int sh_ether_remove(struct udevice *udev)
{
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
	/* phydev was allocated by phy_connect() in sh_eth_phy_config() */
	free(port_info->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}
/* DM Ethernet operations, wired up in U_BOOT_DRIVER below */
static const struct eth_ops sh_ether_ops = {
	.start = sh_ether_start,
	.send = sh_ether_send,
	.recv = sh_ether_recv,
	.free_pkt = sh_ether_free_pkt,
	.stop = sh_ether_stop,
	.write_hwaddr = sh_ether_write_hwaddr,
};
  602. int sh_ether_of_to_plat(struct udevice *dev)
  603. {
  604. struct eth_pdata *pdata = dev_get_plat(dev);
  605. const fdt32_t *cell;
  606. pdata->iobase = dev_read_addr(dev);
  607. pdata->phy_interface = dev_read_phy_mode(dev);
  608. if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
  609. return -EINVAL;
  610. pdata->max_speed = 1000;
  611. cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
  612. if (cell)
  613. pdata->max_speed = fdt32_to_cpu(*cell);
  614. sprintf(bb_miiphy_buses[0].name, dev->name);
  615. return 0;
  616. }
/* Devicetree match table for the Ether and GEther variants */
static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r7s72100" },
	{ .compatible = "renesas,ether-r8a7790" },
	{ .compatible = "renesas,ether-r8a7791" },
	{ .compatible = "renesas,ether-r8a7793" },
	{ .compatible = "renesas,ether-r8a7794" },
	{ .compatible = "renesas,gether-r8a77980" },
	{ }	/* sentinel */
};
/*
 * Driver model registration.  DM_FLAG_ALLOC_PRIV_DMA makes the private
 * data DMA-capable, which the descriptor rings rely on.
 */
U_BOOT_DRIVER(eth_sh_ether) = {
	.name = "sh_ether",
	.id = UCLASS_ETH,
	.of_match = sh_ether_ids,
	.of_to_plat = sh_ether_of_to_plat,
	.probe = sh_ether_probe,
	.remove = sh_ether_remove,
	.ops = &sh_ether_ops,
	.priv_auto = sizeof(struct sh_ether_priv),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
/******* for bb_miiphy *******/
/* Bit-bang MDIO init hook: the PIR register needs no setup. */
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}
  643. static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
  644. {
  645. struct sh_eth_dev *eth = bus->priv;
  646. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  647. sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);
  648. return 0;
  649. }
  650. static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
  651. {
  652. struct sh_eth_dev *eth = bus->priv;
  653. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  654. sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);
  655. return 0;
  656. }
  657. static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
  658. {
  659. struct sh_eth_dev *eth = bus->priv;
  660. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  661. if (v)
  662. sh_eth_write(port_info,
  663. sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
  664. else
  665. sh_eth_write(port_info,
  666. sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);
  667. return 0;
  668. }
  669. static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
  670. {
  671. struct sh_eth_dev *eth = bus->priv;
  672. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  673. *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;
  674. return 0;
  675. }
  676. static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
  677. {
  678. struct sh_eth_dev *eth = bus->priv;
  679. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  680. if (v)
  681. sh_eth_write(port_info,
  682. sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
  683. else
  684. sh_eth_write(port_info,
  685. sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);
  686. return 0;
  687. }
/* Half-period delay for the bit-banged MDC clock (~10 us). */
static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}
/*
 * Single bit-banged MDIO bus instance.  .priv is pointed at the
 * driver's sh_eth_dev in sh_ether_probe(), and the name is overwritten
 * with the device name in sh_ether_of_to_plat().
 */
struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name = "sh_eth",
		.init = sh_eth_bb_init,
		.mdio_active = sh_eth_bb_mdio_active,
		.mdio_tristate = sh_eth_bb_mdio_tristate,
		.set_mdio = sh_eth_bb_set_mdio,
		.get_mdio = sh_eth_bb_get_mdio,
		.set_mdc = sh_eth_bb_set_mdc,
		.delay = sh_eth_bb_delay,
	}
};

/* Number of bit-bang buses exported to the generic bb_miiphy layer */
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);