switchx2.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736
  1. // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
  2. /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
  3. #include <linux/kernel.h>
  4. #include <linux/module.h>
  5. #include <linux/types.h>
  6. #include <linux/pci.h>
  7. #include <linux/netdevice.h>
  8. #include <linux/etherdevice.h>
  9. #include <linux/slab.h>
  10. #include <linux/device.h>
  11. #include <linux/skbuff.h>
  12. #include <linux/if_vlan.h>
  13. #include <net/switchdev.h>
  14. #include "pci.h"
  15. #include "core.h"
  16. #include "reg.h"
  17. #include "port.h"
  18. #include "trap.h"
  19. #include "txheader.h"
  20. #include "ib.h"
/* Driver identification strings, reported via ethtool get_drvinfo. */
static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";

struct mlxsw_sx_port;

/* Per-ASIC driver state for one SwitchX-2 device. */
struct mlxsw_sx {
	struct mlxsw_sx_port **ports;	/* indexed by local port number */
	struct mlxsw_core *core;	/* handle to the common mlxsw core */
	const struct mlxsw_bus_info *bus_info;
	u8 hw_id[ETH_ALEN];		/* base HW (MAC) address of the device */
};
/* Software per-CPU packet/byte counters for a port.
 * The 64-bit counters are read under the u64_stats_sync sequence.
 */
struct mlxsw_sx_port_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;		/* u32: updated without syncp protection */
};
/* Per-port driver state, stored as netdev private data. */
struct mlxsw_sx_port {
	struct net_device *dev;
	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sx *mlxsw_sx;	/* back-pointer to the parent device */
	u8 local_port;			/* device-local port number */
	struct {
		u8 module;		/* front-panel module (cage) number */
	} mapping;
};
/* TX header field accessors (MLXSW_ITEM32 generates get/set helpers for
 * each field of the 16-byte header prepended to every transmitted packet).
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 * The MSB is specified in the 'ctclass3' field.
 * Range is 0-15, where 15 is the highest priority.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);

/* tx_hdr_swid
 * Switch partition ID.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_ctclass3
 * See field 'etclass'.
 */
MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);

/* tx_hdr_rdq
 * RDQ for control packets sent to remote CPU.
 * Must be set to 0x1F for EMADs, otherwise 0.
 */
MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);

/* tx_hdr_cpu_sig
 * Signature control for packets going to CPU. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);

/* tx_hdr_sig
 * Stacking protocol signature. Must be set to 0xE0E0.
 */
MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);

/* tx_hdr_stclass
 * Stacking TClass.
 */
MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);

/* tx_hdr_emad
 * EMAD bit. Must be set for EMADs.
 */
MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
  111. static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
  112. const struct mlxsw_tx_info *tx_info)
  113. {
  114. char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
  115. bool is_emad = tx_info->is_emad;
  116. memset(txhdr, 0, MLXSW_TXHDR_LEN);
  117. /* We currently set default values for the egress tclass (QoS). */
  118. mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
  119. mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
  120. mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
  121. mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
  122. MLXSW_TXHDR_ETCLASS_5);
  123. mlxsw_tx_hdr_swid_set(txhdr, 0);
  124. mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
  125. mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
  126. mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
  127. MLXSW_TXHDR_RDQ_OTHER);
  128. mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
  129. mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
  130. mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
  131. mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
  132. MLXSW_TXHDR_NOT_EMAD);
  133. mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
  134. }
  135. static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
  136. bool is_up)
  137. {
  138. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  139. char paos_pl[MLXSW_REG_PAOS_LEN];
  140. mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
  141. is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
  142. MLXSW_PORT_ADMIN_STATUS_DOWN);
  143. return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
  144. }
  145. static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
  146. bool *p_is_up)
  147. {
  148. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  149. char paos_pl[MLXSW_REG_PAOS_LEN];
  150. u8 oper_status;
  151. int err;
  152. mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
  153. err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
  154. if (err)
  155. return err;
  156. oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
  157. *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
  158. return 0;
  159. }
/* Program the port MTU via the PMTU register, rejecting values above the
 * maximum MTU the device reports for this port.
 */
static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
				   u16 mtu)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	/* Query (packed with MTU 0) to learn the maximum supported MTU. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	/* Re-pack with the requested MTU and write it back. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
}
  177. static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
  178. u16 mtu)
  179. {
  180. mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
  181. return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
  182. }
/* Set the InfiniBand MTU; no SW header overhead is added for IB. */
static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
				    u16 mtu)
{
	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
}
  188. static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
  189. u8 ib_port)
  190. {
  191. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  192. char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
  193. int err;
  194. mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
  195. mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
  196. err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
  197. return err;
  198. }
  199. static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
  200. {
  201. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  202. char pspa_pl[MLXSW_REG_PSPA_LEN];
  203. mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
  204. return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
  205. }
  206. static int
  207. mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
  208. {
  209. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  210. char sspr_pl[MLXSW_REG_SSPR_LEN];
  211. mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
  212. return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
  213. }
/* Read the module (cage) number and lane width for @local_port from the
 * PMLP register. The module is taken from the first lane (index 0).
 */
static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
					 u8 local_port, u8 *p_module,
					 u8 *p_width)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	return 0;
}
/* ndo_open - administratively enable the port, then start the TX queue. */
static int mlxsw_sx_port_open(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}
/* ndo_stop - stop the TX queue, then administratively disable the port. */
static int mlxsw_sx_port_stop(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
}
/* ndo_start_xmit - transmit one skb through the switch.
 * Prepends the mlxsw TX header (re-allocating headroom when needed) and
 * hands the skb to the core. On success, per-CPU TX counters are updated;
 * on failure the packet is dropped and tx_dropped is incremented.
 */
static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		/* Not enough headroom for the TX header: copy the skb with
		 * more headroom and release the original.
		 */
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}
	mlxsw_sx_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		/* Core did not take ownership; free the skb here. */
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
/* ndo_change_mtu - program the new MTU in HW before updating dev->mtu. */
static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}
/* ndo_get_stats64 - aggregate the SW per-CPU counters into @stats.
 * The 64-bit counters are read under the u64_stats fetch/retry loop so a
 * consistent snapshot is obtained even on 32-bit kernels.
 */
static void
mlxsw_sx_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
}
  327. static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
  328. size_t len)
  329. {
  330. struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
  331. return mlxsw_core_port_get_phys_port_name(mlxsw_sx_port->mlxsw_sx->core,
  332. mlxsw_sx_port->local_port,
  333. name, len);
  334. }
/* Network device operations for a SwitchX-2 Ethernet port. */
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open		= mlxsw_sx_port_open,
	.ndo_stop		= mlxsw_sx_port_stop,
	.ndo_start_xmit		= mlxsw_sx_port_xmit,
	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
	.ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
};
/* ethtool .get_drvinfo - report driver name/version, FW revision and bus. */
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
/* Descriptor binding an ethtool counter name ("ethtool -S") to the
 * accessor that extracts it from a PPCNT register payload.
 */
struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

/* IEEE 802.3 counter group read from the PPCNT register. */
static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
  442. static void mlxsw_sx_port_get_strings(struct net_device *dev,
  443. u32 stringset, u8 *data)
  444. {
  445. u8 *p = data;
  446. int i;
  447. switch (stringset) {
  448. case ETH_SS_STATS:
  449. for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
  450. memcpy(p, mlxsw_sx_port_hw_stats[i].str,
  451. ETH_GSTRING_LEN);
  452. p += ETH_GSTRING_LEN;
  453. }
  454. break;
  455. }
  456. }
  457. static void mlxsw_sx_port_get_stats(struct net_device *dev,
  458. struct ethtool_stats *stats, u64 *data)
  459. {
  460. struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
  461. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  462. char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
  463. int i;
  464. int err;
  465. mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
  466. MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
  467. err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
  468. for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
  469. data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
  470. }
  471. static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
  472. {
  473. switch (sset) {
  474. case ETH_SS_STATS:
  475. return MLXSW_SX_PORT_HW_STATS_LEN;
  476. default:
  477. return -EOPNOTSUPP;
  478. }
  479. }
/* Mapping between PTYS protocol bit(s) and the corresponding legacy
 * ethtool link-mode bits plus the speed in Mb/s. Entries without a
 * matching legacy SUPPORTED_/ADVERTISED_ bit leave those fields zero.
 */
struct mlxsw_sx_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		/* No legacy ethtool bits exist for 25G modes. */
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		/* No legacy ethtool bits exist for 50G modes. */
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		/* No legacy ethtool bits exist for 100G modes. */
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
#define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */
  584. static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
  585. {
  586. if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
  587. MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
  588. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
  589. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
  590. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
  591. MLXSW_REG_PTYS_ETH_SPEED_SGMII))
  592. return SUPPORTED_FIBRE;
  593. if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
  594. MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
  595. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
  596. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
  597. MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
  598. return SUPPORTED_Backplane;
  599. return 0;
  600. }
  601. static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
  602. {
  603. u32 modes = 0;
  604. int i;
  605. for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
  606. if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
  607. modes |= mlxsw_sx_port_link_mode[i].supported;
  608. }
  609. return modes;
  610. }
  611. static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
  612. {
  613. u32 modes = 0;
  614. int i;
  615. for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
  616. if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
  617. modes |= mlxsw_sx_port_link_mode[i].advertised;
  618. }
  619. return modes;
  620. }
  621. static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
  622. struct ethtool_link_ksettings *cmd)
  623. {
  624. u32 speed = SPEED_UNKNOWN;
  625. u8 duplex = DUPLEX_UNKNOWN;
  626. int i;
  627. if (!carrier_ok)
  628. goto out;
  629. for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
  630. if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
  631. speed = mlxsw_sx_port_link_mode[i].speed;
  632. duplex = DUPLEX_FULL;
  633. break;
  634. }
  635. }
  636. out:
  637. cmd->base.speed = speed;
  638. cmd->base.duplex = duplex;
  639. }
  640. static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
  641. {
  642. if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
  643. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
  644. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
  645. MLXSW_REG_PTYS_ETH_SPEED_SGMII))
  646. return PORT_FIBRE;
  647. if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
  648. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
  649. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
  650. return PORT_DA;
  651. if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
  652. MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
  653. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
  654. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
  655. return PORT_NONE;
  656. return PORT_OTHER;
  657. }
/* ethtool .get_link_ksettings - report supported/advertised/peer link
 * modes and current speed/duplex, all derived from the PTYS register.
 */
static int
mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	u32 supported, advertising, lp_advertising;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
				  &eth_proto_admin, &eth_proto_oper);

	supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
		    mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
		    SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* With no operational protocol (link down), fall back on the
	 * capability mask to derive the connector type.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
	lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	return 0;
}
  695. static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
  696. {
  697. u32 ptys_proto = 0;
  698. int i;
  699. for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
  700. if (advertising & mlxsw_sx_port_link_mode[i].advertised)
  701. ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
  702. }
  703. return ptys_proto;
  704. }
  705. static u32 mlxsw_sx_to_ptys_speed(u32 speed)
  706. {
  707. u32 ptys_proto = 0;
  708. int i;
  709. for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
  710. if (speed == mlxsw_sx_port_link_mode[i].speed)
  711. ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
  712. }
  713. return ptys_proto;
  714. }
  715. static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
  716. {
  717. u32 ptys_proto = 0;
  718. int i;
  719. for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
  720. if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
  721. ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
  722. }
  723. return ptys_proto;
  724. }
/* ethtool set_link_ksettings callback.
 *
 * With autoneg enabled, the requested advertising mask is programmed;
 * otherwise, only the modes matching the requested forced speed are.
 * The new admin mask is intersected with the port's capability mask,
 * and if the port is operationally up, the port is toggled down/up so
 * the new admin setting takes effect.
 */
static int
mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 advertising;
	bool is_up;
	int err;

	speed = cmd->base.speed;
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
	/* Pick the new admin mask: advertised modes (autoneg) or the
	 * modes that match the forced speed.
	 */
	eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
		mlxsw_sx_to_ptys_advert_link(advertising) :
		mlxsw_sx_to_ptys_speed(speed);

	/* Query current capability and admin masks from the device. */
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  NULL);

	/* Only modes the port can actually do may be enabled. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	/* Write the new admin mask. */
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_new, true);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	/* Port is up: bounce admin state so the new proto is applied. */
	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}
	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}
	return 0;
}
/* ethtool operations exposed by SwitchX-2 Ethernet port netdevs. */
static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sx_port_get_strings,
	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
	.get_sset_count		= mlxsw_sx_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sx_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sx_port_set_link_ksettings,
};
  795. static int mlxsw_sx_port_attr_get(struct net_device *dev,
  796. struct switchdev_attr *attr)
  797. {
  798. struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
  799. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  800. switch (attr->id) {
  801. case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
  802. attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
  803. memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
  804. break;
  805. default:
  806. return -EOPNOTSUPP;
  807. }
  808. return 0;
  809. }
/* switchdev operations for SwitchX-2 ports (attribute get only). */
static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sx_port_attr_get,
};
  813. static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
  814. {
  815. char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
  816. int err;
  817. err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
  818. if (err)
  819. return err;
  820. mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
  821. return 0;
  822. }
  823. static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
  824. {
  825. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  826. struct net_device *dev = mlxsw_sx_port->dev;
  827. char ppad_pl[MLXSW_REG_PPAD_LEN];
  828. int err;
  829. mlxsw_reg_ppad_pack(ppad_pl, false, 0);
  830. err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
  831. if (err)
  832. return err;
  833. mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
  834. /* The last byte value in base mac address is guaranteed
  835. * to be such it does not overflow when adding local_port
  836. * value.
  837. */
  838. dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
  839. return 0;
  840. }
  841. static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
  842. u16 vid, enum mlxsw_reg_spms_state state)
  843. {
  844. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  845. char *spms_pl;
  846. int err;
  847. spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
  848. if (!spms_pl)
  849. return -ENOMEM;
  850. mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
  851. mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
  852. err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
  853. kfree(spms_pl);
  854. return err;
  855. }
  856. static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
  857. u16 speed, u16 width)
  858. {
  859. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  860. char ptys_pl[MLXSW_REG_PTYS_LEN];
  861. mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
  862. width);
  863. return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
  864. }
  865. static int
  866. mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
  867. {
  868. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  869. u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
  870. char ptys_pl[MLXSW_REG_PTYS_LEN];
  871. u32 eth_proto_admin;
  872. eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
  873. mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
  874. eth_proto_admin, true);
  875. return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
  876. }
  877. static int
  878. mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
  879. enum mlxsw_reg_spmlr_learn_mode mode)
  880. {
  881. struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  882. char spmlr_pl[MLXSW_REG_SPMLR_LEN];
  883. mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
  884. return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
  885. }
/* Allocate, configure and register a netdev for an Ethernet port.
 *
 * Applies the initial hardware configuration in order: system port
 * mapping, Ethernet SWID (0), admin speed by lane width, MTU, admin
 * down, STP forwarding on the default VID, MAC learning disabled.
 * On success the port is published in mlxsw_sx->ports[] and to the
 * core. Caller must have already run mlxsw_core_port_init().
 */
static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				      u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
	mlxsw_sx_port = netdev_priv(dev);
	mlxsw_sx_port->dev = dev;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	mlxsw_sx_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
	if (!mlxsw_sx_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;

	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
			mlxsw_sx_port->local_port);
		goto err_dev_addr_get;
	}

	/* Carrier stays off until a PUDE link-up event arrives. */
	netif_carrier_off(dev);
	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_VLAN_CHALLENGED;
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
					  MLXSW_PORT_DEFAULT_VID,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
			mlxsw_sx_port->local_port);
		goto err_port_stp_state_set;
	}

	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
			mlxsw_sx_port->local_port);
		goto err_port_mac_learning_mode_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sx_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
				mlxsw_sx_port, dev, module + 1, false, 0);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

/* Only the SWID assignment needs explicit undoing; the other register
 * configurations are harmless on a disabled port.
 */
err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_get:
	free_percpu(mlxsw_sx_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}
  992. static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
  993. u8 module, u8 width)
  994. {
  995. int err;
  996. err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
  997. if (err) {
  998. dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
  999. local_port);
  1000. return err;
  1001. }
  1002. err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
  1003. if (err)
  1004. goto err_port_create;
  1005. return 0;
  1006. err_port_create:
  1007. mlxsw_core_port_fini(mlxsw_sx->core, local_port);
  1008. return err;
  1009. }
/* Tear down an Ethernet port: detach from the core, unregister the
 * netdev, disable the SWID and free resources. Order matters: the
 * netdev must be unregistered before its backing memory is freed.
 */
static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	free_percpu(mlxsw_sx_port->pcpu_stats);
	free_netdev(mlxsw_sx_port->dev);
}
  1020. static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
  1021. {
  1022. return mlxsw_sx->ports[local_port] != NULL;
  1023. }
/* Create an InfiniBand port. Unlike the Ethernet flavor there is no
 * netdev; only a port structure is allocated and the hardware is
 * configured: system port mapping, IB SWID (1), front-panel IB port
 * number, speed/width masks, maximum MTU, admin up.
 */
static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				     u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	int err;

	mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
	if (!mlxsw_sx_port)
		return -ENOMEM;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	/* Adding port to Infiniband swid (1) */
	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	/* Expose the IB port number as its front panel name */
	err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
			mlxsw_sx_port->local_port);
		goto err_port_ib_set;
	}

	/* Supports all speeds from SDR to FDR (bitmask) and support bus width
	 * of 1x, 2x and 4x (3 bits bitmask)
	 */
	err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
					 MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
					 BIT(3) - 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	/* Change to the maximum MTU the device supports, the SMA will take
	 * care of the active MTU
	 */
	err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
			mlxsw_sx_port->local_port);
		goto err_port_admin_set;
	}

	mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
			       mlxsw_sx_port);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

/* As in the Ethernet flavor, only the SWID needs explicit undoing. */
err_port_admin_set:
err_port_mtu_set:
err_port_speed_set:
err_port_ib_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
	kfree(mlxsw_sx_port);
	return err;
}
/* Tear down an IB port: detach from the core, bring the port admin
 * down and move it to the disabled SWID before freeing it. Mirrors
 * __mlxsw_sx_port_ib_create().
 */
static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	kfree(mlxsw_sx_port);
}
  1104. static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
  1105. {
  1106. enum devlink_port_type port_type =
  1107. mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
  1108. if (port_type == DEVLINK_PORT_TYPE_ETH)
  1109. __mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
  1110. else if (port_type == DEVLINK_PORT_TYPE_IB)
  1111. __mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
  1112. }
/* Remove a port of either type and release its core-port resources. */
static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
}
  1118. static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
  1119. {
  1120. int i;
  1121. for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
  1122. if (mlxsw_sx_port_created(mlxsw_sx, i))
  1123. mlxsw_sx_port_remove(mlxsw_sx, i);
  1124. kfree(mlxsw_sx->ports);
  1125. mlxsw_sx->ports = NULL;
  1126. }
  1127. static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
  1128. {
  1129. unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
  1130. size_t alloc_size;
  1131. u8 module, width;
  1132. int i;
  1133. int err;
  1134. alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
  1135. mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
  1136. if (!mlxsw_sx->ports)
  1137. return -ENOMEM;
  1138. for (i = 1; i < max_ports; i++) {
  1139. err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
  1140. &width);
  1141. if (err)
  1142. goto err_port_module_info_get;
  1143. if (!width)
  1144. continue;
  1145. err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
  1146. if (err)
  1147. goto err_port_create;
  1148. }
  1149. return 0;
  1150. err_port_create:
  1151. err_port_module_info_get:
  1152. for (i--; i >= 1; i--)
  1153. if (mlxsw_sx_port_created(mlxsw_sx, i))
  1154. mlxsw_sx_port_remove(mlxsw_sx, i);
  1155. kfree(mlxsw_sx->ports);
  1156. mlxsw_sx->ports = NULL;
  1157. return err;
  1158. }
  1159. static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
  1160. enum mlxsw_reg_pude_oper_status status)
  1161. {
  1162. if (status == MLXSW_PORT_OPER_STATUS_UP) {
  1163. netdev_info(mlxsw_sx_port->dev, "link up\n");
  1164. netif_carrier_on(mlxsw_sx_port->dev);
  1165. } else {
  1166. netdev_info(mlxsw_sx_port->dev, "link down\n");
  1167. netif_carrier_off(mlxsw_sx_port->dev);
  1168. }
  1169. }
  1170. static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
  1171. enum mlxsw_reg_pude_oper_status status)
  1172. {
  1173. if (status == MLXSW_PORT_OPER_STATUS_UP)
  1174. pr_info("ib link for port %d - up\n",
  1175. mlxsw_sx_port->mapping.module + 1);
  1176. else
  1177. pr_info("ib link for port %d - down\n",
  1178. mlxsw_sx_port->mapping.module + 1);
  1179. }
  1180. static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
  1181. char *pude_pl, void *priv)
  1182. {
  1183. struct mlxsw_sx *mlxsw_sx = priv;
  1184. struct mlxsw_sx_port *mlxsw_sx_port;
  1185. enum mlxsw_reg_pude_oper_status status;
  1186. enum devlink_port_type port_type;
  1187. u8 local_port;
  1188. local_port = mlxsw_reg_pude_local_port_get(pude_pl);
  1189. mlxsw_sx_port = mlxsw_sx->ports[local_port];
  1190. if (!mlxsw_sx_port) {
  1191. dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
  1192. local_port);
  1193. return;
  1194. }
  1195. status = mlxsw_reg_pude_oper_status_get(pude_pl);
  1196. port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
  1197. if (port_type == DEVLINK_PORT_TYPE_ETH)
  1198. mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
  1199. else if (port_type == DEVLINK_PORT_TYPE_IB)
  1200. mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
  1201. }
  1202. static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
  1203. void *priv)
  1204. {
  1205. struct mlxsw_sx *mlxsw_sx = priv;
  1206. struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
  1207. struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
  1208. if (unlikely(!mlxsw_sx_port)) {
  1209. dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
  1210. local_port);
  1211. return;
  1212. }
  1213. skb->dev = mlxsw_sx_port->dev;
  1214. pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
  1215. u64_stats_update_begin(&pcpu_stats->syncp);
  1216. pcpu_stats->rx_packets++;
  1217. pcpu_stats->rx_bytes += skb->len;
  1218. u64_stats_update_end(&pcpu_stats->syncp);
  1219. skb->protocol = eth_type_trans(skb, skb->dev);
  1220. netif_receive_skb(skb);
  1221. }
/* devlink callback: switch a port between Ethernet and IB personality
 * by removing it and re-creating it as the requested type.
 *
 * NOTE(review): if module-info query or re-creation fails, the port
 * remains removed — the old personality is not restored; confirm this
 * is the intended error semantics.
 */
static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
				  enum devlink_port_type new_type)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	u8 module, width;
	int err;

	if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
		dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (new_type == DEVLINK_PORT_TYPE_AUTO)
		return -EOPNOTSUPP;

	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
					    &width);
	if (err)
		goto err_port_module_info_get;

	if (new_type == DEVLINK_PORT_TYPE_ETH)
		err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
						 width);
	else if (new_type == DEVLINK_PORT_TYPE_IB)
		err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
						width);

err_port_module_info_get:
	return err;
}
/* RX listener entry: deliver the trapped packet to
 * mlxsw_sx_rx_listener_func via the SX2_RX trap group; the FORWARD
 * action presumably lets hardware keep forwarding the original packet.
 */
#define MLXSW_SX_RXL(_trap_id) \
	MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU, \
		  false, SX2_RX, FORWARD)

/* Port up/down events plus control-plane protocol traps (STP, LACP,
 * EAPOL, LLDP, xVRP, DHCP, IGMP, ...).
 */
static const struct mlxsw_listener mlxsw_sx_listener[] = {
	MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
	MLXSW_SX_RXL(FDB_MC),
	MLXSW_SX_RXL(STP),
	MLXSW_SX_RXL(LACP),
	MLXSW_SX_RXL(EAPOL),
	MLXSW_SX_RXL(LLDP),
	MLXSW_SX_RXL(MMRP),
	MLXSW_SX_RXL(MVRP),
	MLXSW_SX_RXL(RPVST),
	MLXSW_SX_RXL(DHCP),
	MLXSW_SX_RXL(IGMP_QUERY),
	MLXSW_SX_RXL(IGMP_V1_REPORT),
	MLXSW_SX_RXL(IGMP_V2_REPORT),
	MLXSW_SX_RXL(IGMP_V2_LEAVE),
	MLXSW_SX_RXL(IGMP_V3_REPORT),
};
/* Configure the SX2_RX and SX2_CTRL trap groups and register every
 * listener. On listener registration failure, the listeners already
 * registered are unregistered in reverse order.
 */
static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	int i;
	int err;

	/* Bind the SX2_RX trap group to its receive descriptor queue. */
	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	/* Likewise for the SX2_CTRL trap group. */
	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sx->core,
					       &mlxsw_sx_listener[i],
					       mlxsw_sx);
		if (err)
			goto err_listener_register;
	}
	return 0;

err_listener_register:
	/* Unwind only the listeners registered so far. */
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sx->core,
					   &mlxsw_sx_listener[i],
					   mlxsw_sx);
	}
	return err;
}
  1308. static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
  1309. {
  1310. int i;
  1311. for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
  1312. mlxsw_core_trap_unregister(mlxsw_sx->core,
  1313. &mlxsw_sx_listener[i],
  1314. mlxsw_sx);
  1315. }
  1316. }
  1317. static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
  1318. {
  1319. char sfgc_pl[MLXSW_REG_SFGC_LEN];
  1320. char sgcr_pl[MLXSW_REG_SGCR_LEN];
  1321. char *sftr_pl;
  1322. int err;
  1323. /* Configure a flooding table, which includes only CPU port. */
  1324. sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
  1325. if (!sftr_pl)
  1326. return -ENOMEM;
  1327. mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
  1328. MLXSW_PORT_CPU_PORT, true);
  1329. err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
  1330. kfree(sftr_pl);
  1331. if (err)
  1332. return err;
  1333. /* Flood different packet types using the flooding table. */
  1334. mlxsw_reg_sfgc_pack(sfgc_pl,
  1335. MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
  1336. MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
  1337. MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
  1338. 0);
  1339. err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
  1340. if (err)
  1341. return err;
  1342. mlxsw_reg_sfgc_pack(sfgc_pl,
  1343. MLXSW_REG_SFGC_TYPE_BROADCAST,
  1344. MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
  1345. MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
  1346. 0);
  1347. err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
  1348. if (err)
  1349. return err;
  1350. mlxsw_reg_sfgc_pack(sfgc_pl,
  1351. MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
  1352. MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
  1353. MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
  1354. 0);
  1355. err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
  1356. if (err)
  1357. return err;
  1358. mlxsw_reg_sfgc_pack(sfgc_pl,
  1359. MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
  1360. MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
  1361. MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
  1362. 0);
  1363. err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
  1364. if (err)
  1365. return err;
  1366. mlxsw_reg_sfgc_pack(sfgc_pl,
  1367. MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
  1368. MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
  1369. MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
  1370. 0);
  1371. err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
  1372. if (err)
  1373. return err;
  1374. mlxsw_reg_sgcr_pack(sgcr_pl, true);
  1375. return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
  1376. }
  1377. static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
  1378. {
  1379. char htgt_pl[MLXSW_REG_HTGT_LEN];
  1380. mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
  1381. MLXSW_REG_HTGT_INVALID_POLICER,
  1382. MLXSW_REG_HTGT_DEFAULT_PRIORITY,
  1383. MLXSW_REG_HTGT_DEFAULT_TC);
  1384. mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
  1385. mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
  1386. MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
  1387. return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
  1388. }
/* Driver init callback: read the switch HW ID, create all ports
 * (Ethernet by default), register traps and set up flooding.
 * Unwinds in reverse order on failure.
 */
static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sx->core = mlxsw_core;
	mlxsw_sx->bus_info = mlxsw_bus_info;

	err = mlxsw_sx_hw_id_get(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
		return err;
	}

	err = mlxsw_sx_ports_create(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sx_traps_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
		goto err_listener_register;
	}

	err = mlxsw_sx_flood_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	return 0;

err_flood_init:
	mlxsw_sx_traps_fini(mlxsw_sx);
err_listener_register:
	mlxsw_sx_ports_remove(mlxsw_sx);
	return err;
}
/* Driver teardown: unregister traps, then remove all ports — reverse
 * of mlxsw_sx_init(). The flood configuration is not explicitly
 * undone here.
 */
static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sx_traps_fini(mlxsw_sx);
	mlxsw_sx_ports_remove(mlxsw_sx);
}
/* Device configuration profile handed to the core at init time.
 * Each "used_*" flag marks which fields are valid; the numeric values
 * are device limits/settings — presumably tuned for SwitchX-2, see
 * the config-profile definition in the core for field semantics.
 */
static const struct mlxsw_config_profile mlxsw_sx_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 48000,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.max_flood_tables		= 2,
	.max_vid_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 6,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	/* Two SWIDs: 0 for Ethernet ports, 1 for InfiniBand ports. */
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		},
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_IB,
		}
	},
};
/* mlxsw core driver hookup for SwitchX-2. */
static struct mlxsw_driver mlxsw_sx_driver = {
	.kind			= mlxsw_sx_driver_name,
	.priv_size		= sizeof(struct mlxsw_sx),
	.init			= mlxsw_sx_init,
	.fini			= mlxsw_sx_fini,
	.basic_trap_groups_set	= mlxsw_sx_basic_trap_groups_set,
	.txhdr_construct	= mlxsw_sx_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sx_config_profile,
	.port_type_set		= mlxsw_sx_port_type_set,
};
/* PCI device IDs bound by this driver (SwitchX-2 only). */
static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
	{0, },
};

/* PCI driver shell; probe/remove are presumably filled in by
 * mlxsw_pci_driver_register() — confirm against the mlxsw PCI core.
 */
static struct pci_driver mlxsw_sx_pci_driver = {
	.name = mlxsw_sx_driver_name,
	.id_table = mlxsw_sx_pci_id_table,
};
  1481. static int __init mlxsw_sx_module_init(void)
  1482. {
  1483. int err;
  1484. err = mlxsw_core_driver_register(&mlxsw_sx_driver);
  1485. if (err)
  1486. return err;
  1487. err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
  1488. if (err)
  1489. goto err_pci_driver_register;
  1490. return 0;
  1491. err_pci_driver_register:
  1492. mlxsw_core_driver_unregister(&mlxsw_sx_driver);
  1493. return err;
  1494. }
/* Module exit: unregister in reverse order of registration. */
static void __exit mlxsw_sx_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
}
/* Standard module boilerplate. */
module_init(mlxsw_sx_module_init);
module_exit(mlxsw_sx_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);