qeth_l3_main.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright IBM Corp. 2007, 2009
  4. * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
  5. * Frank Pavlic <fpavlic@de.ibm.com>,
  6. * Thomas Spatzier <tspat@de.ibm.com>,
  7. * Frank Blaschka <frank.blaschka@de.ibm.com>
  8. */
  9. #define KMSG_COMPONENT "qeth"
  10. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  11. #include <linux/module.h>
  12. #include <linux/moduleparam.h>
  13. #include <linux/bitops.h>
  14. #include <linux/string.h>
  15. #include <linux/errno.h>
  16. #include <linux/kernel.h>
  17. #include <linux/etherdevice.h>
  18. #include <linux/ip.h>
  19. #include <linux/in.h>
  20. #include <linux/inet.h>
  21. #include <linux/ipv6.h>
  22. #include <linux/inetdevice.h>
  23. #include <linux/igmp.h>
  24. #include <linux/slab.h>
  25. #include <linux/if_ether.h>
  26. #include <linux/if_vlan.h>
  27. #include <linux/skbuff.h>
  28. #include <net/ip.h>
  29. #include <net/arp.h>
  30. #include <net/route.h>
  31. #include <net/ipv6.h>
  32. #include <net/ip6_route.h>
  33. #include <net/iucv/af_iucv.h>
  34. #include <linux/hashtable.h>
  35. #include "qeth_l3.h"
  36. static int qeth_l3_register_addr_entry(struct qeth_card *,
  37. struct qeth_ipaddr *);
  38. static int qeth_l3_deregister_addr_entry(struct qeth_card *,
  39. struct qeth_ipaddr *);
  40. int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
  41. char *buf)
  42. {
  43. if (proto == QETH_PROT_IPV4)
  44. return scnprintf(buf, INET_ADDRSTRLEN, "%pI4", addr);
  45. else
  46. return scnprintf(buf, INET6_ADDRSTRLEN, "%pI6", addr);
  47. }
  48. static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
  49. struct qeth_ipaddr *query)
  50. {
  51. u32 key = qeth_l3_ipaddr_hash(query);
  52. struct qeth_ipaddr *addr;
  53. if (query->is_multicast) {
  54. hash_for_each_possible(card->rx_mode_addrs, addr, hnode, key)
  55. if (qeth_l3_addr_match_ip(addr, query))
  56. return addr;
  57. } else {
  58. hash_for_each_possible(card->ip_htable, addr, hnode, key)
  59. if (qeth_l3_addr_match_ip(addr, query))
  60. return addr;
  61. }
  62. return NULL;
  63. }
  64. static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
  65. {
  66. int i, j;
  67. u8 octet;
  68. for (i = 0; i < len; ++i) {
  69. octet = addr[i];
  70. for (j = 7; j >= 0; --j) {
  71. bits[i*8 + j] = octet & 1;
  72. octet >>= 1;
  73. }
  74. }
  75. }
  76. static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
  77. struct qeth_ipaddr *addr)
  78. {
  79. struct qeth_ipato_entry *ipatoe;
  80. u8 addr_bits[128] = {0, };
  81. u8 ipatoe_bits[128] = {0, };
  82. int rc = 0;
  83. if (!card->ipato.enabled)
  84. return false;
  85. if (addr->type != QETH_IP_TYPE_NORMAL)
  86. return false;
  87. qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
  88. (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
  89. list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
  90. if (addr->proto != ipatoe->proto)
  91. continue;
  92. qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
  93. (ipatoe->proto == QETH_PROT_IPV4) ?
  94. 4 : 16);
  95. rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
  96. if (rc)
  97. break;
  98. }
  99. /* invert? */
  100. if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
  101. rc = !rc;
  102. else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
  103. rc = !rc;
  104. return rc;
  105. }
/* Remove one IP address from the card's IP table.
 *
 * Caller must hold ip_lock (see qeth_l3_modify_ip()).
 *
 * Returns -ENOENT when no entry matches @tmp_addr exactly (IP *and*
 * type/flags, via qeth_l3_addr_match_all()). NORMAL addresses are
 * reference-counted: the entry is only torn down once the last reference
 * is dropped. The HW deregistration is skipped while the card is not
 * reachable; the entry is removed from the table regardless.
 */
static int qeth_l3_delete_ip(struct qeth_card *card,
			     struct qeth_ipaddr *tmp_addr)
{
	int rc = 0;
	struct qeth_ipaddr *addr;

	/* trace which flavor of address is being removed */
	if (tmp_addr->type == QETH_IP_TYPE_RXIP)
		QETH_CARD_TEXT(card, 2, "delrxip");
	else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
		QETH_CARD_TEXT(card, 2, "delvipa");
	else
		QETH_CARD_TEXT(card, 2, "delip");

	/* dump the address bytes; IPv6 needs two 8-byte trace records */
	if (tmp_addr->proto == QETH_PROT_IPV4)
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
	else {
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
	}

	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
		return -ENOENT;

	addr->ref_counter--;
	/* NORMAL addresses stay registered while other users remain */
	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
		return rc;
	if (qeth_card_hw_is_reachable(card))
		rc = qeth_l3_deregister_addr_entry(card, addr);

	hash_del(&addr->hnode);
	kfree(addr);

	return rc;
}
/* Add one IP address to the card's IP table and register it with the HW.
 *
 * Caller must hold ip_lock (see qeth_l3_modify_ip()).
 *
 * Duplicate handling: a NORMAL address that matches an existing entry
 * exactly just gains a reference; any other clash returns -EADDRINUSE.
 * New entries are copied into the table; while the card is unreachable
 * they are parked with QETH_DISP_ADDR_ADD for later recovery (see
 * qeth_l3_recover_ip()). A failed HW registration rolls the entry back
 * out of the table, except for -EADDRINUSE/-ENETDOWN which keep the
 * entry recorded.
 */
static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
	int rc = 0;
	struct qeth_ipaddr *addr;
	char buf[INET6_ADDRSTRLEN];

	/* trace which flavor of address is being added */
	if (tmp_addr->type == QETH_IP_TYPE_RXIP)
		QETH_CARD_TEXT(card, 2, "addrxip");
	else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
		QETH_CARD_TEXT(card, 2, "addvipa");
	else
		QETH_CARD_TEXT(card, 2, "addip");

	/* dump the address bytes; IPv6 needs two 8-byte trace records */
	if (tmp_addr->proto == QETH_PROT_IPV4)
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
	else {
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
	}

	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
	if (addr) {
		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
			return -EADDRINUSE;
		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
			/* exact duplicate: share the existing registration */
			addr->ref_counter++;
			return 0;
		}
		/* same IP, different type/flags: refuse and report */
		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
					 buf);
		dev_warn(&card->gdev->dev,
			 "Registering IP address %s failed\n", buf);
		return -EADDRINUSE;
	} else {
		addr = kmemdup(tmp_addr, sizeof(*tmp_addr), GFP_KERNEL);
		if (!addr)
			return -ENOMEM;

		if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
			QETH_CARD_TEXT(card, 2, "tkovaddr");
			addr->ipato = 1;
		}
		hash_add(card->ip_htable, &addr->hnode,
			 qeth_l3_ipaddr_hash(addr));

		/* offline card: defer HW registration to recovery */
		if (!qeth_card_hw_is_reachable(card)) {
			addr->disp_flag = QETH_DISP_ADDR_ADD;
			return 0;
		}

		rc = qeth_l3_register_addr_entry(card, addr);

		if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
		} else {
			/* registration failed hard: undo the table insert */
			hash_del(&addr->hnode);
			kfree(addr);
		}
	}
	return rc;
}
  189. static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr,
  190. bool add)
  191. {
  192. int rc;
  193. mutex_lock(&card->ip_lock);
  194. rc = add ? qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr);
  195. mutex_unlock(&card->ip_lock);
  196. return rc;
  197. }
/* Free every entry in the rx_mode (multicast) address cache.
 * NOTE(review): no lock is taken here — presumably the caller serializes
 * against rx-mode processing; confirm at the call sites.
 */
static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card)
{
	struct qeth_ipaddr *addr;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) {
		hash_del(&addr->hnode);
		kfree(addr);
	}
}
  208. static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
  209. {
  210. struct qeth_ipaddr *addr;
  211. struct hlist_node *tmp;
  212. int i;
  213. QETH_CARD_TEXT(card, 4, "clearip");
  214. mutex_lock(&card->ip_lock);
  215. hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
  216. if (!recover) {
  217. hash_del(&addr->hnode);
  218. kfree(addr);
  219. } else {
  220. /* prepare for recovery */
  221. addr->disp_flag = QETH_DISP_ADDR_ADD;
  222. }
  223. }
  224. mutex_unlock(&card->ip_lock);
  225. }
  226. static void qeth_l3_recover_ip(struct qeth_card *card)
  227. {
  228. struct qeth_ipaddr *addr;
  229. struct hlist_node *tmp;
  230. int i;
  231. int rc;
  232. QETH_CARD_TEXT(card, 4, "recovrip");
  233. mutex_lock(&card->ip_lock);
  234. hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
  235. if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
  236. rc = qeth_l3_register_addr_entry(card, addr);
  237. if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
  238. /* keep it in the records */
  239. addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
  240. } else {
  241. /* bad address */
  242. hash_del(&addr->hnode);
  243. kfree(addr);
  244. }
  245. }
  246. }
  247. mutex_unlock(&card->ip_lock);
  248. }
  249. static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
  250. unsigned long data)
  251. {
  252. struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
  253. switch (cmd->hdr.return_code) {
  254. case IPA_RC_SUCCESS:
  255. return 0;
  256. case IPA_RC_DUPLICATE_IP_ADDRESS:
  257. return -EADDRINUSE;
  258. case IPA_RC_MC_ADDR_NOT_FOUND:
  259. return -ENOENT;
  260. case IPA_RC_LAN_OFFLINE:
  261. return -ENETDOWN;
  262. default:
  263. return -EIO;
  264. }
  265. }
/* Issue a SETIPM/DELIPM command for the multicast address @addr.
 *
 * @ipacmd: IPA_CMD_SETIPM or IPA_CMD_DELIPM.
 *
 * Besides the IP itself the adapter wants the mapped Ethernet multicast
 * MAC; IPv4 addresses are carried in the low word of the IPv6-sized
 * field. Returns -ENOMEM if no command buffer is available, otherwise
 * the result of qeth_l3_setdelip_cb().
 */
static int qeth_l3_send_setdelmc(struct qeth_card *card,
				 struct qeth_ipaddr *addr,
				 enum qeth_ipa_cmds ipacmd)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setdelmc");

	iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
				 IPA_DATA_SIZEOF(setdelipm));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	if (addr->proto == QETH_PROT_IPV6) {
		cmd->data.setdelipm.ip = addr->u.a6.addr;
		ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac);
	} else {
		/* IPv4 rides in the last 32 bits of the IPv6 field */
		cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr;
		ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac);
	}

	return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
  287. static void qeth_l3_set_ipv6_prefix(struct in6_addr *prefix, unsigned int len)
  288. {
  289. unsigned int i = 0;
  290. while (len && i < 4) {
  291. int mask_len = min_t(int, len, 32);
  292. prefix->s6_addr32[i] = inet_make_mask(mask_len);
  293. len -= mask_len;
  294. i++;
  295. }
  296. }
  297. static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set)
  298. {
  299. switch (addr->type) {
  300. case QETH_IP_TYPE_RXIP:
  301. return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
  302. case QETH_IP_TYPE_VIPA:
  303. return (set) ? QETH_IPA_SETIP_VIPA_FLAG :
  304. QETH_IPA_DELIP_VIPA_FLAG;
  305. default:
  306. return (set && addr->ipato) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
  307. }
  308. }
/* Issue a SETIP/DELIP command for the unicast address @addr.
 *
 * @ipacmd: IPA_CMD_SETIP or IPA_CMD_DELIP.
 *
 * Fills in address, mask/prefix and the takeover/VIPA flags (see
 * qeth_l3_get_setdelip_flags()). Returns -ENOMEM if no command buffer
 * is available, otherwise the result of qeth_l3_setdelip_cb().
 */
static int qeth_l3_send_setdelip(struct qeth_card *card,
				 struct qeth_ipaddr *addr,
				 enum qeth_ipa_cmds ipacmd)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	u32 flags;

	QETH_CARD_TEXT(card, 4, "setdelip");

	iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
				 IPA_DATA_SIZEOF(setdelip6));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);

	flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP);
	QETH_CARD_TEXT_(card, 4, "flags%02X", flags);

	if (addr->proto == QETH_PROT_IPV6) {
		cmd->data.setdelip6.addr = addr->u.a6.addr;
		/* translate the prefix length into a full netmask */
		qeth_l3_set_ipv6_prefix(&cmd->data.setdelip6.prefix,
					addr->u.a6.pfxlen);
		cmd->data.setdelip6.flags = flags;
	} else {
		cmd->data.setdelip4.addr = addr->u.a4.addr;
		cmd->data.setdelip4.mask = addr->u.a4.mask;
		cmd->data.setdelip4.flags = flags;
	}

	return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
  336. static int qeth_l3_send_setrouting(struct qeth_card *card,
  337. enum qeth_routing_types type, enum qeth_prot_versions prot)
  338. {
  339. int rc;
  340. struct qeth_ipa_cmd *cmd;
  341. struct qeth_cmd_buffer *iob;
  342. QETH_CARD_TEXT(card, 4, "setroutg");
  343. iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETRTG, prot,
  344. IPA_DATA_SIZEOF(setrtg));
  345. if (!iob)
  346. return -ENOMEM;
  347. cmd = __ipa_cmd(iob);
  348. cmd->data.setrtg.type = (type);
  349. rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
  350. return rc;
  351. }
/* Validate (and possibly downgrade) the requested routing role.
 *
 * IQD devices accept the connector/multicast roles; other device types
 * accept the router roles, with MULTICAST_ROUTER additionally gated on
 * the IPA_OSA_MC_ROUTER assist for @prot. An unsupported role is reset
 * to NO_ROUTER in place and -EINVAL is returned; 0 means @type is
 * acceptable as-is.
 */
static int qeth_l3_correct_routing_type(struct qeth_card *card,
	enum qeth_routing_types *type, enum qeth_prot_versions prot)
{
	if (IS_IQD(card)) {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_CONNECTOR:
		case SECONDARY_CONNECTOR:
		case MULTICAST_ROUTER:
			return 0;
		default:
			goto out_inval;
		}
	} else {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_ROUTER:
		case SECONDARY_ROUTER:
			return 0;
		case MULTICAST_ROUTER:
			/* needs the OSA multicast-router assist */
			if (qeth_is_ipafunc_supported(card, prot,
						      IPA_OSA_MC_ROUTER))
				return 0;
			goto out_inval;
		default:
			goto out_inval;
		}
	}
out_inval:
	*type = NO_ROUTER;
	return -EINVAL;
}
  384. int qeth_l3_setrouting_v4(struct qeth_card *card)
  385. {
  386. int rc;
  387. QETH_CARD_TEXT(card, 3, "setrtg4");
  388. rc = qeth_l3_correct_routing_type(card, &card->options.route4.type,
  389. QETH_PROT_IPV4);
  390. if (rc)
  391. return rc;
  392. rc = qeth_l3_send_setrouting(card, card->options.route4.type,
  393. QETH_PROT_IPV4);
  394. if (rc) {
  395. card->options.route4.type = NO_ROUTER;
  396. QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
  397. rc, CARD_DEVID(card));
  398. }
  399. return rc;
  400. }
  401. int qeth_l3_setrouting_v6(struct qeth_card *card)
  402. {
  403. int rc = 0;
  404. QETH_CARD_TEXT(card, 3, "setrtg6");
  405. if (!qeth_is_supported(card, IPA_IPV6))
  406. return 0;
  407. rc = qeth_l3_correct_routing_type(card, &card->options.route6.type,
  408. QETH_PROT_IPV6);
  409. if (rc)
  410. return rc;
  411. rc = qeth_l3_send_setrouting(card, card->options.route6.type,
  412. QETH_PROT_IPV6);
  413. if (rc) {
  414. card->options.route6.type = NO_ROUTER;
  415. QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
  416. rc, CARD_DEVID(card));
  417. }
  418. return rc;
  419. }
  420. /*
  421. * IP address takeover related functions
  422. */
  423. /*
  424. * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
  425. *
  426. * Caller must hold ip_lock.
  427. */
  428. void qeth_l3_update_ipato(struct qeth_card *card)
  429. {
  430. struct qeth_ipaddr *addr;
  431. unsigned int i;
  432. hash_for_each(card->ip_htable, i, addr, hnode) {
  433. if (addr->type != QETH_IP_TYPE_NORMAL)
  434. continue;
  435. addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr);
  436. }
  437. }
/* Drop every IPATO entry, then refresh the per-address takeover flags
 * (with an empty list, qeth_l3_update_ipato() clears them all unless the
 * invert flags are set). Runs under ip_lock.
 */
static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
	struct qeth_ipato_entry *ipatoe, *tmp;

	mutex_lock(&card->ip_lock);

	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
		list_del(&ipatoe->entry);
		kfree(ipatoe);
	}

	qeth_l3_update_ipato(card);
	mutex_unlock(&card->ip_lock);
}
/* Append @new to the IPATO list unless an identical entry (same proto,
 * address and mask length) already exists.
 *
 * On success the takeover flags of all NORMAL addresses are refreshed
 * and ownership of @new passes to the list. Returns -EEXIST on a
 * duplicate, in which case the caller keeps ownership of @new.
 */
int qeth_l3_add_ipato_entry(struct qeth_card *card,
				struct qeth_ipato_entry *new)
{
	struct qeth_ipato_entry *ipatoe;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "addipato");

	mutex_lock(&card->ip_lock);

	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (ipatoe->proto != new->proto)
			continue;
		/* compare only the address bytes relevant for the proto */
		if (!memcmp(ipatoe->addr, new->addr,
			    (ipatoe->proto == QETH_PROT_IPV4) ? 4 : 16) &&
		    (ipatoe->mask_bits == new->mask_bits)) {
			rc = -EEXIST;
			break;
		}
	}

	if (!rc) {
		list_add_tail(&new->entry, &card->ipato.entries);
		qeth_l3_update_ipato(card);
	}

	mutex_unlock(&card->ip_lock);

	return rc;
}
/* Remove the IPATO entry matching @proto/@addr/@mask_bits, refreshing
 * the per-address takeover flags afterwards.
 *
 * Returns 0 when an entry was removed, -ENOENT otherwise. (Duplicates
 * cannot exist — qeth_l3_add_ipato_entry() rejects them — so at most
 * one entry matches.)
 */
int qeth_l3_del_ipato_entry(struct qeth_card *card,
			    enum qeth_prot_versions proto, u8 *addr,
			    unsigned int mask_bits)
{
	struct qeth_ipato_entry *ipatoe, *tmp;
	int rc = -ENOENT;

	QETH_CARD_TEXT(card, 2, "delipato");

	mutex_lock(&card->ip_lock);

	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
		if (ipatoe->proto != proto)
			continue;
		/* compare only the address bytes relevant for the proto */
		if (!memcmp(ipatoe->addr, addr,
			    (proto == QETH_PROT_IPV4) ? 4 : 16) &&
		    (ipatoe->mask_bits == mask_bits)) {
			list_del(&ipatoe->entry);
			qeth_l3_update_ipato(card);
			kfree(ipatoe);
			rc = 0;
		}
	}

	mutex_unlock(&card->ip_lock);

	return rc;
}
  496. int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
  497. enum qeth_ip_types type,
  498. enum qeth_prot_versions proto)
  499. {
  500. struct qeth_ipaddr addr;
  501. qeth_l3_init_ipaddr(&addr, type, proto);
  502. if (proto == QETH_PROT_IPV4)
  503. memcpy(&addr.u.a4.addr, ip, 4);
  504. else
  505. memcpy(&addr.u.a6.addr, ip, 16);
  506. return qeth_l3_modify_ip(card, &addr, add);
  507. }
  508. int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
  509. {
  510. struct qeth_ipaddr addr;
  511. unsigned int i;
  512. qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
  513. addr.u.a6.addr.s6_addr[0] = 0xfe;
  514. addr.u.a6.addr.s6_addr[1] = 0x80;
  515. for (i = 0; i < 8; i++)
  516. addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i];
  517. return qeth_l3_modify_ip(card, &addr, add);
  518. }
/* Register @addr with the HW, retrying up to 3 times on failure.
 *
 * Multicast addresses go out as SETIPM, unicast as SETIP. In sniffer
 * mode no addresses are registered and 0 is returned. A persistent
 * failure is logged with the printable address. Returns the last
 * command result.
 */
static int qeth_l3_register_addr_entry(struct qeth_card *card,
				struct qeth_ipaddr *addr)
{
	char buf[50];
	int rc = 0;
	int cnt = 3;	/* total attempts before giving up */

	if (card->options.sniffer)
		return 0;

	/* trace the address being registered */
	if (addr->proto == QETH_PROT_IPV4) {
		QETH_CARD_TEXT(card, 2, "setaddr4");
		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
	} else if (addr->proto == QETH_PROT_IPV6) {
		QETH_CARD_TEXT(card, 2, "setaddr6");
		QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
	} else {
		QETH_CARD_TEXT(card, 2, "setaddr?");
		QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
	}
	do {
		if (addr->is_multicast)
			rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
		else
			rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP);
		if (rc)
			QETH_CARD_TEXT(card, 2, "failed");
	} while ((--cnt > 0) && rc);
	if (rc) {
		QETH_CARD_TEXT(card, 2, "FAILED");
		qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
		dev_warn(&card->gdev->dev,
			 "Registering IP address %s failed\n", buf);
	}
	return rc;
}
/* Deregister @addr from the HW (single attempt, no retries).
 *
 * Multicast addresses go out as DELIPM, unicast as DELIP. In sniffer
 * mode nothing is deregistered and 0 is returned. Returns the command
 * result.
 */
static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
						struct qeth_ipaddr *addr)
{
	int rc = 0;

	if (card->options.sniffer)
		return 0;

	/* trace the address being deregistered */
	if (addr->proto == QETH_PROT_IPV4) {
		QETH_CARD_TEXT(card, 2, "deladdr4");
		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
	} else if (addr->proto == QETH_PROT_IPV6) {
		QETH_CARD_TEXT(card, 2, "deladdr6");
		QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
	} else {
		QETH_CARD_TEXT(card, 2, "deladdr?");
		QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
	}
	if (addr->is_multicast)
		rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
	else
		rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP);
	if (rc)
		QETH_CARD_TEXT(card, 2, "failed");

	return rc;
}
  579. static int qeth_l3_setadapter_parms(struct qeth_card *card)
  580. {
  581. int rc = 0;
  582. QETH_CARD_TEXT(card, 2, "setadprm");
  583. if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
  584. rc = qeth_setadpparms_change_macaddr(card);
  585. if (rc)
  586. dev_warn(&card->gdev->dev, "Reading the adapter MAC"
  587. " address failed\n");
  588. }
  589. return rc;
  590. }
/* Start the ARP-processing IPA assist.
 * A card without the assist is not an error (returns 0, logged at info
 * level); a failed start is logged and its errno returned.
 */
static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "ipaarp");

	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
		dev_info(&card->gdev->dev,
			 "ARP processing not supported on %s!\n",
			 netdev_name(card->dev));
		return 0;
	}
	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
					  IPA_CMD_ASS_START, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Starting ARP processing support for %s failed\n",
			 netdev_name(card->dev));
	}
	return rc;
}
/* Start the inbound source-MAC IPA assist.
 * Unlike the ARP assist, a card without this assist returns
 * -EOPNOTSUPP. A failed start is logged and its errno returned.
 */
static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "stsrcmac");

	if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
		dev_info(&card->gdev->dev,
			 "Inbound source MAC-address not supported on %s\n",
			 netdev_name(card->dev));
		return -EOPNOTSUPP;
	}

	rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
					  IPA_CMD_ASS_START, NULL);
	if (rc)
		dev_warn(&card->gdev->dev,
			 "Starting source MAC-address support for %s failed\n",
			 netdev_name(card->dev));
	return rc;
}
/* Start the VLAN-priority IPA assist.
 * Returns -EOPNOTSUPP when the full-VLAN assist is missing; success is
 * announced at info level, failure logged as a warning.
 */
static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "strtvlan");

	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
		dev_info(&card->gdev->dev,
			 "VLAN not supported on %s\n", netdev_name(card->dev));
		return -EOPNOTSUPP;
	}

	rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
					  IPA_CMD_ASS_START, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Starting VLAN support for %s failed\n",
			 netdev_name(card->dev));
	} else {
		dev_info(&card->gdev->dev, "VLAN enabled\n");
	}
	return rc;
}
/* Start the multicast IPA assist.
 * Returns -EOPNOTSUPP when the assist is missing. On success the
 * netdev gains IFF_MULTICAST; failure is logged as a warning.
 */
static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "stmcast");

	if (!qeth_is_supported(card, IPA_MULTICASTING)) {
		dev_info(&card->gdev->dev,
			 "Multicast not supported on %s\n",
			 netdev_name(card->dev));
		return -EOPNOTSUPP;
	}

	rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
					  IPA_CMD_ASS_START, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Starting multicast support for %s failed\n",
			 netdev_name(card->dev));
	} else {
		dev_info(&card->gdev->dev, "Multicast enabled\n");
		card->dev->flags |= IFF_MULTICAST;
	}
	return rc;
}
/* Perform the IPv6 software setup sequence.
 *
 * IQD devices skip the assist commands entirely (IPv6 is announced as
 * enabled directly). Otherwise: start the IPv4-side IPv6 assist with
 * data 3 (NOTE(review): the meaning of this magic value is not visible
 * here — presumably an assist-version/capability word; confirm against
 * the IPA spec), then the IPv6-side assist, then passthrough mode.
 * The first failure aborts the sequence and its errno is returned.
 */
static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
	u32 ipv6_data = 3;
	int rc;

	QETH_CARD_TEXT(card, 3, "softipv6");

	if (IS_IQD(card))
		goto out;

	rc = qeth_send_simple_setassparms(card, IPA_IPV6, IPA_CMD_ASS_START,
					  &ipv6_data);
	if (rc) {
		dev_err(&card->gdev->dev,
			"Activating IPv6 support for %s failed\n",
			netdev_name(card->dev));
		return rc;
	}
	rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START,
					     NULL);
	if (rc) {
		dev_err(&card->gdev->dev,
			"Activating IPv6 support for %s failed\n",
			netdev_name(card->dev));
		return rc;
	}
	rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
					     IPA_CMD_ASS_START, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Enabling the passthrough mode for %s failed\n",
			 netdev_name(card->dev));
		return rc;
	}
out:
	dev_info(&card->gdev->dev, "IPV6 enabled\n");
	return 0;
}
  705. static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
  706. {
  707. QETH_CARD_TEXT(card, 3, "strtipv6");
  708. if (!qeth_is_supported(card, IPA_IPV6)) {
  709. dev_info(&card->gdev->dev,
  710. "IPv6 not supported on %s\n", netdev_name(card->dev));
  711. return 0;
  712. }
  713. return qeth_l3_softsetup_ipv6(card);
  714. }
/* Start and configure the broadcast-filtering IPA assist.
 *
 * broadcast_capable progresses through the sequence: 0 → (start +
 * configure succeed) QETH_BROADCAST_WITH_ECHO → (enable succeeds)
 * QETH_BROADCAST_WITHOUT_ECHO. Each step's failure is logged and stops
 * the sequence, but a partially completed setup still counts as
 * broadcast-capable. IFF_BROADCAST on the netdev is synced to the final
 * capability on every exit path.
 */
static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
{
	u32 filter_data = 1;
	int rc;

	QETH_CARD_TEXT(card, 3, "stbrdcst");
	card->info.broadcast_capable = 0;
	if (!qeth_is_supported(card, IPA_FILTERING)) {
		dev_info(&card->gdev->dev,
			 "Broadcast not supported on %s\n",
			 netdev_name(card->dev));
		rc = -EOPNOTSUPP;
		goto out;
	}
	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_START, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Enabling broadcast filtering for %s failed\n",
			 netdev_name(card->dev));
		goto out;
	}

	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_CONFIGURE, &filter_data);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Setting up broadcast filtering for %s failed\n",
			 netdev_name(card->dev));
		goto out;
	}
	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
	dev_info(&card->gdev->dev, "Broadcast enabled\n");
	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_ENABLE, &filter_data);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Setting up broadcast echo filtering for %s failed\n",
			 netdev_name(card->dev));
		goto out;
	}
	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
out:
	/* keep the netdev flag in sync with what the HW accepted */
	if (card->info.broadcast_capable)
		card->dev->flags |= IFF_BROADCAST;
	else
		card->dev->flags &= ~IFF_BROADCAST;
	return rc;
}
/* Start all Layer-3 IPA assists in sequence.  Each helper logs its own
 * outcome; failures are deliberately ignored ("go on") -- a card that
 * lacks one assist is still usable with the remaining ones.
 */
static void qeth_l3_start_ipassists(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "strtipas");

	qeth_l3_start_ipa_arp_processing(card);	/* go on*/
	qeth_l3_start_ipa_source_mac(card);	/* go on*/
	qeth_l3_start_ipa_vlan(card);		/* go on*/
	qeth_l3_start_ipa_multicast(card);	/* go on*/
	qeth_l3_start_ipa_ipv6(card);		/* go on*/
	qeth_l3_start_ipa_broadcast(card);	/* go on*/
}
  772. static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
  773. struct qeth_reply *reply, unsigned long data)
  774. {
  775. struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
  776. if (cmd->hdr.return_code)
  777. return -EIO;
  778. if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr))
  779. return -EADDRNOTAVAIL;
  780. eth_hw_addr_set(card->dev, cmd->data.create_destroy_addr.mac_addr);
  781. return 0;
  782. }
  783. static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
  784. {
  785. int rc = 0;
  786. struct qeth_cmd_buffer *iob;
  787. QETH_CARD_TEXT(card, 2, "hsrmac");
  788. iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
  789. IPA_DATA_SIZEOF(create_destroy_addr));
  790. if (!iob)
  791. return -ENOMEM;
  792. rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
  793. NULL);
  794. return rc;
  795. }
  796. static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
  797. struct qeth_reply *reply, unsigned long data)
  798. {
  799. struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
  800. u16 *uid = reply->param;
  801. if (cmd->hdr.return_code == 0) {
  802. *uid = cmd->data.create_destroy_addr.uid;
  803. return 0;
  804. }
  805. dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n");
  806. return -EIO;
  807. }
  808. static u16 qeth_l3_get_unique_id(struct qeth_card *card, u16 uid)
  809. {
  810. struct qeth_cmd_buffer *iob;
  811. QETH_CARD_TEXT(card, 2, "guniqeid");
  812. if (!qeth_is_supported(card, IPA_IPV6))
  813. goto out;
  814. iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
  815. IPA_DATA_SIZEOF(create_destroy_addr));
  816. if (!iob)
  817. goto out;
  818. __ipa_cmd(iob)->data.create_destroy_addr.uid = uid;
  819. qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, &uid);
  820. out:
  821. return uid;
  822. }
/* Reply callback for the HiperSockets traffic-analyzer DIAG command.
 *
 * Updates card->info.promisc_mode according to the action that was sent
 * and the return code the card reported, and prints a user-visible
 * message for the interesting outcomes.  Returns 0 on success, -EIO for
 * any non-zero hardware return code.
 */
static int
qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
		    unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u16 rc;

	QETH_CARD_TEXT(card, 2, "diastrcb");

	cmd = (struct qeth_ipa_cmd *)data;
	rc = cmd->hdr.return_code;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
	switch (cmd->data.diagass.action) {
	case QETH_DIAGS_CMD_TRACE_QUERY:
		break;
	case QETH_DIAGS_CMD_TRACE_DISABLE:
		switch (rc) {
		case 0:
		case IPA_RC_INVALID_SUBCMD:
			/* INVALID_SUBCMD is also treated as "disabled";
			 * presumably tracing was not active -- TODO confirm */
			card->info.promisc_mode = SET_PROMISC_MODE_OFF;
			dev_info(&card->gdev->dev, "The HiperSockets network "
				 "traffic analyzer is deactivated\n");
			break;
		default:
			break;
		}
		break;
	case QETH_DIAGS_CMD_TRACE_ENABLE:
		switch (rc) {
		case 0:
			card->info.promisc_mode = SET_PROMISC_MODE_ON;
			dev_info(&card->gdev->dev, "The HiperSockets network "
				 "traffic analyzer is activated\n");
			break;
		case IPA_RC_HARDWARE_AUTH_ERROR:
			dev_warn(&card->gdev->dev, "The device is not "
				 "authorized to run as a HiperSockets network "
				 "traffic analyzer\n");
			break;
		case IPA_RC_TRACE_ALREADY_ACTIVE:
			dev_warn(&card->gdev->dev, "A HiperSockets "
				 "network traffic analyzer is already "
				 "active in the HiperSockets LAN\n");
			break;
		default:
			break;
		}
		break;
	default:
		QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n",
				 cmd->data.diagass.action, CARD_DEVID(card));
	}

	return rc ? -EIO : 0;
}
  876. static int
  877. qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
  878. {
  879. struct qeth_cmd_buffer *iob;
  880. struct qeth_ipa_cmd *cmd;
  881. QETH_CARD_TEXT(card, 2, "diagtrac");
  882. iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRACE, 0);
  883. if (!iob)
  884. return -ENOMEM;
  885. cmd = __ipa_cmd(iob);
  886. cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
  887. cmd->data.diagass.action = diags_cmd;
  888. return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
  889. }
/* Collect the multicast addresses of @dev into card->rx_mode_addrs.
 *
 * Called under the RTNL lock (the rtnl_dereference() walks rely on it),
 * both directly for the card's netdev and via vlan_for_each() for its
 * VLAN devices (@vid is unused here).  Addresses already in the hash
 * are flagged DO_NOTHING; new ones are added with disp_flag ADD so that
 * qeth_l3_rx_mode_work() registers them with the card.  Always returns
 * 0 so a vlan_for_each() walk visits every device.
 */
static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg)
{
	struct qeth_card *card = arg;
	struct inet6_dev *in6_dev;
	struct in_device *in4_dev;
	struct qeth_ipaddr *ipm;
	struct qeth_ipaddr tmp;
	struct ip_mc_list *im4;
	struct ifmcaddr6 *im6;

	QETH_CARD_TEXT(card, 4, "addmc");

	if (!dev || !(dev->flags & IFF_UP))
		goto out;

	/* IPv4 multicast list: */
	in4_dev = __in_dev_get_rtnl(dev);
	if (!in4_dev)
		goto walk_ipv6;

	qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
	tmp.disp_flag = QETH_DISP_ADDR_ADD;
	tmp.is_multicast = 1;

	for (im4 = rtnl_dereference(in4_dev->mc_list); im4 != NULL;
	     im4 = rtnl_dereference(im4->next_rcu)) {
		tmp.u.a4.addr = im4->multiaddr;

		ipm = qeth_l3_find_addr_by_ip(card, &tmp);
		if (ipm) {
			/* for mcast, by-IP match means full match */
			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
			continue;
		}

		ipm = kmemdup(&tmp, sizeof(tmp), GFP_KERNEL);
		if (!ipm)
			continue;	/* best effort: skip on OOM */

		hash_add(card->rx_mode_addrs, &ipm->hnode,
			 qeth_l3_ipaddr_hash(ipm));
	}

walk_ipv6:
	if (!qeth_is_supported(card, IPA_IPV6))
		goto out;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
	tmp.disp_flag = QETH_DISP_ADDR_ADD;
	tmp.is_multicast = 1;

	/* NOTE(review): the v6 walk allocates with GFP_ATOMIC while the v4
	 * walk above uses GFP_KERNEL -- presumably historical; verify. */
	for (im6 = rtnl_dereference(in6_dev->mc_list);
	     im6;
	     im6 = rtnl_dereference(im6->next)) {
		tmp.u.a6.addr = im6->mca_addr;

		ipm = qeth_l3_find_addr_by_ip(card, &tmp);
		if (ipm) {
			/* for mcast, by-IP match means full match */
			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
			continue;
		}

		ipm = kmemdup(&tmp, sizeof(tmp), GFP_ATOMIC);
		if (!ipm)
			continue;

		hash_add(card->rx_mode_addrs, &ipm->hnode,
			 qeth_l3_ipaddr_hash(ipm));
	}

out:
	return 0;
}
  951. static void qeth_l3_set_promisc_mode(struct qeth_card *card)
  952. {
  953. bool enable = card->dev->flags & IFF_PROMISC;
  954. if (card->info.promisc_mode == enable)
  955. return;
  956. if (IS_VM_NIC(card)) { /* Guestlan trace */
  957. if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
  958. qeth_setadp_promisc_mode(card, enable);
  959. } else if (card->options.sniffer && /* HiperSockets trace */
  960. qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
  961. if (enable) {
  962. QETH_CARD_TEXT(card, 3, "+promisc");
  963. qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
  964. } else {
  965. QETH_CARD_TEXT(card, 3, "-promisc");
  966. qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
  967. }
  968. }
  969. }
/* Worker for card->rx_mode_work: program multicast list and promisc
 * mode.  Runs in process context (the IPA commands block).
 *
 * Phase 1 (under RTNL): collect the currently wanted mcast addresses
 * into rx_mode_addrs via qeth_l3_add_mcast_rtnl().
 * Phase 2: sync the hash with the card -- DELETE entries are
 * deregistered and freed, ADD entries are registered (and dropped on
 * hard failure).  Surviving entries are re-flagged DELETE so the next
 * run removes whatever is no longer requested.
 */
static void qeth_l3_rx_mode_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(work, struct qeth_card,
					      rx_mode_work);
	struct qeth_ipaddr *addr;
	struct hlist_node *tmp;
	int i, rc;

	QETH_CARD_TEXT(card, 3, "setmulti");

	if (!card->options.sniffer) {
		rtnl_lock();
		qeth_l3_add_mcast_rtnl(card->dev, 0, card);
		if (qeth_is_supported(card, IPA_FULL_VLAN))
			vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card);
		rtnl_unlock();

		hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) {
			switch (addr->disp_flag) {
			case QETH_DISP_ADDR_DELETE:
				rc = qeth_l3_deregister_addr_entry(card, addr);
				/* -ENOENT: already gone, drop it anyway */
				if (!rc || rc == -ENOENT) {
					hash_del(&addr->hnode);
					kfree(addr);
				}
				break;
			case QETH_DISP_ADDR_ADD:
				rc = qeth_l3_register_addr_entry(card, addr);
				if (rc && rc != -ENETDOWN) {
					/* hard failure: forget this address */
					hash_del(&addr->hnode);
					kfree(addr);
					break;
				}
				fallthrough;
			default:
				/* for next call to set_rx_mode(): */
				addr->disp_flag = QETH_DISP_ADDR_DELETE;
			}
		}
	}

	qeth_l3_set_promisc_mode(card);
}
  1009. static int qeth_l3_arp_makerc(u16 rc)
  1010. {
  1011. switch (rc) {
  1012. case IPA_RC_SUCCESS:
  1013. return 0;
  1014. case QETH_IPA_ARP_RC_NOTSUPP:
  1015. case QETH_IPA_ARP_RC_Q_NOTSUPP:
  1016. return -EOPNOTSUPP;
  1017. case QETH_IPA_ARP_RC_OUT_OF_RANGE:
  1018. return -EINVAL;
  1019. case QETH_IPA_ARP_RC_Q_NO_DATA:
  1020. return -ENOENT;
  1021. default:
  1022. return -EIO;
  1023. }
  1024. }
  1025. static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply,
  1026. unsigned long data)
  1027. {
  1028. struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
  1029. qeth_setassparms_cb(card, reply, data);
  1030. return qeth_l3_arp_makerc(cmd->hdr.return_code);
  1031. }
  1032. static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
  1033. {
  1034. struct qeth_cmd_buffer *iob;
  1035. int rc;
  1036. QETH_CARD_TEXT(card, 3, "arpstnoe");
  1037. /*
  1038. * currently GuestLAN only supports the ARP assist function
  1039. * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
  1040. * thus we say EOPNOTSUPP for this ARP function
  1041. */
  1042. if (IS_VM_NIC(card))
  1043. return -EOPNOTSUPP;
  1044. if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
  1045. return -EOPNOTSUPP;
  1046. }
  1047. iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
  1048. IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
  1049. SETASS_DATA_SIZEOF(flags_32bit),
  1050. QETH_PROT_IPV4);
  1051. if (!iob)
  1052. return -ENOMEM;
  1053. __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries;
  1054. rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
  1055. if (rc)
  1056. QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
  1057. CARD_DEVID(card), rc);
  1058. return rc;
  1059. }
  1060. static __u32 get_arp_entry_size(struct qeth_card *card,
  1061. struct qeth_arp_query_data *qdata,
  1062. struct qeth_arp_entrytype *type, __u8 strip_entries)
  1063. {
  1064. __u32 rc;
  1065. __u8 is_hsi;
  1066. is_hsi = qdata->reply_bits == 5;
  1067. if (type->ip == QETHARP_IP_ADDR_V4) {
  1068. QETH_CARD_TEXT(card, 4, "arpev4");
  1069. if (strip_entries) {
  1070. rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) :
  1071. sizeof(struct qeth_arp_qi_entry7_short);
  1072. } else {
  1073. rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) :
  1074. sizeof(struct qeth_arp_qi_entry7);
  1075. }
  1076. } else if (type->ip == QETHARP_IP_ADDR_V6) {
  1077. QETH_CARD_TEXT(card, 4, "arpev6");
  1078. if (strip_entries) {
  1079. rc = is_hsi ?
  1080. sizeof(struct qeth_arp_qi_entry5_short_ipv6) :
  1081. sizeof(struct qeth_arp_qi_entry7_short_ipv6);
  1082. } else {
  1083. rc = is_hsi ?
  1084. sizeof(struct qeth_arp_qi_entry5_ipv6) :
  1085. sizeof(struct qeth_arp_qi_entry7_ipv6);
  1086. }
  1087. } else {
  1088. QETH_CARD_TEXT(card, 4, "arpinv");
  1089. rc = 0;
  1090. }
  1091. return rc;
  1092. }
  1093. static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot)
  1094. {
  1095. return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) ||
  1096. (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6);
  1097. }
/* Reply callback for ARP_QUERY_INFO: copy the ARP entries from the
 * command reply into qinfo's kernel buffer.
 *
 * Entries of varying size (HSI "entry5" vs OSA "entry7" layout, IPv4 vs
 * IPv6, optionally with the media-specific bytes stripped) are walked
 * one by one.  Returns 1 while more reply parts are outstanding, 0 when
 * the final part has been consumed, or a negative errno.
 */
static int qeth_l3_arp_query_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_data *qdata;
	struct qeth_arp_query_info *qinfo;
	int e;
	int entrybytes_done;
	int stripped_bytes;
	__u8 do_strip_entries;

	QETH_CARD_TEXT(card, 3, "arpquecb");

	qinfo = (struct qeth_arp_query_info *) reply->param;
	cmd = (struct qeth_ipa_cmd *) data;
	QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version);
	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT(card, 4, "arpcberr");
		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
		return qeth_l3_arp_makerc(cmd->hdr.return_code);
	}
	if (cmd->data.setassparms.hdr.return_code) {
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
		QETH_CARD_TEXT(card, 4, "setaperr");
		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
		return qeth_l3_arp_makerc(cmd->hdr.return_code);
	}
	qdata = &cmd->data.setassparms.data.query_arp;
	QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);

	do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0;
	stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0;
	entrybytes_done = 0;
	for (e = 0; e < qdata->no_entries; ++e) {
		char *cur_entry;
		__u32 esize;
		struct qeth_arp_entrytype *etype;

		/* peek the entry type; its offset is layout-independent */
		cur_entry = &qdata->data + entrybytes_done;
		etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type;
		if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) {
			QETH_CARD_TEXT(card, 4, "pmis");
			QETH_CARD_TEXT_(card, 4, "%i", etype->ip);
			break;
		}
		esize = get_arp_entry_size(card, qdata, etype,
					   do_strip_entries);
		QETH_CARD_TEXT_(card, 5, "esz%i", esize);
		if (!esize)
			break;

		if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
			QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC);
			/* zero the entry count at the buffer's start */
			memset(qinfo->udata, 0, 4);
			return -ENOSPC;
		}

		memcpy(qinfo->udata + qinfo->udata_offset,
		       &qdata->data + entrybytes_done + stripped_bytes,
		       esize);
		entrybytes_done += esize + stripped_bytes;
		qinfo->udata_offset += esize;
		++qinfo->no_entries;
	}
	/* check if all replies received ... */
	if (cmd->data.setassparms.hdr.seq_no <
	    cmd->data.setassparms.hdr.number_of_replies)
		return 1;
	QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries);
	memcpy(qinfo->udata, &qinfo->no_entries, 4);
	/* keep STRIP_ENTRIES flag so the user program can distinguish
	 * stripped entries from normal ones */
	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
	QETH_CARD_TEXT_(card, 4, "rc%i", 0);
	return 0;
}
  1170. static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
  1171. enum qeth_prot_versions prot,
  1172. struct qeth_arp_query_info *qinfo)
  1173. {
  1174. struct qeth_cmd_buffer *iob;
  1175. struct qeth_ipa_cmd *cmd;
  1176. int rc;
  1177. QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
  1178. iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
  1179. IPA_CMD_ASS_ARP_QUERY_INFO,
  1180. SETASS_DATA_SIZEOF(query_arp), prot);
  1181. if (!iob)
  1182. return -ENOMEM;
  1183. cmd = __ipa_cmd(iob);
  1184. cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
  1185. rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo);
  1186. if (rc)
  1187. QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
  1188. CARD_DEVID(card), rc);
  1189. return rc;
  1190. }
/* Handle SIOC_QETH_ARP_QUERY_INFO: copy the card's ARP cache (IPv4,
 * plus IPv6 when QETH_QARP_WITH_IPV6 is requested) into the userspace
 * buffer at @udata.
 *
 * The first 6 bytes of @udata carry the buffer length and mask_bits on
 * input; on output the buffer holds the entry count, the reply bits and
 * the entries themselves.
 */
static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
{
	struct qeth_arp_query_info qinfo = {0, };
	int rc;

	QETH_CARD_TEXT(card, 3, "arpquery");

	if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
			       IPA_ARP_PROCESSING)) {
		QETH_CARD_TEXT(card, 3, "arpqnsup");
		rc = -EOPNOTSUPP;
		goto out;
	}
	/* get size of userspace buffer and mask_bits -> 6 bytes */
	if (copy_from_user(&qinfo, udata, 6)) {
		rc = -EFAULT;
		goto out;
	}
	/* NOTE(review): udata_len comes from userspace and is used as the
	 * allocation size without an explicit bound here; the ioctl path
	 * requires CAP_NET_ADMIN -- verify that this is sufficient. */
	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		rc = -ENOMEM;
		goto out;
	}
	qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
	rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo);
	if (rc) {
		/* still report the entry count prefix back to userspace */
		if (copy_to_user(udata, qinfo.udata, 4))
			rc = -EFAULT;
		goto free_and_out;
	}
	if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
		/* fails in case of GuestLAN QDIO mode */
		qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
	}
	if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
		QETH_CARD_TEXT(card, 4, "qactf");
		rc = -EFAULT;
		goto free_and_out;
	}
	QETH_CARD_TEXT(card, 4, "qacts");

free_and_out:
	kfree(qinfo.udata);
out:
	return rc;
}
  1234. static int qeth_l3_arp_modify_entry(struct qeth_card *card,
  1235. struct qeth_arp_cache_entry *entry,
  1236. enum qeth_arp_process_subcmds arp_cmd)
  1237. {
  1238. struct qeth_arp_cache_entry *cmd_entry;
  1239. struct qeth_cmd_buffer *iob;
  1240. int rc;
  1241. if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY)
  1242. QETH_CARD_TEXT(card, 3, "arpadd");
  1243. else
  1244. QETH_CARD_TEXT(card, 3, "arpdel");
  1245. /*
  1246. * currently GuestLAN only supports the ARP assist function
  1247. * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
  1248. * thus we say EOPNOTSUPP for this ARP function
  1249. */
  1250. if (IS_VM_NIC(card))
  1251. return -EOPNOTSUPP;
  1252. if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
  1253. return -EOPNOTSUPP;
  1254. }
  1255. iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
  1256. SETASS_DATA_SIZEOF(arp_entry),
  1257. QETH_PROT_IPV4);
  1258. if (!iob)
  1259. return -ENOMEM;
  1260. cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
  1261. ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
  1262. memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
  1263. rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
  1264. if (rc)
  1265. QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
  1266. arp_cmd, CARD_DEVID(card), rc);
  1267. return rc;
  1268. }
  1269. static int qeth_l3_arp_flush_cache(struct qeth_card *card)
  1270. {
  1271. struct qeth_cmd_buffer *iob;
  1272. int rc;
  1273. QETH_CARD_TEXT(card, 3, "arpflush");
  1274. /*
  1275. * currently GuestLAN only supports the ARP assist function
  1276. * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
  1277. * thus we say EOPNOTSUPP for this ARP function
  1278. */
  1279. if (IS_VM_NIC(card) || IS_IQD(card))
  1280. return -EOPNOTSUPP;
  1281. if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
  1282. return -EOPNOTSUPP;
  1283. }
  1284. iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
  1285. IPA_CMD_ASS_ARP_FLUSH_CACHE, 0,
  1286. QETH_PROT_IPV4);
  1287. if (!iob)
  1288. return -ENOMEM;
  1289. rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
  1290. if (rc)
  1291. QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
  1292. CARD_DEVID(card), rc);
  1293. return rc;
  1294. }
  1295. static int qeth_l3_ndo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
  1296. void __user *data, int cmd)
  1297. {
  1298. struct qeth_card *card = dev->ml_priv;
  1299. struct qeth_arp_cache_entry arp_entry;
  1300. enum qeth_arp_process_subcmds arp_cmd;
  1301. int rc = 0;
  1302. switch (cmd) {
  1303. case SIOC_QETH_ARP_SET_NO_ENTRIES:
  1304. if (!capable(CAP_NET_ADMIN)) {
  1305. rc = -EPERM;
  1306. break;
  1307. }
  1308. rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
  1309. break;
  1310. case SIOC_QETH_ARP_QUERY_INFO:
  1311. if (!capable(CAP_NET_ADMIN)) {
  1312. rc = -EPERM;
  1313. break;
  1314. }
  1315. rc = qeth_l3_arp_query(card, data);
  1316. break;
  1317. case SIOC_QETH_ARP_ADD_ENTRY:
  1318. case SIOC_QETH_ARP_REMOVE_ENTRY:
  1319. if (!capable(CAP_NET_ADMIN))
  1320. return -EPERM;
  1321. if (copy_from_user(&arp_entry, data, sizeof(arp_entry)))
  1322. return -EFAULT;
  1323. arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
  1324. IPA_CMD_ASS_ARP_ADD_ENTRY :
  1325. IPA_CMD_ASS_ARP_REMOVE_ENTRY;
  1326. return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd);
  1327. case SIOC_QETH_ARP_FLUSH_CACHE:
  1328. if (!capable(CAP_NET_ADMIN)) {
  1329. rc = -EPERM;
  1330. break;
  1331. }
  1332. rc = qeth_l3_arp_flush_cache(card);
  1333. break;
  1334. default:
  1335. rc = qeth_siocdevprivate(dev, rq, data, cmd);
  1336. }
  1337. return rc;
  1338. }
  1339. static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
  1340. __be16 proto)
  1341. {
  1342. struct neighbour *n = NULL;
  1343. if (dst)
  1344. n = dst_neigh_lookup_skb(dst, skb);
  1345. if (n) {
  1346. int cast_type = n->type;
  1347. neigh_release(n);
  1348. if ((cast_type == RTN_BROADCAST) ||
  1349. (cast_type == RTN_MULTICAST) ||
  1350. (cast_type == RTN_ANYCAST))
  1351. return cast_type;
  1352. return RTN_UNICAST;
  1353. }
  1354. /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
  1355. switch (proto) {
  1356. case htons(ETH_P_IP):
  1357. if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
  1358. return RTN_BROADCAST;
  1359. return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
  1360. RTN_MULTICAST : RTN_UNICAST;
  1361. case htons(ETH_P_IPV6):
  1362. return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
  1363. RTN_MULTICAST : RTN_UNICAST;
  1364. case htons(ETH_P_AF_IUCV):
  1365. return RTN_UNICAST;
  1366. default:
  1367. /* OSA only: ... and MAC address */
  1368. return qeth_get_ether_cast_type(skb);
  1369. }
  1370. }
  1371. static int qeth_l3_get_cast_type(struct sk_buff *skb, __be16 proto)
  1372. {
  1373. struct dst_entry *dst;
  1374. int cast_type;
  1375. rcu_read_lock();
  1376. dst = qeth_dst_check_rcu(skb, proto);
  1377. cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
  1378. rcu_read_unlock();
  1379. return cast_type;
  1380. }
  1381. static u8 qeth_l3_cast_type_to_flag(int cast_type)
  1382. {
  1383. if (cast_type == RTN_MULTICAST)
  1384. return QETH_CAST_MULTICAST;
  1385. if (cast_type == RTN_ANYCAST)
  1386. return QETH_CAST_ANYCAST;
  1387. if (cast_type == RTN_BROADCAST)
  1388. return QETH_CAST_BROADCAST;
  1389. return QETH_CAST_UNICAST;
  1390. }
/* Build the qeth Layer-3 hardware header for @skb.
 *
 * Fills in the payload length, header type (TSO vs plain L3), checksum
 * offload and VLAN flags, the cast-type flag, and the protocol-specific
 * next-hop address.  A short RCU section protects the dst lookups.
 */
static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
				struct qeth_hdr *hdr, struct sk_buff *skb,
				__be16 proto, unsigned int data_len)
{
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	struct qeth_card *card = queue->card;
	struct dst_entry *dst;
	int cast_type;

	hdr->hdr.l3.length = data_len;

	if (skb_is_gso(skb)) {
		hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO;
	} else {
		hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, proto);
			/* some HW requires combined L3+L4 csum offload: */
			if (proto == htons(ETH_P_IP))
				hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
		}
	}

	if (proto == htons(ETH_P_IP) || IS_IQD(card)) {
		/* NETIF_F_HW_VLAN_CTAG_TX */
		if (skb_vlan_tag_present(skb)) {
			hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME;
			hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
		}
	} else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
		/* VLAN tag stays inline in the frame; flag it for the HW */
		hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG;
		hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
	}

	rcu_read_lock();
	dst = qeth_dst_check_rcu(skb, proto);

	/* IQD: frames on queues other than the mcast TXQ are flagged
	 * unicast regardless of the actual destination */
	if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ)
		cast_type = RTN_UNICAST;
	else
		cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
	l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);

	switch (proto) {
	case htons(ETH_P_IP):
		l3_hdr->next_hop.addr.s6_addr32[3] =
			qeth_next_hop_v4_rcu(skb, dst);
		break;
	case htons(ETH_P_IPV6):
		l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst);

		hdr->hdr.l3.flags |= QETH_HDR_IPV6;
		if (!IS_IQD(card))
			hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
		break;
	case htons(ETH_P_AF_IUCV):
		/* synthesize a link-local next hop from the IUCV user ID */
		l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
		memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
		       iucv_trans_hdr(skb)->destUserID, 8);
		l3_hdr->flags |= QETH_HDR_IPV6;
		break;
	default:
		/* OSA only: */
		l3_hdr->flags |= QETH_HDR_PASSTHRU;
	}
	rcu_read_unlock();
}
/* Prepare IPv4 headers for checksum/TSO offload: zero the IP checksum
 * when the HW computes it, and for GSO skbs clear tot_len and seed the
 * TCP pseudo-header checksum.
 */
static void qeth_l3_fixup_headers(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	/* this is safe, IPv6 traffic takes a different path */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		iph->check = 0;
	if (skb_is_gso(skb)) {
		iph->tot_len = 0;
		tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr,
						    iph->daddr, 0);
	}
}
  1464. static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
  1465. struct qeth_qdio_out_q *queue, __be16 proto)
  1466. {
  1467. unsigned int hw_hdr_len;
  1468. int rc;
  1469. /* re-use the L2 header area for the HW header: */
  1470. hw_hdr_len = skb_is_gso(skb) ? sizeof(struct qeth_hdr_tso) :
  1471. sizeof(struct qeth_hdr);
  1472. rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
  1473. if (rc)
  1474. return rc;
  1475. skb_pull(skb, ETH_HLEN);
  1476. qeth_l3_fixup_headers(skb);
  1477. return qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
  1478. }
/* ndo_start_xmit for Layer-3 devices.
 *
 * Selects the TX queue, enforces the IQD protocol/CQ policy, and then
 * dispatches via qeth_l3_xmit() (which strips the dummy L2 header) for
 * IPv4 or IQD traffic, else plain qeth_xmit().  Dropped frames are
 * counted on the selected queue; the function always reports
 * NETDEV_TX_OK.
 */
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	__be16 proto = vlan_get_protocol(skb);
	u16 txq = skb_get_queue_mapping(skb);
	struct qeth_qdio_out_q *queue;
	int rc;

	/* record the true frame length for non-GSO skbs */
	if (!skb_is_gso(skb))
		qdisc_skb_cb(skb)->pkt_len = skb->len;

	if (IS_IQD(card)) {
		queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];

		if (card->options.sniffer)
			goto tx_drop;

		/* the CQ setting decides which protocols may transmit: */
		switch (proto) {
		case htons(ETH_P_AF_IUCV):
			if (card->options.cq != QETH_CQ_ENABLED)
				goto tx_drop;
			break;
		case htons(ETH_P_IP):
		case htons(ETH_P_IPV6):
			if (card->options.cq == QETH_CQ_ENABLED)
				goto tx_drop;
			break;
		default:
			goto tx_drop;
		}
	} else {
		queue = card->qdio.out_qs[txq];
	}

	if (!(dev->flags & IFF_BROADCAST) &&
	    qeth_l3_get_cast_type(skb, proto) == RTN_BROADCAST)
		goto tx_drop;

	if (proto == htons(ETH_P_IP) || IS_IQD(card))
		rc = qeth_l3_xmit(card, skb, queue, proto);
	else
		rc = qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
	if (!rc)
		return NETDEV_TX_OK;

tx_drop:
	QETH_TXQ_STAT_INC(queue, tx_dropped);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* ndo_set_rx_mode callback: defer the actual rx-mode programming (which
 * issues blocking IPA commands) to rx_mode_work in process context.
 */
static void qeth_l3_set_rx_mode(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	schedule_work(&card->rx_mode_work);
}
/*
 * We need NOARP for IPv4, but we still want neighbor solicitation for IPv6.
 * Setting NOARP on the netdevice is not an option, because that would also
 * turn off neighbor solicitation. Instead, for IPv4 we install a
 * neighbor_setup function: we do not want ARP resolution, but we do want the
 * hard header, so that packet sockets (e.g. tcpdump) keep working.
 */
/* Per-neighbour init for IPv4: mark the entry permanently "resolved"
 * (NUD_NOARP) with a placeholder L2 address, and route output through
 * the connected path so no ARP request is ever sent.
 */
static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
{
	n->nud_state = NUD_NOARP;
	memcpy(n->ha, "FAKELL", 6);	/* dummy hardware address */
	n->output = n->ops->connected_output;
	return 0;
}
  1542. static int
  1543. qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
  1544. {
  1545. if (np->tbl->family == AF_INET)
  1546. np->neigh_setup = qeth_l3_neigh_setup_noarp;
  1547. return 0;
  1548. }
  1549. static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
  1550. struct net_device *dev,
  1551. netdev_features_t features)
  1552. {
  1553. if (vlan_get_protocol(skb) != htons(ETH_P_IP))
  1554. features &= ~NETIF_F_HW_VLAN_CTAG_TX;
  1555. return qeth_features_check(skb, dev, features);
  1556. }
  1557. static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
  1558. struct net_device *sb_dev)
  1559. {
  1560. __be16 proto = vlan_get_protocol(skb);
  1561. return qeth_iqd_select_queue(dev, skb,
  1562. qeth_l3_get_cast_type(skb, proto), sb_dev);
  1563. }
/* netdev ops used for IQD (HiperSockets) devices -- see
 * qeth_l3_setup_netdev(); queue selection is cast-type based. */
static const struct net_device_ops qeth_l3_netdev_ops = {
	.ndo_open		= qeth_open,
	.ndo_stop		= qeth_stop,
	.ndo_get_stats64	= qeth_get_stats64,
	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
	.ndo_select_queue	= qeth_l3_iqd_select_queue,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
	.ndo_eth_ioctl		= qeth_do_ioctl,
	.ndo_siocdevprivate	= qeth_l3_ndo_siocdevprivate,
	.ndo_fix_features	= qeth_fix_features,
	.ndo_set_features	= qeth_set_features,
	.ndo_tx_timeout		= qeth_tx_timeout,
};
/* netdev ops used for OSD/OSX (OSA) devices -- adds the OSA feature
 * check and the no-ARP neighbour setup for IPv4. */
static const struct net_device_ops qeth_l3_osa_netdev_ops = {
	.ndo_open		= qeth_open,
	.ndo_stop		= qeth_stop,
	.ndo_get_stats64	= qeth_get_stats64,
	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
	.ndo_features_check	= qeth_l3_osa_features_check,
	.ndo_select_queue	= qeth_osa_select_queue,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
	.ndo_eth_ioctl		= qeth_do_ioctl,
	.ndo_siocdevprivate	= qeth_l3_ndo_siocdevprivate,
	.ndo_fix_features	= qeth_fix_features,
	.ndo_set_features	= qeth_set_features,
	.ndo_tx_timeout		= qeth_tx_timeout,
	.ndo_neigh_setup	= qeth_l3_neigh_setup,
};
/* One-time netdev configuration for a Layer-3 card, ending in
 * register_netdev().
 *
 * OSD/OSX: install the OSA ndo ops, derive dev_id for IPv6 address
 * autoconfiguration, and advertise the offloads (SG/TSO/csum) the card
 * supports.  IQD: NOARP, IQD ndo ops, and the initial MAC is read from
 * the card.  Any other card type is rejected with -ENODEV.
 */
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
	struct net_device *dev = card->dev;
	unsigned int headroom;
	int rc;

	if (IS_OSD(card) || IS_OSX(card)) {
		card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;

		/*IPv6 address autoconfiguration stuff*/
		dev->dev_id = qeth_l3_get_unique_id(card, dev->dev_id);

		if (!IS_VM_NIC(card)) {
			card->dev->features |= NETIF_F_SG;
			card->dev->hw_features |= NETIF_F_TSO |
				NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
			card->dev->vlan_features |= NETIF_F_TSO |
				NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
		}

		if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
			card->dev->hw_features |= NETIF_F_IPV6_CSUM;
			card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
		}
		if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
			card->dev->hw_features |= NETIF_F_TSO6;
			card->dev->vlan_features |= NETIF_F_TSO6;
		}

		/* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
		if (card->dev->hw_features & NETIF_F_TSO6)
			headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
		else if (card->dev->hw_features & NETIF_F_TSO)
			headroom = sizeof(struct qeth_hdr_tso);
		else
			headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
	} else if (IS_IQD(card)) {
		card->dev->flags |= IFF_NOARP;
		card->dev->netdev_ops = &qeth_l3_netdev_ops;

		/* qeth_l3_xmit() pulls ETH_HLEN before adding the HW hdr */
		headroom = sizeof(struct qeth_hdr) - ETH_HLEN;

		rc = qeth_l3_iqd_read_initial_mac(card);
		if (rc)
			return rc;
	} else
		return -ENODEV;

	card->dev->needed_headroom = headroom;
	card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX;

	netif_keep_dst(card->dev);
	if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6))
		netif_set_tso_max_size(card->dev,
				       PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));

	netif_napi_add(card->dev, &card->napi, qeth_poll);
	return register_netdev(card->dev);
}
/* device_type for L3-mode ccwgroup devices; attaches the layer-3 sysfs
 * attribute groups.
 */
static const struct device_type qeth_l3_devtype = {
	.name = "qeth_layer3",
	.groups = qeth_l3_attr_groups,
};
/* Discipline .setup hook: initialize per-card L3 state (IP hash table, lock,
 * ordered command workqueue, sysfs attributes, rx-mode worker).
 * Returns 0 on success or a negative errno; on failure the workqueue is
 * cleaned up again.
 */
static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	hash_init(card->ip_htable);
	mutex_init(&card->ip_lock);
	card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0,
					       dev_name(&gdev->dev));
	if (!card->cmd_wq)
		return -ENOMEM;

	if (gdev->dev.type) {
		/* Device already carries a type (re-probe): add our attribute
		 * groups explicitly.
		 */
		rc = device_add_groups(&gdev->dev, qeth_l3_attr_groups);
		if (rc) {
			destroy_workqueue(card->cmd_wq);
			return rc;
		}
	} else {
		/* Fresh device: setting the type brings the groups along. */
		gdev->dev.type = &qeth_l3_devtype;
	}

	INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
	return 0;
}
/* Discipline .remove hook: undo qeth_l3_probe_device and tear the card down.
 * Stops all card threads, takes the card offline if needed, unregisters the
 * netdev and drops all tracked IP/IPATO state.
 */
static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	/* Only remove the groups we added explicitly in probe; if the device
	 * type is ours, the groups are removed with the device itself.
	 */
	if (cgdev->dev.type != &qeth_l3_devtype)
		device_remove_groups(&cgdev->dev, qeth_l3_attr_groups);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE)
		qeth_set_offline(card, card->discipline, false);

	if (card->dev->reg_state == NETREG_REGISTERED)
		unregister_netdev(card->dev);

	destroy_workqueue(card->cmd_wq);
	qeth_l3_clear_ip_htable(card, 0);
	qeth_l3_clear_ipato_list(card);
}
/* Discipline .set_online hook: soft-setup the card (adapter parms, IP
 * assists, IPv4/IPv6 routing), restore tracked IP addresses, and either
 * register the netdev (first online) or re-attach an already-registered one.
 * Setup-step failures are logged to the debug trace but only netdev/queue
 * errors abort the transition.
 */
static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
{
	struct net_device *dev = card->dev;
	int rc = 0;

	/* softsetup */
	QETH_CARD_TEXT(card, 2, "softsetp");

	rc = qeth_l3_setadapter_parms(card);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
	if (!card->options.sniffer) {
		/* In sniffer mode the card only traces traffic, so skip the
		 * IP assist and routing setup.
		 */
		qeth_l3_start_ipassists(card);

		rc = qeth_l3_setrouting_v4(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
		rc = qeth_l3_setrouting_v6(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
	}

	card->state = CARD_STATE_SOFTSETUP;

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	qeth_l3_recover_ip(card);

	if (dev->reg_state != NETREG_REGISTERED) {
		/* First time online: create and register the netdev. */
		rc = qeth_l3_setup_netdev(card);
		if (rc)
			goto err_setup;

		if (carrier_ok)
			netif_carrier_on(dev);
	} else {
		/* Coming back online: resync queues, carrier and features,
		 * then re-attach the device. All under RTNL.
		 */
		rtnl_lock();
		rc = qeth_set_real_num_tx_queues(card,
						 qeth_tx_actual_queues(card));
		if (rc) {
			rtnl_unlock();
			goto err_set_queues;
		}

		if (carrier_ok)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		netif_device_attach(dev);
		qeth_enable_hw_features(dev);

		if (netif_running(dev)) {
			local_bh_disable();
			napi_schedule(&card->napi);
			/* kick-start the NAPI softirq: */
			local_bh_enable();
		}
		rtnl_unlock();
	}
	return 0;

err_set_queues:
err_setup:
	qeth_set_allowed_threads(card, 0, 1);
	card->state = CARD_STATE_DOWN;
	qeth_l3_clear_ip_htable(card, 1);
	return rc;
}
/* Discipline .set_offline hook: stop card threads and rx-mode work, disable
 * diagnostic tracing if sniffer/promisc was active, and drop the soft-setup
 * IP state.
 */
static void qeth_l3_set_offline(struct qeth_card *card)
{
	qeth_set_allowed_threads(card, 0, 1);
	qeth_l3_drain_rx_mode_cache(card);

	if (card->options.sniffer &&
	    (card->info.promisc_mode == SET_PROMISC_MODE_ON))
		qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);

	if (card->state == CARD_STATE_SOFTSETUP) {
		card->state = CARD_STATE_DOWN;
		qeth_l3_clear_ip_htable(card, 1);
	}
}
/* Returns zero if the command is successfully "consumed" */
static int qeth_l3_control_event(struct qeth_card *card,
				 struct qeth_ipa_cmd *cmd)
{
	/* L3 has no control events of its own; never consume the command. */
	return 1;
}
/* Layer-3 discipline callbacks, consumed by the qeth core when a ccwgroup
 * device is configured for L3 mode.
 */
const struct qeth_discipline qeth_l3_discipline = {
	.setup = qeth_l3_probe_device,
	.remove = qeth_l3_remove_device,
	.set_online = qeth_l3_set_online,
	.set_offline = qeth_l3_set_offline,
	.control_event_handler = qeth_l3_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l3_discipline);
  1768. static int qeth_l3_handle_ip_event(struct qeth_card *card,
  1769. struct qeth_ipaddr *addr,
  1770. unsigned long event)
  1771. {
  1772. switch (event) {
  1773. case NETDEV_UP:
  1774. qeth_l3_modify_ip(card, addr, true);
  1775. return NOTIFY_OK;
  1776. case NETDEV_DOWN:
  1777. qeth_l3_modify_ip(card, addr, false);
  1778. return NOTIFY_OK;
  1779. default:
  1780. return NOTIFY_DONE;
  1781. }
  1782. }
/* Deferred IP-address change: carries the target card and a copy of the
 * address from notifier context to the card's command workqueue.
 */
struct qeth_l3_ip_event_work {
	struct work_struct work;
	struct qeth_card *card;
	struct qeth_ipaddr addr;
};

/* Recover the containing qeth_l3_ip_event_work from its work_struct. */
#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work)
  1789. static void qeth_l3_add_ip_worker(struct work_struct *work)
  1790. {
  1791. struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
  1792. qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true);
  1793. kfree(work);
  1794. }
  1795. static void qeth_l3_delete_ip_worker(struct work_struct *work)
  1796. {
  1797. struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
  1798. qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false);
  1799. kfree(work);
  1800. }
  1801. static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
  1802. {
  1803. if (is_vlan_dev(dev))
  1804. dev = vlan_dev_real_dev(dev);
  1805. if (dev->netdev_ops == &qeth_l3_osa_netdev_ops ||
  1806. dev->netdev_ops == &qeth_l3_netdev_ops)
  1807. return (struct qeth_card *) dev->ml_priv;
  1808. return NULL;
  1809. }
/* inetaddr notifier callback: propagate IPv4 address add/remove events on
 * our devices into the card's IP table. Runs synchronously (unlike the IPv6
 * path, which defers to a workqueue).
 */
static int qeth_l3_ip_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct qeth_ipaddr addr;
	struct qeth_card *card;

	card = qeth_l3_get_card_from_dev(dev);
	if (!card)
		return NOTIFY_DONE;
	QETH_CARD_TEXT(card, 3, "ipevent");

	qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
	addr.u.a4.addr = ifa->ifa_address;
	addr.u.a4.mask = ifa->ifa_mask;

	return qeth_l3_handle_ip_event(card, &addr, event);
}
  1826. static struct notifier_block qeth_l3_ip_notifier = {
  1827. qeth_l3_ip_event,
  1828. NULL,
  1829. };
/* inet6addr notifier callback: queue IPv6 address add/remove events on our
 * devices to the card's ordered workqueue. Deferral is needed because this
 * notifier can run in atomic context (hence GFP_ATOMIC); the work item owns
 * its own copy of the address and is freed by the worker.
 */
static int qeth_l3_ip6_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = ifa->idev->dev;
	struct qeth_l3_ip_event_work *ip_work;
	struct qeth_card *card;

	if (event != NETDEV_UP && event != NETDEV_DOWN)
		return NOTIFY_DONE;

	card = qeth_l3_get_card_from_dev(dev);
	if (!card)
		return NOTIFY_DONE;
	QETH_CARD_TEXT(card, 3, "ip6event");
	if (!qeth_is_supported(card, IPA_IPV6))
		return NOTIFY_DONE;

	ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC);
	if (!ip_work)
		return NOTIFY_DONE;

	if (event == NETDEV_UP)
		INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker);
	else
		INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker);

	ip_work->card = card;
	qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL,
			    QETH_PROT_IPV6);
	ip_work->addr.u.a6.addr = ifa->addr;
	ip_work->addr.u.a6.pfxlen = ifa->prefix_len;

	queue_work(card->cmd_wq, &ip_work->work);
	return NOTIFY_OK;
}
  1860. static struct notifier_block qeth_l3_ip6_notifier = {
  1861. qeth_l3_ip6_event,
  1862. NULL,
  1863. };
  1864. static int qeth_l3_register_notifiers(void)
  1865. {
  1866. int rc;
  1867. QETH_DBF_TEXT(SETUP, 5, "regnotif");
  1868. rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
  1869. if (rc)
  1870. return rc;
  1871. rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
  1872. if (rc) {
  1873. unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
  1874. return rc;
  1875. }
  1876. return 0;
  1877. }
/* Unregister both address notifiers; a failure here indicates a driver bug,
 * hence the WARN_ON.
 */
static void qeth_l3_unregister_notifiers(void)
{
	QETH_DBF_TEXT(SETUP, 5, "unregnot");
	WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
	WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
}
/* Module init: the discipline itself is picked up via qeth_l3_discipline;
 * only the address notifiers need explicit registration here.
 */
static int __init qeth_l3_init(void)
{
	pr_info("register layer 3 discipline\n");
	return qeth_l3_register_notifiers();
}
/* Module exit: mirror of qeth_l3_init. */
static void __exit qeth_l3_exit(void)
{
	qeth_l3_unregister_notifiers();
	pr_info("unregister layer 3 discipline\n");
}
module_init(qeth_l3_init);
module_exit(qeth_l3_exit);

MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 3 discipline");
MODULE_LICENSE("GPL");