// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
#include <linux/if_bridge.h>

/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;
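
	/* The key is written most-significant word first and each word
	 * is byteswapped to big endian, which is the layout the
	 * hardware's key registers expect.
	 */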
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
			(__force u32)cpu_to_le32((__force u32)spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}

/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
				(__force u32)cpu_to_le32((__force u32)addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}

/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}
}

/**
 * ixgbe_ipsec_stop_data - stop the Rx and Tx data paths
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* If both Tx and Rx are ready there are no packets
	 * that we need to flush so the loopback configuration
	 * below is not necessary.
	 */
	t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_RDY;
	r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_RDY;
	if (t_rdy && r_rdy)
		return;

	/* If the tx fifo doesn't have link, but still has data,
	 * we can't clear the tx sec block.  Set the MAC loopback
	 * before block clear
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!(t_rdy && r_rdy) && limit--);

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}

/**
 * ixgbe_ipsec_stop_engine - disable the IPsec hw engine
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_start_engine - configure and enable the IPsec hw engine
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);
		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}

/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;
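
	/* the Rx SA hash list is keyed on the (bigendian) SPI, so any
	 * hash collisions are resolved by also matching the destination
	 * address and protocol
	 */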
	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
	if (key_len == 160) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != 128) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}

/**
 * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
 * @xs: pointer to transformer state struct
 *
 * Returns: 1 if the IP address matches a management filter, else 0
 **/
static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mfval, manc, reg;
	int num_filters = 4;
	bool manc_ipv4;
	u32 bmcipval;
	int i, j;

#define MANC_EN_IPV4_FILTER      BIT(24)
#define MFVAL_IPV4_FILTER_SHIFT  16
#define MFVAL_IPV6_FILTER_SHIFT  24
#define MIPAF_ARR(_m, _n)        (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4))

#define IXGBE_BMCIP(_n)          (0x5050 + ((_n) * 4))
#define IXGBE_BMCIPVAL           0x5060
#define BMCIP_V4                 0x2
#define BMCIP_V6                 0x3
#define BMCIP_MASK               0x3

	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER);
	mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL);
	bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL);

	if (xs->props.family == AF_INET) {
		/* are there any IPv4 filters to check? */
		if (manc_ipv4) {
			/* the 4 ipv4 filters are all in MIPAF(3, i) */
			for (i = 0; i < num_filters; i++) {
				if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i)))
					continue;

				reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
				if (reg == xs->id.daddr.a4)
					return 1;
			}
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
			reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
			if (reg == xs->id.daddr.a4)
				return 1;
		}
	} else {
		/* if there are ipv4 filters, they are in the last ipv6 slot */
		if (manc_ipv4)
			num_filters = 3;

		for (i = 0; i < num_filters; i++) {
			if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i)))
				continue;

			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}
	}

	return 0;
}

/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (ixgbe_ipsec_check_mgmt_ip(xs)) {
		netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		if (adapter->num_vfs &&
		    adapter->bridge_mode != BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}

/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;
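
		/* undo the sa_idx + base mapping that was packed into
		 * offload_handle by ixgbe_ipsec_add_sa()
		 */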
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__force __be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
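
/* A minimal sketch of how an SA that these callbacks can offload might
 * be configured from userspace with iproute2; the addresses, SPI, and
 * key bytes below are illustrative only:
 *
 *   ip xfrm state add src 10.0.0.1 dst 10.0.0.2 \
 *       proto esp spi 0x03000042 reqid 0x42 mode transport \
 *       aead 'rfc4106(gcm(aes))' \
 *       0x3031323334353637383940414243444546474849 128 \
 *       offload dev eth0 dir in
 *
 * The 20 byte hex blob is the 16 byte key plus 4 byte salt that
 * ixgbe_ipsec_parse_proto_keys() checks for, and 128 is the ICV length
 * in bits (IXGBE_IPSEC_AUTH_BITS).
 */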

/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}
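
	/* recover the Tx table index that ixgbe_ipsec_add_sa() packed
	 * into the offload_handle
	 */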
	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
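
			/* e.g. a 16 byte ICV and 10 bytes of padding
			 * give trailer_len = 16 + 2 + 10 = 28
			 */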
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}

/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the ip and crypto headers in the data.
	 * We can assume no vlan header in the way, b/c the
	 * hw won't recognize the IPsec packet and anyway the
	 * vlan device doesn't currently support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}

/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ipsec *ipsec;
	u32 t_dis, r_dis;
	size_t size;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* If there is no support for either Tx or Rx offload
	 * we should not be advertising support for IPsec.
	 */
	t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_OFF_DIS;
	r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_OFF_DIS;
	if (t_dis || r_dis)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}

/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}