cvmx-helper-pki.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2018-2022 Marvell International Ltd.
  4. *
  5. * PKI helper functions.
  6. */
  7. #include <time.h>
  8. #include <log.h>
  9. #include <linux/delay.h>
  10. #include <mach/cvmx-regs.h>
  11. #include <mach/cvmx-csr.h>
  12. #include <mach/cvmx-bootmem.h>
  13. #include <mach/octeon-model.h>
  14. #include <mach/cvmx-fuse.h>
  15. #include <mach/octeon-feature.h>
  16. #include <mach/cvmx-qlm.h>
  17. #include <mach/octeon_qlm.h>
  18. #include <mach/cvmx-pcie.h>
  19. #include <mach/cvmx-coremask.h>
  20. #include <mach/cvmx-agl-defs.h>
  21. #include <mach/cvmx-bgxx-defs.h>
  22. #include <mach/cvmx-ciu-defs.h>
  23. #include <mach/cvmx-gmxx-defs.h>
  24. #include <mach/cvmx-gserx-defs.h>
  25. #include <mach/cvmx-ilk-defs.h>
  26. #include <mach/cvmx-ipd-defs.h>
  27. #include <mach/cvmx-pexp-defs.h>
  28. #include <mach/cvmx-pcsx-defs.h>
  29. #include <mach/cvmx-pcsxx-defs.h>
  30. #include <mach/cvmx-pki-defs.h>
  31. #include <mach/cvmx-pko-defs.h>
  32. #include <mach/cvmx-sli-defs.h>
  33. #include <mach/cvmx-xcv-defs.h>
  34. #include <mach/cvmx-hwpko.h>
  35. #include <mach/cvmx-ilk.h>
  36. #include <mach/cvmx-pki.h>
  37. #include <mach/cvmx-helper.h>
  38. #include <mach/cvmx-helper-board.h>
  39. #include <mach/cvmx-helper-cfg.h>
  40. #include <mach/cvmx-helper-pki.h>
  41. #include <mach/cvmx-global-resources.h>
  42. #include <mach/cvmx-pko-internal-ports-range.h>
  43. #include <mach/cvmx-ilk.h>
  44. #include <mach/cvmx-pip.h>
  45. static int pki_helper_debug;
  46. bool cvmx_pki_dflt_init[CVMX_MAX_NODES] = { [0 ... CVMX_MAX_NODES - 1] = 1 };
  47. static bool cvmx_pki_dflt_bp_en[CVMX_MAX_NODES] = { [0 ... CVMX_MAX_NODES - 1] =
  48. true };
  49. static struct cvmx_pki_cluster_grp_config pki_dflt_clgrp[CVMX_MAX_NODES] = {
  50. { 0, 0xf },
  51. { 0, 0xf }
  52. };
  53. struct cvmx_pki_pool_config pki_dflt_pool[CVMX_MAX_NODES] = {
  54. [0 ... CVMX_MAX_NODES -
  55. 1] = { .pool_num = -1, .buffer_size = 2048, .buffer_count = 0 }
  56. };
  57. struct cvmx_pki_aura_config pki_dflt_aura[CVMX_MAX_NODES] = {
  58. [0 ... CVMX_MAX_NODES -
  59. 1] = { .aura_num = 0, .pool_num = -1, .buffer_count = 0 }
  60. };
  61. struct cvmx_pki_style_config pki_dflt_style[CVMX_MAX_NODES] = {
  62. [0 ... CVMX_MAX_NODES - 1] = { .parm_cfg = { .lenerr_en = 1,
  63. .maxerr_en = 1,
  64. .minerr_en = 1,
  65. .fcs_strip = 1,
  66. .fcs_chk = 1,
  67. .first_skip = 40,
  68. .mbuff_size = 2048 } }
  69. };
  70. struct cvmx_pki_sso_grp_config pki_dflt_sso_grp[CVMX_MAX_NODES];
  71. struct cvmx_pki_qpg_config pki_dflt_qpg[CVMX_MAX_NODES];
  72. struct cvmx_pki_pkind_config pki_dflt_pkind[CVMX_MAX_NODES];
  73. u64 pkind_style_map[CVMX_MAX_NODES][CVMX_PKI_NUM_PKIND] = {
  74. [0 ... CVMX_MAX_NODES -
  75. 1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
  76. 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
  77. 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
  78. 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }
  79. };
  80. /* To store the qos watcher values before they are written to pcam when watcher
  81. * is enabled. There is no cvmx-pip.c file exist so it ended up here
  82. */
  83. struct cvmx_pki_legacy_qos_watcher qos_watcher[8];
/** @INTERNAL
 * This function sets up the default ltype map
 * @param node node number
 */
  88. void __cvmx_helper_pki_set_dflt_ltype_map(int node)
  89. {
  90. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_NONE,
  91. CVMX_PKI_BELTYPE_NONE);
  92. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_ENET,
  93. CVMX_PKI_BELTYPE_MISC);
  94. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_VLAN,
  95. CVMX_PKI_BELTYPE_MISC);
  96. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SNAP_PAYLD,
  97. CVMX_PKI_BELTYPE_MISC);
  98. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_ARP,
  99. CVMX_PKI_BELTYPE_MISC);
  100. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_RARP,
  101. CVMX_PKI_BELTYPE_MISC);
  102. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP4,
  103. CVMX_PKI_BELTYPE_IP4);
  104. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP4_OPT,
  105. CVMX_PKI_BELTYPE_IP4);
  106. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP6,
  107. CVMX_PKI_BELTYPE_IP6);
  108. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP6_OPT,
  109. CVMX_PKI_BELTYPE_IP6);
  110. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPSEC_ESP,
  111. CVMX_PKI_BELTYPE_MISC);
  112. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPFRAG,
  113. CVMX_PKI_BELTYPE_MISC);
  114. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPCOMP,
  115. CVMX_PKI_BELTYPE_MISC);
  116. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_TCP,
  117. CVMX_PKI_BELTYPE_TCP);
  118. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_UDP,
  119. CVMX_PKI_BELTYPE_UDP);
  120. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SCTP,
  121. CVMX_PKI_BELTYPE_SCTP);
  122. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_UDP_VXLAN,
  123. CVMX_PKI_BELTYPE_UDP);
  124. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_GRE,
  125. CVMX_PKI_BELTYPE_MISC);
  126. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_NVGRE,
  127. CVMX_PKI_BELTYPE_MISC);
  128. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_GTP,
  129. CVMX_PKI_BELTYPE_MISC);
  130. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW28,
  131. CVMX_PKI_BELTYPE_MISC);
  132. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW29,
  133. CVMX_PKI_BELTYPE_MISC);
  134. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW30,
  135. CVMX_PKI_BELTYPE_MISC);
  136. cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW31,
  137. CVMX_PKI_BELTYPE_MISC);
  138. }
  139. /** @INTERNAL
  140. * This function installs the default VLAN entries to identify
  141. * the VLAN and set WQE[vv], WQE[vs] if VLAN is found. In 78XX
  142. * hardware (PKI) is not hardwired to recognize any 802.1Q VLAN
  143. * Ethertypes
  144. *
  145. * @param node node number
  146. */
int __cvmx_helper_pki_install_dflt_vlan(int node)
{
	struct cvmx_pki_pcam_input pcam_input;
	struct cvmx_pki_pcam_action pcam_action;
	enum cvmx_pki_term field;
	int index;
	int bank;
	/* Entries are installed into all clusters */
	u64 cl_mask = CVMX_PKI_CLUSTER_ALL;

	memset(&pcam_input, 0, sizeof(pcam_input));
	memset(&pcam_action, 0, sizeof(pcam_action));

	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
		/* PKI-20858: on CN78XX pass 1.x, clear pcam_en and set
		 * pcam0/1_cdis on all four clusters before writing PCAM
		 * entries (looks like an ECC workaround -- see erratum).
		 */
		int i;
		for (i = 0; i < 4; i++) {
			union cvmx_pki_clx_ecc_ctl ecc_ctl;
			ecc_ctl.u64 =
				csr_rd_node(node, CVMX_PKI_CLX_ECC_CTL(i));
			ecc_ctl.s.pcam_en = 0;
			ecc_ctl.s.pcam0_cdis = 1;
			ecc_ctl.s.pcam1_cdis = 1;
			csr_wr_node(node, CVMX_PKI_CLX_ECC_CTL(i), ecc_ctl.u64);
		}
	}

	/* Install four TPID matches for both ETHTYPE terms (loop assumes
	 * ETHTYPE0/1 enum values are consecutive -- TODO confirm in the
	 * cvmx-pki header). Each hit marks the layer as VLAN and advances
	 * the parse pointer past the 4-byte tag.
	 */
	for (field = CVMX_PKI_PCAM_TERM_ETHTYPE0;
	     field < CVMX_PKI_PCAM_TERM_ETHTYPE2; field++) {
		bank = field & 0x01;

		/* TPID 0x8100: standard 802.1Q VLAN tag */
		index = cvmx_pki_pcam_entry_alloc(
			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
		if (index < 0) {
			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
			      node, bank);
			return -1;
		}
		pcam_input.style = 0;
		pcam_input.style_mask = 0; /* match any style */
		pcam_input.field = field;
		pcam_input.field_mask = 0xfd;
		pcam_input.data = 0x81000000;
		pcam_input.data_mask = 0xffff0000; /* TPID in upper 16 bits */
		pcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
		pcam_action.layer_type_set = CVMX_PKI_LTYPE_E_VLAN;
		pcam_action.style_add = 0;
		pcam_action.pointer_advance = 4; /* skip one VLAN tag */
		cvmx_pki_pcam_write_entry(
			node, index, cl_mask, pcam_input,
			pcam_action); /*cluster_mask in pass2*/

		/* TPID 0x88a8: 802.1ad service tag. Reuses the same
		 * input/action template; only the data word changes.
		 */
		index = cvmx_pki_pcam_entry_alloc(
			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
		if (index < 0) {
			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
			      node, bank);
			return -1;
		}
		pcam_input.data = 0x88a80000;
		cvmx_pki_pcam_write_entry(node, index, cl_mask, pcam_input,
					  pcam_action);

		/* TPID 0x9200: legacy QinQ variant */
		index = cvmx_pki_pcam_entry_alloc(
			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
		if (index < 0) {
			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
			      node, bank);
			return -1;
		}
		pcam_input.data = 0x92000000;
		cvmx_pki_pcam_write_entry(
			node, index, cl_mask, pcam_input,
			pcam_action); /* cluster_mask in pass2*/

		/* TPID 0x9100: legacy QinQ variant */
		index = cvmx_pki_pcam_entry_alloc(
			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
		if (index < 0) {
			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
			      node, bank);
			return -1;
		}
		pcam_input.data = 0x91000000;
		cvmx_pki_pcam_write_entry(node, index, cl_mask, pcam_input,
					  pcam_action);
	}
	return 0;
}
  227. static int __cvmx_helper_setup_pki_cluster_groups(int node)
  228. {
  229. u64 cl_mask;
  230. int cl_group;
  231. cl_group =
  232. cvmx_pki_cluster_grp_alloc(node, pki_dflt_clgrp[node].grp_num);
  233. if (cl_group == CVMX_RESOURCE_ALLOC_FAILED)
  234. return -1;
  235. else if (cl_group == CVMX_RESOURCE_ALREADY_RESERVED) {
  236. if (pki_dflt_clgrp[node].grp_num == -1)
  237. return -1;
  238. else
  239. return 0; /* cluster already configured, share it */
  240. }
  241. cl_mask = pki_dflt_clgrp[node].cluster_mask;
  242. if (pki_helper_debug)
  243. debug("pki-helper: setup pki cluster grp %d with cl_mask 0x%llx\n",
  244. (int)cl_group, (unsigned long long)cl_mask);
  245. cvmx_pki_attach_cluster_to_group(node, cl_group, cl_mask);
  246. return 0;
  247. }
  248. /**
  249. * This function sets up pools/auras to be used by PKI
  250. * @param node node number
  251. */
  252. static int __cvmx_helper_pki_setup_fpa_pools(int node)
  253. {
  254. u64 buffer_count;
  255. u64 buffer_size;
  256. if (__cvmx_fpa3_aura_valid(pki_dflt_aura[node].aura))
  257. return 0; /* aura already configured, share it */
  258. buffer_count = pki_dflt_pool[node].buffer_count;
  259. buffer_size = pki_dflt_pool[node].buffer_size;
  260. if (buffer_count != 0) {
  261. pki_dflt_pool[node].pool = cvmx_fpa3_setup_fill_pool(
  262. node, pki_dflt_pool[node].pool_num, "PKI POOL DFLT",
  263. buffer_size, buffer_count, NULL);
  264. if (!__cvmx_fpa3_pool_valid(pki_dflt_pool[node].pool)) {
  265. cvmx_printf("ERROR: %s: Failed to allocate pool %d\n",
  266. __func__, pki_dflt_pool[node].pool_num);
  267. return -1;
  268. }
  269. pki_dflt_pool[node].pool_num = pki_dflt_pool[node].pool.lpool;
  270. if (pki_helper_debug)
  271. debug("%s pool %d with buffer size %d cnt %d\n",
  272. __func__, pki_dflt_pool[node].pool_num,
  273. (int)buffer_size, (int)buffer_count);
  274. pki_dflt_aura[node].pool_num = pki_dflt_pool[node].pool_num;
  275. pki_dflt_aura[node].pool = pki_dflt_pool[node].pool;
  276. }
  277. buffer_count = pki_dflt_aura[node].buffer_count;
  278. if (buffer_count != 0) {
  279. pki_dflt_aura[node].aura = cvmx_fpa3_set_aura_for_pool(
  280. pki_dflt_aura[node].pool, pki_dflt_aura[node].aura_num,
  281. "PKI DFLT AURA", buffer_size, buffer_count);
  282. if (!__cvmx_fpa3_aura_valid(pki_dflt_aura[node].aura)) {
  283. debug("ERROR: %sL Failed to allocate aura %d\n",
  284. __func__, pki_dflt_aura[node].aura_num);
  285. return -1;
  286. }
  287. }
  288. return 0;
  289. }
  290. static int __cvmx_helper_setup_pki_qpg_table(int node)
  291. {
  292. int offset;
  293. offset = cvmx_pki_qpg_entry_alloc(node, pki_dflt_qpg[node].qpg_base, 1);
  294. if (offset == CVMX_RESOURCE_ALLOC_FAILED)
  295. return -1;
  296. else if (offset == CVMX_RESOURCE_ALREADY_RESERVED)
  297. return 0; /* share the qpg table entry */
  298. if (pki_helper_debug)
  299. debug("pki-helper: set qpg entry at offset %d with port add %d aura %d grp_ok %d grp_bad %d\n",
  300. offset, pki_dflt_qpg[node].port_add,
  301. pki_dflt_qpg[node].aura_num, pki_dflt_qpg[node].grp_ok,
  302. pki_dflt_qpg[node].grp_bad);
  303. cvmx_pki_write_qpg_entry(node, offset, &pki_dflt_qpg[node]);
  304. return 0;
  305. }
  306. int __cvmx_helper_pki_port_setup(int node, int ipd_port)
  307. {
  308. int xiface, index;
  309. int pknd, style_num;
  310. int rs;
  311. struct cvmx_pki_pkind_config pkind_cfg;
  312. if (!cvmx_pki_dflt_init[node])
  313. return 0;
  314. xiface = cvmx_helper_get_interface_num(ipd_port);
  315. index = cvmx_helper_get_interface_index_num(ipd_port);
  316. pknd = cvmx_helper_get_pknd(xiface, index);
  317. style_num = pkind_style_map[node][pknd];
  318. /* try to reserve the style, if it is not configured already, reserve
  319. and configure it */
  320. rs = cvmx_pki_style_alloc(node, style_num);
  321. if (rs < 0) {
  322. if (rs == CVMX_RESOURCE_ALLOC_FAILED)
  323. return -1;
  324. } else {
  325. if (pki_helper_debug)
  326. debug("pki-helper: set style %d with default parameters\n",
  327. style_num);
  328. pkind_style_map[node][pknd] = style_num;
  329. /* configure style with default parameters */
  330. cvmx_pki_write_style_config(node, style_num,
  331. CVMX_PKI_CLUSTER_ALL,
  332. &pki_dflt_style[node]);
  333. }
  334. if (pki_helper_debug)
  335. debug("pki-helper: set pkind %d with initial style %d\n", pknd,
  336. style_num);
  337. /* write pkind configuration */
  338. pkind_cfg = pki_dflt_pkind[node];
  339. pkind_cfg.initial_style = style_num;
  340. cvmx_pki_write_pkind_config(node, pknd, &pkind_cfg);
  341. return 0;
  342. }
/* Perform node-wide PKI setup: ltype map, packet pools, cluster group
 * and QPG table defaults. Always returns 0.
 * NOTE(review): the return values of the setup helpers below are
 * ignored, so pool/cluster/qpg allocation failures are silent here.
 */
int __cvmx_helper_pki_global_setup(int node)
{
	__cvmx_helper_pki_set_dflt_ltype_map(node);
	/* Skip default resource setup when disabled for this node */
	if (!cvmx_pki_dflt_init[node])
		return 0;
	/* Setup the packet pools*/
	__cvmx_helper_pki_setup_fpa_pools(node);
	/*set up default cluster*/
	__cvmx_helper_setup_pki_cluster_groups(node);
	//__cvmx_helper_pki_setup_sso_groups(node);
	__cvmx_helper_setup_pki_qpg_table(node);
	/*
	 * errata PKI-19103 backward compat has only 1 aura
	 * no head line blocking
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
		cvmx_pki_buf_ctl_t buf_ctl;
		/* Make PKI wait for FPA buffers instead of dropping */
		buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
		buf_ctl.s.fpa_wait = 1;
		csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
	}
	return 0;
}
/**
 * This function enables the PKI hardware to
 * start accepting/processing packets.
 *
 * @param node node number
 */
void cvmx_helper_pki_enable(int node)
{
	if (pki_helper_debug)
		debug("enable PKI on node %d\n", node);
	/* Install the default VLAN PCAM entries before enabling parsing.
	 * NOTE(review): its return value (−1 on PCAM alloc failure) is
	 * ignored here.
	 */
	__cvmx_helper_pki_install_dflt_vlan(node);
	cvmx_pki_setup_clusters(node);
	if (cvmx_pki_dflt_bp_en[node])
		cvmx_pki_enable_backpressure(node);
	/* Enable parsing, then the PKI block itself, in that order */
	cvmx_pki_parse_enable(node, 0);
	cvmx_pki_enable(node);
}
/**
 * This function sets up the qos table by allocating a qpg entry and writing
 * the provided parameters to that entry (offset).
 * @param node node number.
 * @param qpg_cfg pointer to struct containing qpg configuration
 */
  389. int cvmx_helper_pki_set_qpg_entry(int node, struct cvmx_pki_qpg_config *qpg_cfg)
  390. {
  391. int offset;
  392. offset = cvmx_pki_qpg_entry_alloc(node, qpg_cfg->qpg_base, 1);
  393. if (pki_helper_debug)
  394. debug("pki-helper:set qpg entry at offset %d\n", offset);
  395. if (offset == CVMX_RESOURCE_ALREADY_RESERVED) {
  396. debug("INFO:setup_qpg_table: offset %d already reserved\n",
  397. qpg_cfg->qpg_base);
  398. return CVMX_RESOURCE_ALREADY_RESERVED;
  399. } else if (offset == CVMX_RESOURCE_ALLOC_FAILED) {
  400. debug("ERROR:setup_qpg_table: no more entries available\n");
  401. return CVMX_RESOURCE_ALLOC_FAILED;
  402. }
  403. qpg_cfg->qpg_base = offset;
  404. cvmx_pki_write_qpg_entry(node, offset, qpg_cfg);
  405. return offset;
  406. }
/**
 * This function gets all the PKI parameters related to a
 * particular port from hardware.
 * @param xipd_port ipd port number (including node) to get parameters of
 * @param port_cfg pointer to structure where to store read parameters
 */
  413. void cvmx_pki_get_port_config(int xipd_port,
  414. struct cvmx_pki_port_config *port_cfg)
  415. {
  416. int xiface, index, pknd;
  417. int style, cl_mask;
  418. cvmx_pki_icgx_cfg_t pki_cl_msk;
  419. struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
  420. /* get the pkind used by this ipd port */
  421. xiface = cvmx_helper_get_interface_num(xipd_port);
  422. index = cvmx_helper_get_interface_index_num(xipd_port);
  423. pknd = cvmx_helper_get_pknd(xiface, index);
  424. cvmx_pki_read_pkind_config(xp.node, pknd, &port_cfg->pkind_cfg);
  425. style = port_cfg->pkind_cfg.initial_style;
  426. pki_cl_msk.u64 = csr_rd_node(
  427. xp.node, CVMX_PKI_ICGX_CFG(port_cfg->pkind_cfg.cluster_grp));
  428. cl_mask = pki_cl_msk.s.clusters;
  429. cvmx_pki_read_style_config(xp.node, style, cl_mask,
  430. &port_cfg->style_cfg);
  431. }
/**
 * This function sets all the PKI parameters related to a
 * particular port in hardware.
 * @param xipd_port ipd port number (including node) to set parameters of
 * @param port_cfg pointer to structure containing port parameters
 */
  438. void cvmx_pki_set_port_config(int xipd_port,
  439. struct cvmx_pki_port_config *port_cfg)
  440. {
  441. int xiface, index, pknd;
  442. int style, cl_mask;
  443. cvmx_pki_icgx_cfg_t pki_cl_msk;
  444. struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
  445. /* get the pkind used by this ipd port */
  446. xiface = cvmx_helper_get_interface_num(xipd_port);
  447. index = cvmx_helper_get_interface_index_num(xipd_port);
  448. pknd = cvmx_helper_get_pknd(xiface, index);
  449. if (cvmx_pki_write_pkind_config(xp.node, pknd, &port_cfg->pkind_cfg))
  450. return;
  451. style = port_cfg->pkind_cfg.initial_style;
  452. pki_cl_msk.u64 = csr_rd_node(
  453. xp.node, CVMX_PKI_ICGX_CFG(port_cfg->pkind_cfg.cluster_grp));
  454. cl_mask = pki_cl_msk.s.clusters;
  455. cvmx_pki_write_style_config(xp.node, style, cl_mask,
  456. &port_cfg->style_cfg);
  457. }
/**
 * This function sets up all the ports of a particular interface
 * for the chosen FCS mode (only used for backward compatibility).
 * New applications can control it via init_interface calls.
 * @param node node number.
 * @param interface interface number.
 * @param nports number of ports
 * @param has_fcs 1 -- enable fcs check and fcs strip.
 * 0 -- disable fcs check.
 */
  468. void cvmx_helper_pki_set_fcs_op(int node, int interface, int nports,
  469. int has_fcs)
  470. {
  471. int xiface, index;
  472. int pknd;
  473. unsigned int cluster = 0;
  474. cvmx_pki_clx_pkindx_cfg_t pkind_cfg;
  475. xiface = cvmx_helper_node_interface_to_xiface(node, interface);
  476. for (index = 0; index < nports; index++) {
  477. pknd = cvmx_helper_get_pknd(xiface, index);
  478. while (cluster < CVMX_PKI_NUM_CLUSTER) {
  479. /*find the cluster in use pass2*/
  480. pkind_cfg.u64 = csr_rd_node(
  481. node, CVMX_PKI_CLX_PKINDX_CFG(pknd, cluster));
  482. pkind_cfg.s.fcs_pres = has_fcs;
  483. csr_wr_node(node,
  484. CVMX_PKI_CLX_PKINDX_CFG(pknd, cluster),
  485. pkind_cfg.u64);
  486. cluster++;
  487. }
  488. /* make sure fcs_strip and fcs_check is also enable/disable
  489. * for the style used by that port
  490. */
  491. cvmx_pki_endis_fcs_check(node, pknd, has_fcs, has_fcs);
  492. cluster = 0;
  493. }
  494. }