cvmx-pko3-queue.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2018-2022 Marvell International Ltd.
  4. */
  5. #include <errno.h>
  6. #include <log.h>
  7. #include <time.h>
  8. #include <linux/delay.h>
  9. #include <mach/cvmx-regs.h>
  10. #include <mach/cvmx-csr.h>
  11. #include <mach/cvmx-bootmem.h>
  12. #include <mach/octeon-model.h>
  13. #include <mach/cvmx-fuse.h>
  14. #include <mach/octeon-feature.h>
  15. #include <mach/cvmx-qlm.h>
  16. #include <mach/octeon_qlm.h>
  17. #include <mach/cvmx-pcie.h>
  18. #include <mach/cvmx-coremask.h>
  19. #include <mach/cvmx-agl-defs.h>
  20. #include <mach/cvmx-bgxx-defs.h>
  21. #include <mach/cvmx-ciu-defs.h>
  22. #include <mach/cvmx-gmxx-defs.h>
  23. #include <mach/cvmx-gserx-defs.h>
  24. #include <mach/cvmx-ilk-defs.h>
  25. #include <mach/cvmx-ipd-defs.h>
  26. #include <mach/cvmx-pcsx-defs.h>
  27. #include <mach/cvmx-pcsxx-defs.h>
  28. #include <mach/cvmx-pki-defs.h>
  29. #include <mach/cvmx-pko-defs.h>
  30. #include <mach/cvmx-xcv-defs.h>
  31. #include <mach/cvmx-hwpko.h>
  32. #include <mach/cvmx-ilk.h>
  33. #include <mach/cvmx-pki.h>
  34. #include <mach/cvmx-pko3.h>
  35. #include <mach/cvmx-pko3-queue.h>
  36. #include <mach/cvmx-pko3-resources.h>
  37. #include <mach/cvmx-helper.h>
  38. #include <mach/cvmx-helper-board.h>
  39. #include <mach/cvmx-helper-cfg.h>
  40. #include <mach/cvmx-helper-bgx.h>
  41. #include <mach/cvmx-helper-cfg.h>
  42. #include <mach/cvmx-helper-util.h>
  43. #include <mach/cvmx-helper-pki.h>
/* Smallest Round-Robin quantum to use +1 */
#define CVMX_PKO3_RR_QUANTUM_MIN 0x10

static int debug; /* 1 for basic, 2 for detailed trace */

/*
 * Descriptor Queue range assigned to one IPD port.
 *
 * A port owns the consecutive range dq_base .. dq_base + dq_count - 1;
 * dq_count == 0 marks a port with no queues registered.
 */
struct cvmx_pko3_dq {
	unsigned dq_count : 6;	/* Number of descriptor queues */
	unsigned dq_base : 10;	/* Descriptor queue start number */
/* XOR swizzle applied to the IPD port index when looking up the table */
#define CVMX_PKO3_SWIZZLE_IPD 0x0
};

/*
 * @INTERNAL
 * Descriptor Queue to IPD port mapping table.
 *
 * This pointer is per-core, contains the virtual address
 * of a global named block which has 2^12 entries per each
 * possible node.
 */
struct cvmx_pko3_dq *__cvmx_pko3_dq_table;
  61. int cvmx_pko3_get_queue_base(int ipd_port)
  62. {
  63. struct cvmx_pko3_dq *dq_table;
  64. int ret = -1;
  65. unsigned int i;
  66. struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);
  67. /* get per-node table */
  68. if (cvmx_unlikely(!__cvmx_pko3_dq_table))
  69. __cvmx_pko3_dq_table_setup();
  70. i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;
  71. /* get per-node table */
  72. dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xp.node;
  73. if (cvmx_likely(dq_table[i].dq_count > 0))
  74. ret = xp.node << 10 | dq_table[i].dq_base;
  75. else if (debug)
  76. cvmx_printf("ERROR: %s: no queues for ipd_port=%#x\n", __func__,
  77. ipd_port);
  78. return ret;
  79. }
  80. int cvmx_pko3_get_queue_num(int ipd_port)
  81. {
  82. struct cvmx_pko3_dq *dq_table;
  83. int ret = -1;
  84. unsigned int i;
  85. struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);
  86. /* get per-node table */
  87. if (cvmx_unlikely(!__cvmx_pko3_dq_table))
  88. __cvmx_pko3_dq_table_setup();
  89. i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;
  90. /* get per-node table */
  91. dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xp.node;
  92. if (cvmx_likely(dq_table[i].dq_count > 0))
  93. ret = dq_table[i].dq_count;
  94. else if (debug)
  95. debug("ERROR: %s: no queues for ipd_port=%#x\n", __func__,
  96. ipd_port);
  97. return ret;
  98. }
  99. /**
  100. * @INTERNAL
  101. *
  102. * Initialize port/dq table contents
  103. */
  104. static void __cvmx_pko3_dq_table_init(void *ptr)
  105. {
  106. unsigned int size = sizeof(struct cvmx_pko3_dq) *
  107. CVMX_PKO3_IPD_NUM_MAX * CVMX_MAX_NODES;
  108. memset(ptr, 0, size);
  109. }
  110. /**
  111. * @INTERNAL
  112. *
  113. * Find or allocate global port/dq map table
  114. * which is a named table, contains entries for
  115. * all possible OCI nodes.
  116. *
  117. * The table global pointer is stored in core-local variable
  118. * so that every core will call this function once, on first use.
  119. */
  120. int __cvmx_pko3_dq_table_setup(void)
  121. {
  122. void *ptr;
  123. ptr = cvmx_bootmem_alloc_named_range_once(
  124. /* size */
  125. sizeof(struct cvmx_pko3_dq) * CVMX_PKO3_IPD_NUM_MAX *
  126. CVMX_MAX_NODES,
  127. /* min_addr, max_addr, align */
  128. 0ull, 0ull, sizeof(struct cvmx_pko3_dq),
  129. /* name */
  130. "cvmx_pko3_global_dq_table", __cvmx_pko3_dq_table_init);
  131. if (debug)
  132. debug("%s: dq_table_ptr=%p\n", __func__, ptr);
  133. if (!ptr)
  134. return -1;
  135. __cvmx_pko3_dq_table = ptr;
  136. return 0;
  137. }
  138. /*
  139. * @INTERNAL
  140. * Register a range of Descriptor Queues with an interface port
  141. *
  142. * This function populates the DQ-to-IPD translation table
  143. * used by the application to retrieve the DQ range (typically ordered
  144. * by priority) for a given IPD-port, which is either a physical port,
  145. * or a channel on a channelized interface (i.e. ILK).
  146. *
  147. * @param xiface is the physical interface number
  148. * @param index is either a physical port on an interface
  149. * or a channel of an ILK interface
  150. * @param dq_base is the first Descriptor Queue number in a consecutive range
  151. * @param dq_count is the number of consecutive Descriptor Queues leading
  152. * the same channel or port.
  153. *
  154. * Only a consecutive range of Descriptor Queues can be associated with any
  155. * given channel/port, and usually they are ordered from most to least
  156. * in terms of scheduling priority.
  157. *
  158. * Note: thus function only populates the node-local translation table.
  159. * NOTE: This function would be cleaner if it had a single ipd_port argument
  160. *
  161. * @returns 0 on success, -1 on failure.
  162. */
  163. int __cvmx_pko3_ipd_dq_register(int xiface, int index, unsigned int dq_base,
  164. unsigned int dq_count)
  165. {
  166. struct cvmx_pko3_dq *dq_table;
  167. int ipd_port;
  168. unsigned int i;
  169. struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
  170. struct cvmx_xport xp;
  171. if (__cvmx_helper_xiface_is_null(xiface)) {
  172. ipd_port = cvmx_helper_node_to_ipd_port(xi.node,
  173. CVMX_PKO3_IPD_PORT_NULL);
  174. } else {
  175. int p;
  176. p = cvmx_helper_get_ipd_port(xiface, index);
  177. if (p < 0) {
  178. cvmx_printf("ERROR: %s: xiface %#x has no IPD port\n",
  179. __func__, xiface);
  180. return -1;
  181. }
  182. ipd_port = p;
  183. }
  184. xp = cvmx_helper_ipd_port_to_xport(ipd_port);
  185. i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;
  186. /* get per-node table */
  187. if (!__cvmx_pko3_dq_table)
  188. __cvmx_pko3_dq_table_setup();
  189. dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xi.node;
  190. if (debug)
  191. debug("%s: ipd_port=%#x ix=%#x dq %u cnt %u\n", __func__,
  192. ipd_port, i, dq_base, dq_count);
  193. /* Check the IPD port has not already been configured */
  194. if (dq_table[i].dq_count > 0) {
  195. cvmx_printf("%s: ERROR: IPD %#x already registered\n", __func__,
  196. ipd_port);
  197. return -1;
  198. }
  199. /* Store DQ# range in the queue lookup table */
  200. dq_table[i].dq_base = dq_base;
  201. dq_table[i].dq_count = dq_count;
  202. return 0;
  203. }
/*
 * @INTERNAL
 * Convert normal CHAN_E (i.e. IPD port) value to compressed channel form
 * that is used to populate PKO_LUT.
 *
 * Note: This code may be model specific.
 *
 * @param ipd_port channel number in CHAN_E form
 * @return the compressed channel number, -1 when the IPD port is not
 *	valid for this model, or -2 when the LUT is not used for it.
 */
static int cvmx_pko3_chan_2_xchan(uint16_t ipd_port)
{
	u16 xchan;
	u8 off;
	static const u8 *xchan_base;
	/*
	 * Per-model tables, indexed by the top nibble of the IPD port.
	 * Each entry is the compressed-channel base shifted right by 4;
	 * 0xfff >> 4 == 0xff marks an invalid port, and 0xeee >> 4 == 0xee
	 * marks a port that does not use the LUT.
	 */
	static const u8 xchan_base_cn78xx[16] = {
		/* IPD 0x000 */ 0x3c0 >> 4, /* LBK */
		/* IPD 0x100 */ 0x380 >> 4, /* DPI */
		/* IPD 0x200 */ 0xfff >> 4, /* not used */
		/* IPD 0x300 */ 0xfff >> 4, /* not used */
		/* IPD 0x400 */ 0x000 >> 4, /* ILK0 */
		/* IPD 0x500 */ 0x100 >> 4, /* ILK1 */
		/* IPD 0x600 */ 0xfff >> 4, /* not used */
		/* IPD 0x700 */ 0xfff >> 4, /* not used */
		/* IPD 0x800 */ 0x200 >> 4, /* BGX0 */
		/* IPD 0x900 */ 0x240 >> 4, /* BGX1 */
		/* IPD 0xa00 */ 0x280 >> 4, /* BGX2 */
		/* IPD 0xb00 */ 0x2c0 >> 4, /* BGX3 */
		/* IPD 0xc00 */ 0x300 >> 4, /* BGX4 */
		/* IPD 0xd00 */ 0x340 >> 4, /* BGX5 */
		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
		/* IPD 0xf00 */ 0xfff >> 4  /* not used */
	};
	static const u8 xchan_base_cn73xx[16] = {
		/* IPD 0x000 */ 0x0c0 >> 4, /* LBK */
		/* IPD 0x100 */ 0x100 >> 4, /* DPI */
		/* IPD 0x200 */ 0xfff >> 4, /* not used */
		/* IPD 0x300 */ 0xfff >> 4, /* not used */
		/* IPD 0x400 */ 0xfff >> 4, /* not used */
		/* IPD 0x500 */ 0xfff >> 4, /* not used */
		/* IPD 0x600 */ 0xfff >> 4, /* not used */
		/* IPD 0x700 */ 0xfff >> 4, /* not used */
		/* IPD 0x800 */ 0x000 >> 4, /* BGX0 */
		/* IPD 0x900 */ 0x040 >> 4, /* BGX1 */
		/* IPD 0xa00 */ 0x080 >> 4, /* BGX2 */
		/* IPD 0xb00 */ 0xfff >> 4, /* not used */
		/* IPD 0xc00 */ 0xfff >> 4, /* not used */
		/* IPD 0xd00 */ 0xfff >> 4, /* not used */
		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
		/* IPD 0xf00 */ 0xfff >> 4  /* not used */
	};
	static const u8 xchan_base_cn75xx[16] = {
		/* IPD 0x000 */ 0x040 >> 4, /* LBK */
		/* IPD 0x100 */ 0x080 >> 4, /* DPI */
		/* IPD 0x200 */ 0xeee >> 4, /* SRIO0 noop */
		/* IPD 0x300 */ 0xfff >> 4, /* not used */
		/* IPD 0x400 */ 0xfff >> 4, /* not used */
		/* IPD 0x500 */ 0xfff >> 4, /* not used */
		/* IPD 0x600 */ 0xfff >> 4, /* not used */
		/* IPD 0x700 */ 0xfff >> 4, /* not used */
		/* IPD 0x800 */ 0x000 >> 4, /* BGX0 */
		/* IPD 0x900 */ 0xfff >> 4, /* not used */
		/* IPD 0xa00 */ 0xfff >> 4, /* not used */
		/* IPD 0xb00 */ 0xfff >> 4, /* not used */
		/* IPD 0xc00 */ 0xfff >> 4, /* not used */
		/* IPD 0xd00 */ 0xfff >> 4, /* not used */
		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
		/* IPD 0xf00 */ 0xfff >> 4  /* not used */
	};

	/* Select the table for the running model (static: selected once) */
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		xchan_base = xchan_base_cn73xx;
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		xchan_base = xchan_base_cn75xx;
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		xchan_base = xchan_base_cn78xx;

	if (!xchan_base)
		return -1;

	xchan = ipd_port >> 8;

	/* ILKx, DPI has 8 bits logical channels, others just 6 */
	if (((xchan & 0xfe) == 0x04) || xchan == 0x01)
		off = ipd_port & 0xff;
	else
		off = ipd_port & 0x3f;

	xchan = xchan_base[xchan & 0xf];

	if (xchan == 0xff)
		return -1; /* Invalid IPD_PORT */
	else if (xchan == 0xee)
		return -2; /* LUT not used */
	else
		return (xchan << 4) | off;
}
  292. /*
  293. * Map channel number in PKO
  294. *
  295. * @param node is to specify the node to which this configuration is applied.
  296. * @param pq_num specifies the Port Queue (i.e. L1) queue number.
  297. * @param l2_l3_q_num specifies L2/L3 queue number.
  298. * @param channel specifies the channel number to map to the queue.
  299. *
  300. * The channel assignment applies to L2 or L3 Shaper Queues depending
  301. * on the setting of channel credit level.
  302. *
  303. * @return returns none.
  304. */
  305. void cvmx_pko3_map_channel(unsigned int node, unsigned int pq_num,
  306. unsigned int l2_l3_q_num, uint16_t channel)
  307. {
  308. union cvmx_pko_l3_l2_sqx_channel sqx_channel;
  309. cvmx_pko_lutx_t lutx;
  310. int xchan;
  311. sqx_channel.u64 =
  312. csr_rd_node(node, CVMX_PKO_L3_L2_SQX_CHANNEL(l2_l3_q_num));
  313. sqx_channel.s.cc_channel = channel;
  314. csr_wr_node(node, CVMX_PKO_L3_L2_SQX_CHANNEL(l2_l3_q_num),
  315. sqx_channel.u64);
  316. /* Convert CHAN_E into compressed channel */
  317. xchan = cvmx_pko3_chan_2_xchan(channel);
  318. if (debug)
  319. debug("%s: ipd_port=%#x xchan=%#x\n", __func__, channel, xchan);
  320. if (xchan < 0) {
  321. if (xchan == -1)
  322. cvmx_printf("%s: ERROR: channel %#x not recognized\n",
  323. __func__, channel);
  324. return;
  325. }
  326. lutx.u64 = 0;
  327. lutx.s.valid = 1;
  328. lutx.s.pq_idx = pq_num;
  329. lutx.s.queue_number = l2_l3_q_num;
  330. csr_wr_node(node, CVMX_PKO_LUTX(xchan), lutx.u64);
  331. if (debug)
  332. debug("%s: channel %#x (compressed=%#x) mapped L2/L3 SQ=%u, PQ=%u\n",
  333. __func__, channel, xchan, l2_l3_q_num, pq_num);
  334. }
  335. /*
  336. * @INTERNAL
  337. * This function configures port queue scheduling and topology parameters
  338. * in hardware.
  339. *
  340. * @param node is to specify the node to which this configuration is applied.
  341. * @param port_queue is the port queue number to be configured.
  342. * @param mac_num is the mac number of the mac that will be tied to this port_queue.
  343. */
  344. static void cvmx_pko_configure_port_queue(int node, int port_queue, int mac_num)
  345. {
  346. cvmx_pko_l1_sqx_topology_t pko_l1_topology;
  347. cvmx_pko_l1_sqx_shape_t pko_l1_shape;
  348. cvmx_pko_l1_sqx_link_t pko_l1_link;
  349. pko_l1_topology.u64 = 0;
  350. pko_l1_topology.s.link = mac_num;
  351. csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(port_queue),
  352. pko_l1_topology.u64);
  353. pko_l1_shape.u64 = 0;
  354. pko_l1_shape.s.link = mac_num;
  355. csr_wr_node(node, CVMX_PKO_L1_SQX_SHAPE(port_queue), pko_l1_shape.u64);
  356. pko_l1_link.u64 = 0;
  357. pko_l1_link.s.link = mac_num;
  358. csr_wr_node(node, CVMX_PKO_L1_SQX_LINK(port_queue), pko_l1_link.u64);
  359. }
/*
 * @INTERNAL
 * This function configures level 2 queues scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level 2 queue number to be configured.
 * @param parent_queue is the parent queue at next level (L1) for this queue.
 * @param prio is this queue's priority in parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority
 *	children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l2_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l2_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l2_sqx_topology_t pko_child_topology;
	cvmx_pko_l1_sqx_topology_t pko_parent_topology;

	/* parent topology configuration: anchor this child range */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L1_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.link);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L2_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* child topology configuration: point this queue at its parent */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}
/*
 * @INTERNAL
 * This function configures level 3 queues scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level 3 queue number to be configured.
 * @param parent_queue is the parent queue at next level (L2) for this queue.
 * @param prio is this queue's priority in parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority
 *	children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l3_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l3_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l3_sqx_topology_t pko_child_topology;
	cvmx_pko_l2_sqx_topology_t pko_parent_topology;

	/* parent topology configuration: anchor this child range */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L2_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.parent);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L3_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* child topology configuration: point this queue at its parent */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}
/*
 * @INTERNAL
 * This function configures level 4 queues scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level 4 queue number to be configured.
 * @param parent_queue is the parent queue at next level (L3) for this queue.
 * @param prio is this queue's priority in parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority
 *	children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l4_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l4_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l4_sqx_topology_t pko_child_topology;
	cvmx_pko_l3_sqx_topology_t pko_parent_topology;

	/* parent topology configuration: anchor this child range */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L3_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.parent);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L4_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* topology configuration: point this queue at its parent */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}
/*
 * @INTERNAL
 * This function configures level 5 queues scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level 5 queue number to be configured.
 * @param parent_queue is the parent queue at next level (L4) for this queue.
 * @param prio is this queue's priority in parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority
 *	children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l5_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l5_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l4_sqx_topology_t pko_parent_topology;
	cvmx_pko_l5_sqx_topology_t pko_child_topology;

	/* parent topology configuration: anchor this child range */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L4_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.parent);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L5_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* topology configuration: point this queue at its parent */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L5_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}
/*
 * @INTERNAL
 * This function configures descriptor queues scheduling and topology
 * parameters in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param dq is the descriptor queue number to be configured.
 * @param parent_queue is the parent queue at next level for this dq.
 * @param prio is this queue's priority in parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority
 *	children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_dq(int node, int dq, int parent_queue, int prio,
				  int rr_quantum, int child_base,
				  int child_rr_prio)
{
	cvmx_pko_dqx_schedule_t pko_dq_sched;
	cvmx_pko_dqx_topology_t pko_dq_topology;
	cvmx_pko_l5_sqx_topology_t pko_parent_topology;
	cvmx_pko_dqx_wm_ctl_t pko_dq_wm_ctl;
	unsigned long long parent_topology_reg;
	char lvl;

	if (debug)
		debug("%s: dq %u parent %u child_base %u\n", __func__, dq,
		      parent_queue, child_base);

	/*
	 * The DQ's parent sits at the deepest implemented SQ level:
	 * L5 on models with a 5-level hierarchy, L3 otherwise.
	 */
	if (__cvmx_pko3_sq_lvl_max() == CVMX_PKO_L5_QUEUES) {
		parent_topology_reg = CVMX_PKO_L5_SQX_TOPOLOGY(parent_queue);
		lvl = 5;
	} else if (__cvmx_pko3_sq_lvl_max() == CVMX_PKO_L3_QUEUES) {
		parent_topology_reg = CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue);
		lvl = 3;
	} else {
		/* Unexpected hierarchy depth: leave the DQ unconfigured */
		return;
	}

	if (debug)
		debug("%s: parent_topology_reg=%#llx\n", __func__,
		      parent_topology_reg);

	/* parent topology configuration: anchor this child range */
	pko_parent_topology.u64 = csr_rd_node(node, parent_topology_reg);
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, parent_topology_reg, pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L%d_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      lvl, parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.parent);

	/* scheduler configuration for this dq in the parent queue */
	pko_dq_sched.u64 = 0;
	pko_dq_sched.s.prio = prio;
	pko_dq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_DQX_SCHEDULE(dq), pko_dq_sched.u64);

	/* topology configuration: point this DQ at its parent */
	pko_dq_topology.u64 = 0;
	pko_dq_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_DQX_TOPOLOGY(dq), pko_dq_topology.u64);

	/* configure for counting packets, not bytes at this level */
	pko_dq_wm_ctl.u64 = 0;
	pko_dq_wm_ctl.s.kind = 1;
	pko_dq_wm_ctl.s.enable = 0;
	csr_wr_node(node, CVMX_PKO_DQX_WM_CTL(dq), pko_dq_wm_ctl.u64);

	if (debug > 1) {
		/* Read back what was just programmed, for trace only */
		pko_dq_sched.u64 = csr_rd_node(node, CVMX_PKO_DQX_SCHEDULE(dq));
		pko_dq_topology.u64 =
			csr_rd_node(node, CVMX_PKO_DQX_TOPOLOGY(dq));
		debug("CVMX_PKO_DQX_TOPOLOGY(%u)PARENT=%u CVMX_PKO_DQX_SCHEDULE(%u) PRIO=%u Q=%u\n",
		      dq, pko_dq_topology.s.parent, dq, pko_dq_sched.s.prio,
		      pko_dq_sched.s.rr_quantum);
	}
}
/*
 * @INTERNAL
 * The following structure selects the Scheduling Queue configuration
 * routine for each of the supported levels.
 * The initial content of the table is set up in accordance with the
 * specific SoC model and its implemented resources.
 */
struct pko3_cfg_tab_s {
	/* function pointer to configure the given level, last=DQ */
	struct {
		u8 parent_level;
		void (*cfg_sq_func)(int node, int queue, int parent_queue,
				    int prio, int rr_quantum, int child_base,
				    int child_rr_prio);
		/* XXX exaggerated size, for debugging */
	} lvl[256];
};

/* CN78XX: table covers L2..L5 scheduler queues plus descriptor queues */
static const struct pko3_cfg_tab_s pko3_cn78xx_cfg = {
	{ [CVMX_PKO_L2_QUEUES] = { CVMX_PKO_PORT_QUEUES,
				   cvmx_pko_configure_l2_queue },
	  [CVMX_PKO_L3_QUEUES] = { CVMX_PKO_L2_QUEUES,
				   cvmx_pko_configure_l3_queue },
	  [CVMX_PKO_L4_QUEUES] = { CVMX_PKO_L3_QUEUES,
				   cvmx_pko_configure_l4_queue },
	  [CVMX_PKO_L5_QUEUES] = { CVMX_PKO_L4_QUEUES,
				   cvmx_pko_configure_l5_queue },
	  [CVMX_PKO_DESCR_QUEUES] = { CVMX_PKO_L5_QUEUES,
				      cvmx_pko_configure_dq } }
};

/* CN73XX/CNF75XX: table covers L2..L3 scheduler queues plus DQs */
static const struct pko3_cfg_tab_s pko3_cn73xx_cfg = {
	{ [CVMX_PKO_L2_QUEUES] = { CVMX_PKO_PORT_QUEUES,
				   cvmx_pko_configure_l2_queue },
	  [CVMX_PKO_L3_QUEUES] = { CVMX_PKO_L2_QUEUES,
				   cvmx_pko_configure_l3_queue },
	  [CVMX_PKO_DESCR_QUEUES] = { CVMX_PKO_L3_QUEUES,
				      cvmx_pko_configure_dq } }
};
  635. /*
  636. * Configure Port Queue and its children Scheduler Queue
  637. *
  638. * Port Queues (a.k.a L1) are assigned 1-to-1 to MACs.
  639. * L2 Scheduler Queues are used for specifying channels, and thus there
  640. * could be multiple L2 SQs attached to a single L1 PQ, either in a
  641. * fair round-robin scheduling, or with static and/or round-robin priorities.
  642. *
  643. * @param node on which to operate
  644. * @param mac_num is the LMAC number to that is associated with the Port Queue,
  645. * @param pq_num is the number of the L1 PQ attached to the MAC
  646. *
  647. * @returns 0 on success, -1 on failure.
  648. */
  649. int cvmx_pko3_pq_config(unsigned int node, unsigned int mac_num,
  650. unsigned int pq_num)
  651. {
  652. char b1[10];
  653. if (debug)
  654. debug("%s: MAC%u -> %s\n", __func__, mac_num,
  655. __cvmx_pko3_sq_str(b1, CVMX_PKO_PORT_QUEUES, pq_num));
  656. cvmx_pko_configure_port_queue(node, pq_num, mac_num);
  657. return 0;
  658. }
/*
 * Configure L3 through L5 Scheduler Queues and Descriptor Queues
 *
 * The Scheduler Queues in Levels 3 to 5 and Descriptor Queues are
 * configured one-to-one or many-to-one to a single parent Scheduler
 * Queues. The level of the parent SQ is specified in an argument,
 * as well as the number of children to attach to the specific parent.
 * The children can have fair round-robin or priority-based scheduling
 * when multiple children are assigned a single parent.
 *
 * @param node on which to operate
 * @param child_level is the level of the child queue
 * @param parent_queue is the number of the parent Scheduler Queue
 * @param child_base is the number of the first child SQ or DQ to assign to
 * @param child_count is the number of consecutive children to assign
 * @param stat_prio_count is the priority setting for the children L2 SQs
 *
 * If <stat_prio_count> is -1, the Ln children will have equal Round-Robin
 * relationship with each other. If <stat_prio_count> is 0, all Ln children
 * will be arranged in Weighted-Round-Robin, with the first having the most
 * precedence. If <stat_prio_count> is between 1 and 8, it indicates how
 * many children will have static priority settings (with the first having
 * the most precedence), with the remaining Ln children having WRR scheduling.
 *
 * @return 0 on success, -1 on failure.
 *
 * Note: this function supports the configuration of node-local unit.
 */
int cvmx_pko3_sq_config_children(unsigned int node,
				 enum cvmx_pko3_level_e child_level,
				 unsigned int parent_queue,
				 unsigned int child_base,
				 unsigned int child_count, int stat_prio_count)
{
	enum cvmx_pko3_level_e parent_level;
	unsigned int num_elem = 0;
	unsigned int rr_quantum, rr_count;
	unsigned int child, prio, rr_prio;
	const struct pko3_cfg_tab_s *cfg_tbl = NULL;
	char b1[10], b2[10];

	/* Select the per-model level-dispatch table */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		num_elem = NUM_ELEMENTS(pko3_cn78xx_cfg.lvl);
		cfg_tbl = &pko3_cn78xx_cfg;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		num_elem = NUM_ELEMENTS(pko3_cn73xx_cfg.lvl);
		cfg_tbl = &pko3_cn73xx_cfg;
	}

	if (!cfg_tbl || child_level >= num_elem) {
		cvmx_printf("ERROR: %s: model or level %#x invalid\n", __func__,
			    child_level);
		return -1;
	}

	parent_level = cfg_tbl->lvl[child_level].parent_level;

	/* A level with no config function or no parent is not implemented */
	if (!cfg_tbl->lvl[child_level].cfg_sq_func ||
	    cfg_tbl->lvl[child_level].parent_level == 0) {
		cvmx_printf("ERROR: %s: queue level %#x invalid\n", __func__,
			    child_level);
		return -1;
	}

	/* First static priority is 0 - top precedence */
	prio = 0;

	/* Cannot have more static priorities than children */
	if (stat_prio_count > (signed int)child_count)
		stat_prio_count = child_count;

	/* Valid PRIO field is 0..9, limit maximum static priorities */
	if (stat_prio_count > 9)
		stat_prio_count = 9;

	/* Special case of a single child */
	if (child_count == 1) {
		rr_count = 0;
		rr_prio = 0xF;
		/* Special case for Fair-RR */
	} else if (stat_prio_count < 0) {
		rr_count = child_count;
		rr_prio = 0;
	} else {
		rr_count = child_count - stat_prio_count;
		rr_prio = stat_prio_count;
	}

	/* Compute highest RR_QUANTUM */
	if (stat_prio_count > 0)
		rr_quantum = CVMX_PKO3_RR_QUANTUM_MIN * rr_count;
	else
		rr_quantum = CVMX_PKO3_RR_QUANTUM_MIN;

	if (debug)
		debug("%s: Parent %s child_base %u rr_pri %u\n", __func__,
		      __cvmx_pko3_sq_str(b1, parent_level, parent_queue),
		      child_base, rr_prio);

	/* Parent is configured with child */
	for (child = child_base; child < (child_base + child_count); child++) {
		if (debug)
			debug("%s: Child %s of %s prio %u rr_quantum %#x\n",
			      __func__,
			      __cvmx_pko3_sq_str(b1, child_level, child),
			      __cvmx_pko3_sq_str(b2, parent_level,
						 parent_queue),
			      prio, rr_quantum);
		cfg_tbl->lvl[child_level].cfg_sq_func(node, child, parent_queue,
						      prio, rr_quantum,
						      child_base, rr_prio);
		/*
		 * Advance the static priority until the RR group starts;
		 * within the WRR group, decreasing quantum weights later
		 * children less.
		 */
		if (prio < rr_prio)
			prio++;
		else if (stat_prio_count > 0)
			rr_quantum -= CVMX_PKO3_RR_QUANTUM_MIN;
	} /* for child */

	return 0;
}