mvpp2_prs.c 71 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Header Parser helpers for Marvell PPv2 Network Controller
  4. *
  5. * Copyright (C) 2014 Marvell
  6. *
  7. * Marcin Wojtas <mw@semihalf.com>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/netdevice.h>
  11. #include <linux/etherdevice.h>
  12. #include <linux/platform_device.h>
  13. #include <uapi/linux/ppp_defs.h>
  14. #include <net/ip.h>
  15. #include <net/ipv6.h>
  16. #include "mvpp2.h"
  17. #include "mvpp2_prs.h"
  18. /* Update parser tcam and sram hw entries */
  19. static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
  20. {
  21. int i;
  22. if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  23. return -EINVAL;
  24. /* Clear entry invalidation bit */
  25. pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
  26. /* Write sram index - indirect access */
  27. mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  28. for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  29. mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
  30. /* Write tcam index - indirect access */
  31. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  32. for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  33. mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
  34. return 0;
  35. }
  36. /* Initialize tcam entry from hw */
  37. int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
  38. int tid)
  39. {
  40. int i;
  41. if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  42. return -EINVAL;
  43. memset(pe, 0, sizeof(*pe));
  44. pe->index = tid;
  45. /* Write tcam index - indirect access */
  46. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  47. pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
  48. MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
  49. if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
  50. return MVPP2_PRS_TCAM_ENTRY_INVALID;
  51. for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  52. pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
  53. /* Write sram index - indirect access */
  54. mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  55. for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  56. pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
  57. return 0;
  58. }
  59. /* Invalidate tcam hw entry */
  60. static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
  61. {
  62. /* Write index - indirect access */
  63. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
  64. mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
  65. MVPP2_PRS_TCAM_INV_MASK);
  66. }
  67. /* Enable shadow table entry and set its lookup ID */
  68. static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
  69. {
  70. priv->prs_shadow[index].valid = true;
  71. priv->prs_shadow[index].lu = lu;
  72. }
  73. /* Update ri fields in shadow table entry */
  74. static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
  75. unsigned int ri, unsigned int ri_mask)
  76. {
  77. priv->prs_shadow[index].ri_mask = ri_mask;
  78. priv->prs_shadow[index].ri = ri;
  79. }
  80. /* Update lookup field in tcam sw entry */
  81. static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
  82. {
  83. pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
  84. pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
  85. pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
  86. pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
  87. }
  88. /* Update mask for single port in tcam sw entry */
  89. static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
  90. unsigned int port, bool add)
  91. {
  92. if (add)
  93. pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
  94. else
  95. pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
  96. }
  97. /* Update port map in tcam sw entry */
  98. static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
  99. unsigned int ports)
  100. {
  101. pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
  102. pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
  103. pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
  104. }
  105. /* Obtain port map from tcam sw entry */
  106. unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
  107. {
  108. return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
  109. }
  110. /* Set byte of data and its enable bits in tcam sw entry */
  111. static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
  112. unsigned int offs, unsigned char byte,
  113. unsigned char enable)
  114. {
  115. int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
  116. pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
  117. pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
  118. pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
  119. pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
  120. }
  121. /* Get byte of data and its enable bits from tcam sw entry */
  122. void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
  123. unsigned int offs, unsigned char *byte,
  124. unsigned char *enable)
  125. {
  126. int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
  127. *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
  128. *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
  129. }
  130. /* Compare tcam data bytes with a pattern */
  131. static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
  132. u16 data)
  133. {
  134. u16 tcam_data;
  135. tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
  136. return tcam_data == data;
  137. }
  138. /* Update ai bits in tcam sw entry */
  139. static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
  140. unsigned int bits, unsigned int enable)
  141. {
  142. int i;
  143. for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
  144. if (!(enable & BIT(i)))
  145. continue;
  146. if (bits & BIT(i))
  147. pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
  148. else
  149. pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
  150. }
  151. pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
  152. }
  153. /* Get ai bits from tcam sw entry */
  154. static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
  155. {
  156. return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
  157. }
  158. /* Set ethertype in tcam sw entry */
  159. static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
  160. unsigned short ethertype)
  161. {
  162. mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
  163. mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
  164. }
  165. /* Set vid in tcam sw entry */
  166. static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
  167. unsigned short vid)
  168. {
  169. mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
  170. mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
  171. }
  172. /* Set bits in sram sw entry */
  173. static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
  174. u32 val)
  175. {
  176. pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
  177. }
  178. /* Clear bits in sram sw entry */
  179. static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
  180. u32 val)
  181. {
  182. pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
  183. }
  184. /* Update ri bits in sram sw entry */
  185. static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
  186. unsigned int bits, unsigned int mask)
  187. {
  188. unsigned int i;
  189. for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
  190. if (!(mask & BIT(i)))
  191. continue;
  192. if (bits & BIT(i))
  193. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
  194. 1);
  195. else
  196. mvpp2_prs_sram_bits_clear(pe,
  197. MVPP2_PRS_SRAM_RI_OFFS + i,
  198. 1);
  199. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
  200. }
  201. }
  202. /* Obtain ri bits from sram sw entry */
  203. static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
  204. {
  205. return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
  206. }
  207. /* Update ai bits in sram sw entry */
  208. static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
  209. unsigned int bits, unsigned int mask)
  210. {
  211. unsigned int i;
  212. for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
  213. if (!(mask & BIT(i)))
  214. continue;
  215. if (bits & BIT(i))
  216. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
  217. 1);
  218. else
  219. mvpp2_prs_sram_bits_clear(pe,
  220. MVPP2_PRS_SRAM_AI_OFFS + i,
  221. 1);
  222. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
  223. }
  224. }
  225. /* Read ai bits from sram sw entry */
  226. static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
  227. {
  228. u8 bits;
  229. /* ai is stored on bits 90->97; so it spreads across two u32 */
  230. int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
  231. int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
  232. bits = (pe->sram[ai_off] >> ai_shift) |
  233. (pe->sram[ai_off + 1] << (32 - ai_shift));
  234. return bits;
  235. }
  236. /* In sram sw entry set lookup ID field of the tcam key to be used in the next
  237. * lookup interation
  238. */
  239. static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
  240. unsigned int lu)
  241. {
  242. int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
  243. mvpp2_prs_sram_bits_clear(pe, sram_next_off,
  244. MVPP2_PRS_SRAM_NEXT_LU_MASK);
  245. mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
  246. }
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign: hw encodes the shift as sign bit + magnitude */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}
	/* Set value. Note the magnitude is OR'ed in, not cleared first -
	 * callers operate on zeroed or freshly read entries.
	 */
	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
		shift & MVPP2_PRS_SRAM_SHIFT_MASK;
	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign: hw encodes the offset as sign bit + magnitude */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}
	/* Set value (field cleared first, unlike the shift magnitude) */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				offset & MVPP2_PRS_SRAM_UDF_MASK);
	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
  301. /* Find parser flow entry */
  302. static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
  303. {
  304. struct mvpp2_prs_entry pe;
  305. int tid;
  306. /* Go through the all entires with MVPP2_PRS_LU_FLOWS */
  307. for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
  308. u8 bits;
  309. if (!priv->prs_shadow[tid].valid ||
  310. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
  311. continue;
  312. mvpp2_prs_init_from_hw(priv, &pe, tid);
  313. bits = mvpp2_prs_sram_ai_get(&pe);
  314. /* Sram store classification lookup ID in AI bits [5:0] */
  315. if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
  316. return tid;
  317. }
  318. return -ENOENT;
  319. }
  320. /* Return first free tcam index, seeking from start to end */
  321. static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
  322. unsigned char end)
  323. {
  324. int tid;
  325. if (start > end)
  326. swap(start, end);
  327. if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
  328. end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
  329. for (tid = start; tid <= end; tid++) {
  330. if (!priv->prs_shadow[tid].valid)
  331. return tid;
  332. }
  333. return -EINVAL;
  334. }
/* Drop flow control pause frames */
static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
{
	/* 01:80:C2:00:00:01 is the IEEE 802.3 MAC control (pause) address */
	unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
	struct mvpp2_prs_entry pe;
	unsigned int len;

	memset(&pe, 0, sizeof(pe));

	/* For all ports - drop flow control frames */
	pe.index = MVPP2_PE_FC_DROP;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Set match on DA - compare all six address bytes exactly */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Mark the frame to be dropped and end the lookup chain */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		/* End the lookup chain for dropped frames */
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports; individual ports opt in below */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to unicast or multicast promiscuous mode */
void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
			       enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	/* Pick the UC or MC variant of the promiscuous entry */
	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses via the I/G bit of the first
		 * DA byte
		 */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift to ethertype (past DA + SA) */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports; individual ports opt in below */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	/* Pick one of the four DSA/EDSA tagged/untagged entries; EDSA
	 * tags are 8 bytes, DSA tags 4 bytes.
	 */
	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration - non-zero only
			 * for the extended (EDSA) variant
			 */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							 MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							 MVPP2_PRS_SRAM_AI_MASK);

			/* Set result info bits to 'single vlan' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
			mvpp2_prs_sram_shift_set(&pe, shift,
						 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports; individual ports opt in below */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	/* EDSA variants match on all ports (port_mask = 0 masks none
	 * out after inversion in the port-map helper); DSA variants
	 * start fully masked.
	 */
	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype - EDSA ethertype followed by two zero
		 * bytes
		 */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);

		/* Shift ethertype + 2 byte reserved + tag*/
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);

			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Search for existing single/triple vlan entry */
static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through the all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		/* The tpid sits in the first two tcam data bytes */
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(&pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(&pe);

		/* Clear double vlan bit - it does not take part in
		 * single/triple matching
		 */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return tid;
	}

	return -ENOENT;
}
/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;
	int tid_aux, tid;
	int ret = 0;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (tid < 0) {
		/* Create new tcam entry - searched from the high end of
		 * the free range down so single/triple entries stay
		 * above the double vlan ones.
		 */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* New entry must sit after (above) every double vlan
		 * entry to keep lookup priority correct.
		 */
		if (tid <= tid_aux)
			return -EINVAL;

		memset(&pe, 0, sizeof(pe));
		pe.index = tid;
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);

		mvpp2_prs_match_etype(&pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);

	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
  631. /* Get first free double vlan ai number */
  632. static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
  633. {
  634. int i;
  635. for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
  636. if (!priv->prs_double_vlans[i])
  637. return i;
  638. }
  639. return -EINVAL;
  640. }
  641. /* Search for existing double vlan entry */
  642. static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
  643. unsigned short tpid2)
  644. {
  645. struct mvpp2_prs_entry pe;
  646. int tid;
  647. /* Go through the all entries with MVPP2_PRS_LU_VLAN */
  648. for (tid = MVPP2_PE_FIRST_FREE_TID;
  649. tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
  650. unsigned int ri_mask;
  651. bool match;
  652. if (!priv->prs_shadow[tid].valid ||
  653. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
  654. continue;
  655. mvpp2_prs_init_from_hw(priv, &pe, tid);
  656. match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
  657. mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
  658. if (!match)
  659. continue;
  660. ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
  661. if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
  662. return tid;
  663. }
  664. return -ENOENT;
  665. }
/* Add or update double vlan entry.
 *
 * If the (@tpid1, @tpid2) pair already has an entry, only its port map is
 * refreshed.  Otherwise a new TCAM entry is allocated from the START of the
 * free range (double-vlan entries must precede all single/triple entries so
 * they win the lookup), and a free "ai" slot is reserved to identify it.
 *
 * Returns 0 on success, negative errno when out of TIDs, out of ai slots,
 * or when the ordering constraint cannot be met (-ERANGE).
 */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	int tid_aux, tid, ai, ret = 0;
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
	if (tid < 0) {
		/* Create new tcam entry - allocated from the low end */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0)
			return ai;

		/* Get first single/triple vlan tid; tid_aux stops at the
		 * first such entry (or past the range if none exists).
		 */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* New entry must land strictly before all single/triple ones */
		if (tid >= tid_aux)
			return -ERANGE;

		/* pe was clobbered by the scan above - start clean */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		pe.index = tid;

		/* Reserve the ai slot only after all failure paths */
		priv->prs_double_vlans[ai] = true;

		/* Match outer TPID at offset 0, inner at offset 4 */
		mvpp2_prs_match_etype(&pe, 0, tpid1);
		mvpp2_prs_match_etype(&pe, 4, tpid2);

		/* Re-run the VLAN lookup on the inner tag */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		/* Tag the next iteration with this entry's ai + double bit */
		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		/* Entry exists - reload it so only the port map changes */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
/* IPv4 header parsing for fragmentation and L4 offset.
 *
 * Installs TWO parser entries for @proto (TCP, UDP or IGMP only):
 *   1) non-fragmented packets - matches fragment-related header bytes equal
 *      to zero and reports @ri with the frag bit clear;
 *   2) fragmented packets - same protocol match but with the fragment bytes
 *      unmasked, reporting @ri | MVPP2_PRS_RI_IP_FRAG_TRUE.
 * The second entry deliberately reuses the configured pe of the first and
 * only patches what differs, so statement order here is load-bearing.
 *
 * Returns 0 on success, -EINVAL for an unsupported protocol, or a negative
 * errno when no free TID is available.
 */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	/* Shift 12 bytes toward the end of the IPv4 header for the next
	 * iteration - NOTE(review): exact rationale for 12 not visible here.
	 */
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Also force the frag bits clear in the result info */
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Match zeroed fragment-related header bytes (offsets 2 and 3) and
	 * the protocol byte at offset 5.
	 */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet - reuse pe from above, patch the differences */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Don't care about the fragment bytes this time - mask them out */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
  781. /* IPv4 L3 multicast or broadcast */
  782. static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
  783. {
  784. struct mvpp2_prs_entry pe;
  785. int mask, tid;
  786. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  787. MVPP2_PE_LAST_FREE_TID);
  788. if (tid < 0)
  789. return tid;
  790. memset(&pe, 0, sizeof(pe));
  791. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
  792. pe.index = tid;
  793. switch (l3_cast) {
  794. case MVPP2_PRS_L3_MULTI_CAST:
  795. mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
  796. MVPP2_PRS_IPV4_MC_MASK);
  797. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
  798. MVPP2_PRS_RI_L3_ADDR_MASK);
  799. break;
  800. case MVPP2_PRS_L3_BROAD_CAST:
  801. mask = MVPP2_PRS_IPV4_BC_MASK;
  802. mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
  803. mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
  804. mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
  805. mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
  806. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
  807. MVPP2_PRS_RI_L3_ADDR_MASK);
  808. break;
  809. default:
  810. return -EINVAL;
  811. }
  812. /* Finished: go to flowid generation */
  813. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  814. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  815. mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
  816. MVPP2_PRS_IPV4_DIP_AI_BIT);
  817. /* Unmask all ports */
  818. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  819. /* Update shadow table and hw entry */
  820. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
  821. mvpp2_prs_hw_write(priv, &pe);
  822. return 0;
  823. }
/* Set entries for protocols over IPv6.
 *
 * Installs one parser entry matching @proto (TCP, UDP, ICMPv6 or IPIP only)
 * in the IPv6 next-header byte, setting @ri/@ri_mask in the result info and
 * recording the L4 offset.  Returns 0, -EINVAL for an unsupported protocol,
 * or a negative errno when no free TID is available.
 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* L4 payload starts sizeof(ipv6hdr) - 6 bytes past the current
	 * position (the parser already advanced into the header).
	 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Match the next-header byte; only fire when no extension headers
	 * are pending (ai bit set by an earlier iteration).
	 */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv6 L3 multicast entry.
 *
 * Only MVPP2_PRS_L3_MULTI_CAST is supported (IPv6 has no broadcast).
 * Flags the multicast class in the result info and loops back into the
 * IPv6 lookup so header parsing continues.
 */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	/* Signal the next iteration that no extension header is pending */
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Match the IPv6 multicast address prefix in the first byte */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
  889. /* Parser per-port initialization */
  890. static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
  891. int lu_max, int offset)
  892. {
  893. u32 val;
  894. /* Set lookup ID */
  895. val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
  896. val &= ~MVPP2_PRS_PORT_LU_MASK(port);
  897. val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
  898. mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
  899. /* Set maximum number of loops for packet received from port */
  900. val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
  901. val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
  902. val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
  903. mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
  904. /* Set initial offset for packet header extraction for the first
  905. * searching loop
  906. */
  907. val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
  908. val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
  909. val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
  910. mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
  911. }
/* Default flow entries initialization for all ports.
 *
 * One placeholder flow entry per port, indexed downward from
 * MVPP2_PE_FIRST_DEFAULT_FLOW.  All ports are masked off, so these entries
 * never match until a port enables them later; the flow ID is simply the
 * port number.
 */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
/* Set default entry for Marvell Header field.
 *
 * Skips the Marvell Header at the start of every packet and hands over to
 * the MAC (DA) lookup stage for all ports.
 */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	/* Advance past the Marvell Header before the next lookup */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entires (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Mark the packet for dropping and finish the lookup */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes; they
	 * start disabled (add = false) and are enabled per port later.
	 */
	mvpp2_prs_drop_fc(priv);
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
/* Set default entries for various types of dsa packets.
 *
 * Installs placeholder entries for every (tagged/untagged) x (DSA/EDSA)
 * combination - disabled until a port enables them - plus the two enabled
 * DSA-ethertype entries, and finally a catch-all default that falls
 * through to the VLAN lookup when no DSA/EDSA tag is present.
 */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* NOTE(review): shadow class recorded as MVPP2_PRS_LU_MAC while the
	 * tcam lu is MVPP2_PRS_LU_DSA - looks intentional but confirm.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Initialize parser entries for VID filtering.
 *
 * Installs two fall-through entries for the VID stage: one for plain VLAN
 * tags (skip 4 bytes) and one for extended-DSA tags (skip 8 bytes),
 * distinguished by MVPP2_PRS_EDSA_VID_AI_BIT.  Both continue to the L2
 * (ethertype) lookup.
 */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Match only when the EDSA marker bit is NOT set */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA*/
	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Match only when the EDSA marker bit IS set */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Match basic ethertypes.
 *
 * Installs the L2-stage entries for PPPoE, ARP, LBTD, IPv4 (with and
 * without options), IPv6, and a final catch-all for unknown ethertypes.
 * NOTE: the "IPv4 with options" entry deliberately reuses the still-loaded
 * pe of the "without options" entry and only patches the differences, so
 * the statement order between those two sections must not change.
 *
 * Returns 0 on success or a negative errno when a free TID cannot be
 * allocated.
 */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	/* Skip the PPPoE header and continue in the PPPoE lookup stage */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD (loopback detection) */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Mark the packet for special CPU handling */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	/* Additionally require version/IHL right after the ethertype */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options - NO memset here: reuses the entry
	 * configured above and relaxes the IHL constraint.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset even it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
  1232. /* Configure vlan entries and detect up to 2 successive VLAN tags.
  1233. * Possible options:
  1234. * 0x8100, 0x88A8
  1235. * 0x8100, 0x8100
  1236. * 0x8100
  1237. * 0x88A8
  1238. */
  1239. static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
  1240. {
  1241. struct mvpp2_prs_entry pe;
  1242. int err;
  1243. priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
  1244. MVPP2_PRS_DBL_VLANS_MAX,
  1245. GFP_KERNEL);
  1246. if (!priv->prs_double_vlans)
  1247. return -ENOMEM;
  1248. /* Double VLAN: 0x8100, 0x88A8 */
  1249. err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
  1250. MVPP2_PRS_PORT_MASK);
  1251. if (err)
  1252. return err;
  1253. /* Double VLAN: 0x8100, 0x8100 */
  1254. err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
  1255. MVPP2_PRS_PORT_MASK);
  1256. if (err)
  1257. return err;
  1258. /* Single VLAN: 0x88a8 */
  1259. err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
  1260. MVPP2_PRS_PORT_MASK);
  1261. if (err)
  1262. return err;
  1263. /* Single VLAN: 0x8100 */
  1264. err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
  1265. MVPP2_PRS_PORT_MASK);
  1266. if (err)
  1267. return err;
  1268. /* Set default double vlan entry */
  1269. memset(&pe, 0, sizeof(pe));
  1270. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  1271. pe.index = MVPP2_PE_VLAN_DBL;
  1272. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
  1273. /* Clear ai for next iterations */
  1274. mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
  1275. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
  1276. MVPP2_PRS_RI_VLAN_MASK);
  1277. mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
  1278. MVPP2_PRS_DBL_VLAN_AI_BIT);
  1279. /* Unmask all ports */
  1280. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1281. /* Update shadow table and hw entry */
  1282. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
  1283. mvpp2_prs_hw_write(priv, &pe);
  1284. /* Set default vlan none entry */
  1285. memset(&pe, 0, sizeof(pe));
  1286. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  1287. pe.index = MVPP2_PE_VLAN_NONE;
  1288. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  1289. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
  1290. MVPP2_PRS_RI_VLAN_MASK);
  1291. /* Unmask all ports */
  1292. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1293. /* Update shadow table and hw entry */
  1294. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
  1295. mvpp2_prs_hw_write(priv, &pe);
  1296. return 0;
  1297. }
/* Set entries for PPPoE ethertype.
 *
 * Installs four parser entries for traffic carried over PPPoE: IPv4 with
 * options, IPv4 without options, IPv6, and a catch-all for any other
 * payload protocol.
 *
 * Returns 0 on success or a negative value when no free TCAM entry is
 * available.
 */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	/* Match the PPP protocol field against IPv4 */
	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options.
	 * NOTE: pe is deliberately NOT reset here - this entry inherits the
	 * PPP_IP match, next-lookup, shift and L3 offset from the entry
	 * above; only the index, the version/IHL match and the ri words
	 * differ.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Additionally match the IPv4 version/IHL byte right after the
	 * ethertype (head + IHL values with no options).
	 */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	/* Match the PPP protocol field against IPv6 */
	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Jump to DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE: no ethertype match, so this entry catches
	 * everything the more specific entries above did not.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv4.
 *
 * Installs the per-protocol entries (TCP, UDP, IGMP), the broadcast and
 * multicast classifiers, and the two default entries used when nothing
 * more specific matches: one for unknown L4 protocols and one for
 * unicast destination addresses.
 *
 * Returns 0 on success or the first negative error from entry setup.
 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* IGMP is flagged with the special CPU code / UDF3 bits rather
	 * than an L4 protocol type.
	 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4: a second IPv4 pass handles the DIP */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	/* Skip 12 bytes before the next lookup */
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* Set the DIP AI bit in sram so the second pass is recognizable */
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Match only on the first pass (DIP AI bit still clear) */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address: matches the second pass,
	 * i.e. the DIP AI bit set by the entry above.
	 */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv6.
 *
 * Installs the per-protocol entries (TCP, UDP, ICMPv6, IPIP for DS-Lite),
 * the multicast classifier, a drop entry for a zero hop-limit field, and
 * default entries for unknown protocols, unknown extension-header
 * protocols and unicast addresses.
 *
 * Returns 0 on success or the first negative error from entry setup.
 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* ICMPv6 is flagged with the special CPU code / UDF3 bits rather
	 * than an L4 protocol type.
	 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit: matches a zero value in the hop
	 * field (byte 1 under the hop mask) and marks the packet for drop.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry.
	 * NOTE(review): the shadow lookup id is MVPP2_PRS_LU_IP4 here even
	 * though the TCAM lookup is MVPP2_PRS_LU_IP6 - looks inconsistent;
	 * confirm whether this is intentional before changing it.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry (same LU_IP4 quirk as above) */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry (same LU_IP4 quirk as above) */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
  1567. /* Find tcam entry with matched pair <vid,port> */
  1568. static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
  1569. {
  1570. unsigned char byte[2], enable[2];
  1571. struct mvpp2_prs_entry pe;
  1572. u16 rvid, rmask;
  1573. int tid;
  1574. /* Go through the all entries with MVPP2_PRS_LU_VID */
  1575. for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
  1576. tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
  1577. if (!port->priv->prs_shadow[tid].valid ||
  1578. port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
  1579. continue;
  1580. mvpp2_prs_init_from_hw(port->priv, &pe, tid);
  1581. mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
  1582. mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
  1583. rvid = ((byte[0] & 0xf) << 8) + byte[1];
  1584. rmask = ((enable[0] & 0xf) << 8) + enable[1];
  1585. if (rvid != vid || rmask != mask)
  1586. continue;
  1587. return tid;
  1588. }
  1589. return -ENOENT;
  1590. }
/* Write parser entry for VID filtering.
 *
 * Adds (or updates) the TCAM entry matching <vid, port> in this port's
 * dedicated VID filter range. Returns 0 on success, or a negative value
 * when the per-port range has no free slot.
 */
int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	/* First entry of this port's private VID filter window */
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <vid,port> already exist */
	tid = mvpp2_prs_vid_range_find(port, vid, mask);

	/* Amount of header to skip depends on the DSA tag size configured
	 * in the MH register for this port.
	 */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (tid < 0) {

		/* Go through all entries from first to last in vlan range */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0)
			return tid;

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		/* Reuse the existing entry; only its port map changes */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
  1639. /* Write parser entry for VID filtering */
  1640. void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
  1641. {
  1642. struct mvpp2 *priv = port->priv;
  1643. int tid;
  1644. /* Scan TCAM and see if entry with this <vid,port> already exist */
  1645. tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
  1646. /* No such entry */
  1647. if (tid < 0)
  1648. return;
  1649. mvpp2_prs_hw_inv(priv, tid);
  1650. priv->prs_shadow[tid].valid = false;
  1651. }
  1652. /* Remove all existing VID filters on this port */
  1653. void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
  1654. {
  1655. struct mvpp2 *priv = port->priv;
  1656. int tid;
  1657. for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
  1658. tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
  1659. if (priv->prs_shadow[tid].valid) {
  1660. mvpp2_prs_hw_inv(priv, tid);
  1661. priv->prs_shadow[tid].valid = false;
  1662. }
  1663. }
  1664. }
  1665. /* Remove VID filering entry for this port */
  1666. void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
  1667. {
  1668. unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
  1669. struct mvpp2 *priv = port->priv;
  1670. /* Invalidate the guard entry */
  1671. mvpp2_prs_hw_inv(priv, tid);
  1672. priv->prs_shadow[tid].valid = false;
  1673. }
/* Add guard entry that drops packets when no VID is matched on this port.
 *
 * The guard entry sits at this port's default VID slot and is installed
 * at most once; the function is a no-op while the entry remains valid.
 */
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	/* The guard entry is already in place - nothing to do */
	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	/* Amount of header to skip depends on the DSA tag size configured
	 * in the MH register for this port.
	 */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Parser default initialization.
 *
 * Enables the TCAM, wipes every TCAM/SRAM entry, allocates the shadow
 * table, points every port at the MH lookup, and installs all the
 * default entry groups (flows, MH, MAC, DSA, VID, ethertype, VLAN,
 * PPPoE, IPv6, IPv4).
 *
 * Returns 0 on success, -ENOMEM if the shadow table allocation fails,
 * or the first error reported by a sub-initializer.
 */
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	/* Shadow table mirrors hw entry state; device-managed allocation */
	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
  1757. /* Compare MAC DA with tcam entry data */
  1758. static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
  1759. const u8 *da, unsigned char *mask)
  1760. {
  1761. unsigned char tcam_byte, tcam_mask;
  1762. int index;
  1763. for (index = 0; index < ETH_ALEN; index++) {
  1764. mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
  1765. if (tcam_mask != mask[index])
  1766. return false;
  1767. if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
  1768. return false;
  1769. }
  1770. return true;
  1771. }
  1772. /* Find tcam entry with matched pair <MAC DA, port> */
  1773. static int
  1774. mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
  1775. unsigned char *mask, int udf_type)
  1776. {
  1777. struct mvpp2_prs_entry pe;
  1778. int tid;
  1779. /* Go through the all entires with MVPP2_PRS_LU_MAC */
  1780. for (tid = MVPP2_PE_MAC_RANGE_START;
  1781. tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
  1782. unsigned int entry_pmap;
  1783. if (!priv->prs_shadow[tid].valid ||
  1784. (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
  1785. (priv->prs_shadow[tid].udf != udf_type))
  1786. continue;
  1787. mvpp2_prs_init_from_hw(priv, &pe, tid);
  1788. entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
  1789. if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
  1790. entry_pmap == pmap)
  1791. return tid;
  1792. }
  1793. return -ENOENT;
  1794. }
/* Update parser's mac da entry.
 *
 * @port: port to enable/disable reception for
 * @da:   MAC destination address
 * @add:  true to accept @da on this port, false to stop accepting it
 *
 * One TCAM entry is shared between ports through its port map: adding
 * sets this port's bit, removing clears it, and the entry is invalidated
 * once no port bits remain.
 *
 * Returns 0 on success or a negative value when no free TCAM slot exists
 * or the request is inconsistent.
 */
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
{
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		/* Removing a non-existing entry is a successful no-op */
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through the all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		/* An empty port map right after an add is inconsistent */
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits: L2 cast type, plus MAC_ME when the address
	 * is the port's own.
	 */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
  1865. int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
  1866. {
  1867. struct mvpp2_port *port = netdev_priv(dev);
  1868. int err;
  1869. /* Remove old parser entry */
  1870. err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
  1871. if (err)
  1872. return err;
  1873. /* Add new parser entry */
  1874. err = mvpp2_prs_mac_da_accept(port, da, true);
  1875. if (err)
  1876. return err;
  1877. /* Set addr in the device */
  1878. ether_addr_copy(dev->dev_addr, da);
  1879. return 0;
  1880. }
/* Remove from the parser every MAC DA entry active on this port, except
 * the broadcast address and the port's own address.
 */
void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	unsigned long pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		/* Only consider valid default-UDF MAC entries */
		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* We only want entries active on this port */
		if (!test_bit(port->id, &pmap))
			continue;

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Special cases : Don't remove broadcast and port's own
		 * address
		 */
		if (is_broadcast_ether_addr(da) ||
		    ether_addr_equal(da, port->dev->dev_addr))
			continue;

		/* Remove entry from TCAM */
		mvpp2_prs_mac_da_accept(port, da, false);
	}
}
  1913. int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
  1914. {
  1915. switch (type) {
  1916. case MVPP2_TAG_TYPE_EDSA:
  1917. /* Add port to EDSA entries */
  1918. mvpp2_prs_dsa_tag_set(priv, port, true,
  1919. MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
  1920. mvpp2_prs_dsa_tag_set(priv, port, true,
  1921. MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
  1922. /* Remove port from DSA entries */
  1923. mvpp2_prs_dsa_tag_set(priv, port, false,
  1924. MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
  1925. mvpp2_prs_dsa_tag_set(priv, port, false,
  1926. MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
  1927. break;
  1928. case MVPP2_TAG_TYPE_DSA:
  1929. /* Add port to DSA entries */
  1930. mvpp2_prs_dsa_tag_set(priv, port, true,
  1931. MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
  1932. mvpp2_prs_dsa_tag_set(priv, port, true,
  1933. MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
  1934. /* Remove port from EDSA entries */
  1935. mvpp2_prs_dsa_tag_set(priv, port, false,
  1936. MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
  1937. mvpp2_prs_dsa_tag_set(priv, port, false,
  1938. MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
  1939. break;
  1940. case MVPP2_TAG_TYPE_MH:
  1941. case MVPP2_TAG_TYPE_NONE:
  1942. /* Remove port form EDSA and DSA entries */
  1943. mvpp2_prs_dsa_tag_set(priv, port, false,
  1944. MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
  1945. mvpp2_prs_dsa_tag_set(priv, port, false,
  1946. MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
  1947. mvpp2_prs_dsa_tag_set(priv, port, false,
  1948. MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
  1949. mvpp2_prs_dsa_tag_set(priv, port, false,
  1950. MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
  1951. break;
  1952. default:
  1953. if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
  1954. return -EINVAL;
  1955. }
  1956. return 0;
  1957. }
  1958. int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
  1959. {
  1960. struct mvpp2_prs_entry pe;
  1961. u8 *ri_byte, *ri_byte_mask;
  1962. int tid, i;
  1963. memset(&pe, 0, sizeof(pe));
  1964. tid = mvpp2_prs_tcam_first_free(priv,
  1965. MVPP2_PE_LAST_FREE_TID,
  1966. MVPP2_PE_FIRST_FREE_TID);
  1967. if (tid < 0)
  1968. return tid;
  1969. pe.index = tid;
  1970. ri_byte = (u8 *)&ri;
  1971. ri_byte_mask = (u8 *)&ri_mask;
  1972. mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
  1973. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
  1974. for (i = 0; i < 4; i++) {
  1975. mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
  1976. ri_byte_mask[i]);
  1977. }
  1978. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
  1979. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1980. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1981. mvpp2_prs_hw_write(priv, &pe);
  1982. return 0;
  1983. }
/* Set prs flow for the port.
 *
 * Ensures a flow-generation entry exists for this port's flow id and
 * that it matches only packets from this port. Returns 0 on success, or
 * a negative value when no TCAM entry is free.
 */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->priv, port->id);

	/* Such entry not exist */
	if (tid < 0) {
		/* Go through the all entires from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		/* Entry already present: re-read it and refresh its port map */
		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	/* Match only packets received on this port */
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, &pe);

	return 0;
}
  2013. int mvpp2_prs_hits(struct mvpp2 *priv, int index)
  2014. {
  2015. u32 val;
  2016. if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
  2017. return -EINVAL;
  2018. mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
  2019. val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
  2020. val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
  2021. return val;
  2022. }