ipa_endpoint.c 62 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  3. * Copyright (C) 2019-2024 Linaro Ltd.
  4. */
  5. #include <linux/bitfield.h>
  6. #include <linux/bits.h>
  7. #include <linux/device.h>
  8. #include <linux/dma-direction.h>
  9. #include <linux/if_rmnet.h>
  10. #include <linux/types.h>
  11. #include "gsi.h"
  12. #include "gsi_trans.h"
  13. #include "ipa.h"
  14. #include "ipa_cmd.h"
  15. #include "ipa_data.h"
  16. #include "ipa_endpoint.h"
  17. #include "ipa_gsi.h"
  18. #include "ipa_interrupt.h"
  19. #include "ipa_mem.h"
  20. #include "ipa_modem.h"
  21. #include "ipa_power.h"
  22. #include "ipa_reg.h"
  23. #include "ipa_table.h"
  24. #include "ipa_version.h"
  25. /* Hardware is told about receive buffers once a "batch" has been queued */
  26. #define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
  27. /* The amount of RX buffer space consumed by standard skb overhead */
  28. #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
  29. /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
  30. #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
  31. #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
/** enum ipa_status_opcode - IPA status opcode field hardware values */
enum ipa_status_opcode {				/* *Not* a bitmask */
	IPA_STATUS_OPCODE_PACKET		= 1,
	IPA_STATUS_OPCODE_NEW_RULE_PACKET	= 2,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 4,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 8,
	IPA_STATUS_OPCODE_LOG			= 16,
	IPA_STATUS_OPCODE_DCMP			= 32,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 64,
};
/** enum ipa_status_exception - IPA status exception field hardware values */
enum ipa_status_exception {			/* *Not* a bitmask */
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 1,
	IPA_STATUS_EXCEPTION_IPTYPE		= 4,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 8,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 16,
	IPA_STATUS_EXCEPTION_SW_FILTER		= 32,
	/* Value 64 is shared; its meaning depends on the IP version */
	IPA_STATUS_EXCEPTION_NAT		= 64,		/* IPv4 */
	IPA_STATUS_EXCEPTION_IPV6_CONN_TRACK	= 64,		/* IPv6 */
	IPA_STATUS_EXCEPTION_UC			= 128,
	IPA_STATUS_EXCEPTION_INVALID_ENDPOINT	= 129,
	IPA_STATUS_EXCEPTION_HEADER_INSERT	= 136,
	/* NOTE(review): "CHEKCSUM" is misspelled; kept as-is because the
	 * name may be referenced elsewhere -- confirm before renaming.
	 */
	IPA_STATUS_EXCEPTION_CHEKCSUM		= 229,
};
/** enum ipa_status_mask - IPA status mask field bitmask hardware values */
enum ipa_status_mask {
	IPA_STATUS_MASK_FRAG_PROCESS	= BIT(0),
	IPA_STATUS_MASK_FILT_PROCESS	= BIT(1),
	IPA_STATUS_MASK_NAT_PROCESS	= BIT(2),
	IPA_STATUS_MASK_ROUTE_PROCESS	= BIT(3),
	IPA_STATUS_MASK_TAG_VALID	= BIT(4),
	IPA_STATUS_MASK_FRAGMENT	= BIT(5),
	IPA_STATUS_MASK_FIRST_FRAGMENT	= BIT(6),
	IPA_STATUS_MASK_V4		= BIT(7),
	IPA_STATUS_MASK_CKSUM_PROCESS	= BIT(8),
	IPA_STATUS_MASK_AGGR_PROCESS	= BIT(9),
	IPA_STATUS_MASK_DEST_EOT	= BIT(10),
	IPA_STATUS_MASK_DEAGGR_PROCESS	= BIT(11),
	IPA_STATUS_MASK_DEAGG_FIRST	= BIT(12),
	IPA_STATUS_MASK_SRC_EOT		= BIT(13),
	IPA_STATUS_MASK_PREV_EOT	= BIT(14),
	IPA_STATUS_MASK_BYTE_LIMIT	= BIT(15),
};
  76. /* Special IPA filter/router rule field value indicating "rule miss" */
  77. #define IPA_STATUS_RULE_MISS 0x3ff /* 10-bit filter/router rule fields */
/* The IPA status nat_type field uses enum ipa_nat_type hardware values */

/* enum ipa_status_field_id - IPA packet status structure field identifiers.
 * These name the fields decoded by ipa_status_extract().
 */
enum ipa_status_field_id {
	STATUS_OPCODE,			/* enum ipa_status_opcode */
	STATUS_EXCEPTION,		/* enum ipa_status_exception */
	STATUS_MASK,			/* enum ipa_status_mask (bitmask) */
	STATUS_LENGTH,
	STATUS_SRC_ENDPOINT,
	STATUS_DST_ENDPOINT,
	STATUS_METADATA,
	STATUS_FILTER_LOCAL,		/* Boolean */
	STATUS_FILTER_HASH,		/* Boolean */
	STATUS_FILTER_GLOBAL,		/* Boolean */
	STATUS_FILTER_RETAIN,		/* Boolean */
	STATUS_FILTER_RULE_INDEX,
	STATUS_ROUTER_LOCAL,		/* Boolean */
	STATUS_ROUTER_HASH,		/* Boolean */
	STATUS_UCP,			/* Boolean */
	STATUS_ROUTER_TABLE,
	STATUS_ROUTER_RULE_INDEX,
	STATUS_NAT_HIT,			/* Boolean */
	STATUS_NAT_INDEX,
	STATUS_NAT_TYPE,		/* enum ipa_nat_type */
	STATUS_TAG_LOW32,		/* Low-order 32 bits of 48-bit tag */
	STATUS_TAG_HIGH16,		/* High-order 16 bits of 48-bit tag */
	STATUS_SEQUENCE,
	STATUS_TIME_OF_DAY,
	STATUS_HEADER_LOCAL,		/* Boolean */
	STATUS_HEADER_OFFSET,
	STATUS_FRAG_HIT,		/* Boolean */
	STATUS_FRAG_RULE_INDEX,
};
/* Size in bytes of an IPA packet status structure */
#define IPA_STATUS_SIZE			sizeof(__le32[8])

/* IPA status structure decoder; looks up field values for a structure.
 *
 * The status is eight little-endian 32-bit words.  For some fields the
 * bit layout differs between IPA versions before and after v5.0 (the
 * source/destination endpoint, router local/hash, and UCP fields), so
 * the IPA version is consulted where needed.  Returns the extracted
 * field value, or 0 (after a WARN) for an unrecognized field ID.
 */
static u32 ipa_status_extract(struct ipa *ipa, const void *data,
			      enum ipa_status_field_id field)
{
	enum ipa_version version = ipa->version;
	const __le32 *word = data;

	switch (field) {
	case STATUS_OPCODE:
		return le32_get_bits(word[0], GENMASK(7, 0));
	case STATUS_EXCEPTION:
		return le32_get_bits(word[0], GENMASK(15, 8));
	case STATUS_MASK:
		return le32_get_bits(word[0], GENMASK(31, 16));
	case STATUS_LENGTH:
		return le32_get_bits(word[1], GENMASK(15, 0));
	case STATUS_SRC_ENDPOINT:
		/* The endpoint field widened from 5 to 8 bits in IPA v5.0 */
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[1], GENMASK(20, 16));
		return le32_get_bits(word[1], GENMASK(23, 16));
	/* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
	/* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
	case STATUS_DST_ENDPOINT:
		/* The destination endpoint moved to word 7 in IPA v5.0 */
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[1], GENMASK(28, 24));
		return le32_get_bits(word[7], GENMASK(23, 16));
	/* Status word 1, bits 29-31 are reserved */
	case STATUS_METADATA:
		return le32_to_cpu(word[2]);
	case STATUS_FILTER_LOCAL:
		return le32_get_bits(word[3], GENMASK(0, 0));
	case STATUS_FILTER_HASH:
		return le32_get_bits(word[3], GENMASK(1, 1));
	case STATUS_FILTER_GLOBAL:
		return le32_get_bits(word[3], GENMASK(2, 2));
	case STATUS_FILTER_RETAIN:
		return le32_get_bits(word[3], GENMASK(3, 3));
	case STATUS_FILTER_RULE_INDEX:
		return le32_get_bits(word[3], GENMASK(13, 4));
	/* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
	case STATUS_ROUTER_LOCAL:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(14, 14));
		return le32_get_bits(word[1], GENMASK(27, 27));
	case STATUS_ROUTER_HASH:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(15, 15));
		return le32_get_bits(word[1], GENMASK(28, 28));
	case STATUS_UCP:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(16, 16));
		return le32_get_bits(word[7], GENMASK(31, 31));
	case STATUS_ROUTER_TABLE:
		/* The router table field widened from 5 to 8 bits in v5.0 */
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(21, 17));
		return le32_get_bits(word[3], GENMASK(21, 14));
	case STATUS_ROUTER_RULE_INDEX:
		return le32_get_bits(word[3], GENMASK(31, 22));
	case STATUS_NAT_HIT:
		return le32_get_bits(word[4], GENMASK(0, 0));
	case STATUS_NAT_INDEX:
		return le32_get_bits(word[4], GENMASK(13, 1));
	case STATUS_NAT_TYPE:
		return le32_get_bits(word[4], GENMASK(15, 14));
	case STATUS_TAG_LOW32:
		/* The 48-bit tag straddles words 4 and 5 */
		return le32_get_bits(word[4], GENMASK(31, 16)) |
			(le32_get_bits(word[5], GENMASK(15, 0)) << 16);
	case STATUS_TAG_HIGH16:
		return le32_get_bits(word[5], GENMASK(31, 16));
	case STATUS_SEQUENCE:
		return le32_get_bits(word[6], GENMASK(7, 0));
	case STATUS_TIME_OF_DAY:
		return le32_get_bits(word[6], GENMASK(31, 8));
	case STATUS_HEADER_LOCAL:
		return le32_get_bits(word[7], GENMASK(0, 0));
	case STATUS_HEADER_OFFSET:
		return le32_get_bits(word[7], GENMASK(10, 1));
	case STATUS_FRAG_HIT:
		return le32_get_bits(word[7], GENMASK(11, 11));
	case STATUS_FRAG_RULE_INDEX:
		return le32_get_bits(word[7], GENMASK(15, 12));
	/* Status word 7, bits 16-30 are reserved */
	/* Status word 7, bit 31 is reserved (not IPA v5.0+) */
	default:
		WARN(true, "%s: bad field_id %u\n", __func__, field);
		return 0;
	}
}
  199. /* Compute the aggregation size value to use for a given buffer size */
  200. static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
  201. {
  202. /* A hard aggregation limit will not be crossed; aggregation closes
  203. * if saving incoming data would cross the hard byte limit boundary.
  204. *
  205. * With a soft limit, aggregation closes *after* the size boundary
  206. * has been crossed. In that case the limit must leave enough space
  207. * after that limit to receive a full MTU of data plus overhead.
  208. */
  209. if (!aggr_hard_limit)
  210. rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
  211. /* The byte limit is encoded as a number of kilobytes */
  212. return rx_buffer_size / SZ_1K;
  213. }
  214. static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
  215. const struct ipa_gsi_endpoint_data *all_data,
  216. const struct ipa_gsi_endpoint_data *data)
  217. {
  218. const struct ipa_gsi_endpoint_data *other_data;
  219. enum ipa_endpoint_name other_name;
  220. struct device *dev = ipa->dev;
  221. if (ipa_gsi_endpoint_data_empty(data))
  222. return true;
  223. if (!data->toward_ipa) {
  224. const struct ipa_endpoint_rx *rx_config;
  225. const struct reg *reg;
  226. u32 buffer_size;
  227. u32 aggr_size;
  228. u32 limit;
  229. if (data->endpoint.filter_support) {
  230. dev_err(dev, "filtering not supported for "
  231. "RX endpoint %u\n",
  232. data->endpoint_id);
  233. return false;
  234. }
  235. /* Nothing more to check for non-AP RX */
  236. if (data->ee_id != GSI_EE_AP)
  237. return true;
  238. rx_config = &data->endpoint.config.rx;
  239. /* The buffer size must hold an MTU plus overhead */
  240. buffer_size = rx_config->buffer_size;
  241. limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
  242. if (buffer_size < limit) {
  243. dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
  244. data->endpoint_id, buffer_size, limit);
  245. return false;
  246. }
  247. if (!data->endpoint.config.aggregation) {
  248. bool result = true;
  249. /* No aggregation; check for bogus aggregation data */
  250. if (rx_config->aggr_time_limit) {
  251. dev_err(dev,
  252. "time limit with no aggregation for RX endpoint %u\n",
  253. data->endpoint_id);
  254. result = false;
  255. }
  256. if (rx_config->aggr_hard_limit) {
  257. dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
  258. data->endpoint_id);
  259. result = false;
  260. }
  261. if (rx_config->aggr_close_eof) {
  262. dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
  263. data->endpoint_id);
  264. result = false;
  265. }
  266. return result; /* Nothing more to check */
  267. }
  268. /* For an endpoint supporting receive aggregation, the byte
  269. * limit defines the point at which aggregation closes. This
  270. * check ensures the receive buffer size doesn't result in a
  271. * limit that exceeds what's representable in the aggregation
  272. * byte limit field.
  273. */
  274. aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
  275. rx_config->aggr_hard_limit);
  276. reg = ipa_reg(ipa, ENDP_INIT_AGGR);
  277. limit = reg_field_max(reg, BYTE_LIMIT);
  278. if (aggr_size > limit) {
  279. dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
  280. data->endpoint_id, aggr_size, limit);
  281. return false;
  282. }
  283. return true; /* Nothing more to check for RX */
  284. }
  285. /* Starting with IPA v4.5 sequencer replication is obsolete */
  286. if (ipa->version >= IPA_VERSION_4_5) {
  287. if (data->endpoint.config.tx.seq_rep_type) {
  288. dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n",
  289. data->endpoint_id);
  290. return false;
  291. }
  292. }
  293. if (data->endpoint.config.status_enable) {
  294. other_name = data->endpoint.config.tx.status_endpoint;
  295. if (other_name >= count) {
  296. dev_err(dev, "status endpoint name %u out of range "
  297. "for endpoint %u\n",
  298. other_name, data->endpoint_id);
  299. return false;
  300. }
  301. /* Status endpoint must be defined... */
  302. other_data = &all_data[other_name];
  303. if (ipa_gsi_endpoint_data_empty(other_data)) {
  304. dev_err(dev, "DMA endpoint name %u undefined "
  305. "for endpoint %u\n",
  306. other_name, data->endpoint_id);
  307. return false;
  308. }
  309. /* ...and has to be an RX endpoint... */
  310. if (other_data->toward_ipa) {
  311. dev_err(dev,
  312. "status endpoint for endpoint %u not RX\n",
  313. data->endpoint_id);
  314. return false;
  315. }
  316. /* ...and if it's to be an AP endpoint... */
  317. if (other_data->ee_id == GSI_EE_AP) {
  318. /* ...make sure it has status enabled. */
  319. if (!other_data->endpoint.config.status_enable) {
  320. dev_err(dev,
  321. "status not enabled for endpoint %u\n",
  322. other_data->endpoint_id);
  323. return false;
  324. }
  325. }
  326. }
  327. if (data->endpoint.config.dma_mode) {
  328. other_name = data->endpoint.config.dma_endpoint;
  329. if (other_name >= count) {
  330. dev_err(dev, "DMA endpoint name %u out of range "
  331. "for endpoint %u\n",
  332. other_name, data->endpoint_id);
  333. return false;
  334. }
  335. other_data = &all_data[other_name];
  336. if (ipa_gsi_endpoint_data_empty(other_data)) {
  337. dev_err(dev, "DMA endpoint name %u undefined "
  338. "for endpoint %u\n",
  339. other_name, data->endpoint_id);
  340. return false;
  341. }
  342. }
  343. return true;
  344. }
  345. /* Validate endpoint configuration data. Return max defined endpoint ID */
  346. static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
  347. const struct ipa_gsi_endpoint_data *data)
  348. {
  349. const struct ipa_gsi_endpoint_data *dp = data;
  350. struct device *dev = ipa->dev;
  351. enum ipa_endpoint_name name;
  352. u32 max;
  353. if (count > IPA_ENDPOINT_COUNT) {
  354. dev_err(dev, "too many endpoints specified (%u > %u)\n",
  355. count, IPA_ENDPOINT_COUNT);
  356. return 0;
  357. }
  358. /* Make sure needed endpoints have defined data */
  359. if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
  360. dev_err(dev, "command TX endpoint not defined\n");
  361. return 0;
  362. }
  363. if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
  364. dev_err(dev, "LAN RX endpoint not defined\n");
  365. return 0;
  366. }
  367. if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
  368. dev_err(dev, "AP->modem TX endpoint not defined\n");
  369. return 0;
  370. }
  371. if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
  372. dev_err(dev, "AP<-modem RX endpoint not defined\n");
  373. return 0;
  374. }
  375. max = 0;
  376. for (name = 0; name < count; name++, dp++) {
  377. if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
  378. return 0;
  379. max = max_t(u32, max, dp->endpoint_id);
  380. }
  381. return max;
  382. }
  383. /* Allocate a transaction to use on a non-command endpoint */
  384. static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
  385. u32 tre_count)
  386. {
  387. struct gsi *gsi = &endpoint->ipa->gsi;
  388. u32 channel_id = endpoint->channel_id;
  389. enum dma_data_direction direction;
  390. direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
  391. return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
  392. }
  393. /* suspend_delay represents suspend for RX, delay for TX endpoints.
  394. * Note that suspend is not supported starting with IPA v4.0, and
  395. * delay mode should not be used starting with IPA v4.2.
  396. */
  397. static bool
  398. ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
  399. {
  400. struct ipa *ipa = endpoint->ipa;
  401. const struct reg *reg;
  402. u32 field_id;
  403. u32 offset;
  404. bool state;
  405. u32 mask;
  406. u32 val;
  407. if (endpoint->toward_ipa)
  408. WARN_ON(ipa->version >= IPA_VERSION_4_2);
  409. else
  410. WARN_ON(ipa->version >= IPA_VERSION_4_0);
  411. reg = ipa_reg(ipa, ENDP_INIT_CTRL);
  412. offset = reg_n_offset(reg, endpoint->endpoint_id);
  413. val = ioread32(ipa->reg_virt + offset);
  414. field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
  415. mask = reg_bit(reg, field_id);
  416. state = !!(val & mask);
  417. /* Don't bother if it's already in the requested state */
  418. if (suspend_delay != state) {
  419. val ^= mask;
  420. iowrite32(val, ipa->reg_virt + offset);
  421. }
  422. return state;
  423. }
  424. /* We don't care what the previous state was for delay mode */
  425. static void
  426. ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
  427. {
  428. /* Delay mode should not be used for IPA v4.2+ */
  429. WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
  430. WARN_ON(!endpoint->toward_ipa);
  431. (void)ipa_endpoint_init_ctrl(endpoint, enable);
  432. }
  433. static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
  434. {
  435. u32 endpoint_id = endpoint->endpoint_id;
  436. struct ipa *ipa = endpoint->ipa;
  437. u32 unit = endpoint_id / 32;
  438. const struct reg *reg;
  439. u32 val;
  440. WARN_ON(!test_bit(endpoint_id, ipa->available));
  441. reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
  442. val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
  443. return !!(val & BIT(endpoint_id % 32));
  444. }
  445. static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
  446. {
  447. u32 endpoint_id = endpoint->endpoint_id;
  448. u32 mask = BIT(endpoint_id % 32);
  449. struct ipa *ipa = endpoint->ipa;
  450. u32 unit = endpoint_id / 32;
  451. const struct reg *reg;
  452. WARN_ON(!test_bit(endpoint_id, ipa->available));
  453. reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
  454. iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
  455. }
  456. /**
  457. * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
  458. * @endpoint: Endpoint on which to emulate a suspend
  459. *
  460. * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
  461. * with an open aggregation frame. This is to work around a hardware
  462. * issue in IPA version 3.5.1 where the suspend interrupt will not be
  463. * generated when it should be.
  464. */
  465. static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
  466. {
  467. struct ipa *ipa = endpoint->ipa;
  468. if (!endpoint->config.aggregation)
  469. return;
  470. /* Nothing to do if the endpoint doesn't have aggregation open */
  471. if (!ipa_endpoint_aggr_active(endpoint))
  472. return;
  473. /* Force close aggregation */
  474. ipa_endpoint_force_close(endpoint);
  475. ipa_interrupt_simulate_suspend(ipa->interrupt);
  476. }
  477. /* Returns previous suspend state (true means suspend was enabled) */
  478. static bool
  479. ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
  480. {
  481. bool suspended;
  482. if (endpoint->ipa->version >= IPA_VERSION_4_0)
  483. return enable; /* For IPA v4.0+, no change made */
  484. WARN_ON(endpoint->toward_ipa);
  485. suspended = ipa_endpoint_init_ctrl(endpoint, enable);
  486. /* A client suspended with an open aggregation frame will not
  487. * generate a SUSPEND IPA interrupt. If enabling suspend, have
  488. * ipa_endpoint_suspend_aggr() handle this.
  489. */
  490. if (enable && !suspended)
  491. ipa_endpoint_suspend_aggr(endpoint);
  492. return suspended;
  493. }
  494. /* Put all modem RX endpoints into suspend mode, and stop transmission
  495. * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
  496. * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
  497. * control instead.
  498. */
  499. void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
  500. {
  501. u32 endpoint_id = 0;
  502. while (endpoint_id < ipa->endpoint_count) {
  503. struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
  504. if (endpoint->ee_id != GSI_EE_MODEM)
  505. continue;
  506. if (!endpoint->toward_ipa)
  507. (void)ipa_endpoint_program_suspend(endpoint, enable);
  508. else if (ipa->version < IPA_VERSION_4_2)
  509. ipa_endpoint_program_delay(endpoint, enable);
  510. else
  511. gsi_modem_channel_flow_control(&ipa->gsi,
  512. endpoint->channel_id,
  513. enable);
  514. }
  515. }
  516. /* Reset all modem endpoints to use the default exception endpoint */
  517. int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
  518. {
  519. struct gsi_trans *trans;
  520. u32 endpoint_id;
  521. u32 count;
  522. /* We need one command per modem TX endpoint, plus the commands
  523. * that clear the pipeline.
  524. */
  525. count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
  526. trans = ipa_cmd_trans_alloc(ipa, count);
  527. if (!trans) {
  528. dev_err(ipa->dev,
  529. "no transaction to reset modem exception endpoints\n");
  530. return -EBUSY;
  531. }
  532. for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
  533. struct ipa_endpoint *endpoint;
  534. const struct reg *reg;
  535. u32 offset;
  536. /* We only reset modem TX endpoints */
  537. endpoint = &ipa->endpoint[endpoint_id];
  538. if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
  539. continue;
  540. reg = ipa_reg(ipa, ENDP_STATUS);
  541. offset = reg_n_offset(reg, endpoint_id);
  542. /* Value written is 0, and all bits are updated. That
  543. * means status is disabled on the endpoint, and as a
  544. * result all other fields in the register are ignored.
  545. */
  546. ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
  547. }
  548. ipa_cmd_pipeline_clear_add(trans);
  549. gsi_trans_commit_wait(trans);
  550. ipa_cmd_pipeline_clear_wait(ipa);
  551. return 0;
  552. }
  553. static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
  554. {
  555. u32 endpoint_id = endpoint->endpoint_id;
  556. struct ipa *ipa = endpoint->ipa;
  557. enum ipa_cs_offload_en enabled;
  558. const struct reg *reg;
  559. u32 val = 0;
  560. reg = ipa_reg(ipa, ENDP_INIT_CFG);
  561. /* FRAG_OFFLOAD_EN is 0 */
  562. if (endpoint->config.checksum) {
  563. enum ipa_version version = ipa->version;
  564. if (endpoint->toward_ipa) {
  565. u32 off;
  566. /* Checksum header offset is in 4-byte units */
  567. off = sizeof(struct rmnet_map_header) / sizeof(u32);
  568. val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
  569. enabled = version < IPA_VERSION_4_5
  570. ? IPA_CS_OFFLOAD_UL
  571. : IPA_CS_OFFLOAD_INLINE;
  572. } else {
  573. enabled = version < IPA_VERSION_4_5
  574. ? IPA_CS_OFFLOAD_DL
  575. : IPA_CS_OFFLOAD_INLINE;
  576. }
  577. } else {
  578. enabled = IPA_CS_OFFLOAD_NONE;
  579. }
  580. val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
  581. /* CS_GEN_QMB_MASTER_SEL is 0 */
  582. iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
  583. }
  584. static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
  585. {
  586. u32 endpoint_id = endpoint->endpoint_id;
  587. struct ipa *ipa = endpoint->ipa;
  588. const struct reg *reg;
  589. u32 val;
  590. if (!endpoint->toward_ipa)
  591. return;
  592. reg = ipa_reg(ipa, ENDP_INIT_NAT);
  593. val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
  594. iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
  595. }
  596. static u32
  597. ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
  598. {
  599. u32 header_size = sizeof(struct rmnet_map_header);
  600. /* Without checksum offload, we just have the MAP header */
  601. if (!endpoint->config.checksum)
  602. return header_size;
  603. if (version < IPA_VERSION_4_5) {
  604. /* Checksum header inserted for AP TX endpoints only */
  605. if (endpoint->toward_ipa)
  606. header_size += sizeof(struct rmnet_map_ul_csum_header);
  607. } else {
  608. /* Checksum header is used in both directions */
  609. header_size += sizeof(struct rmnet_map_v5_csum_header);
  610. }
  611. return header_size;
  612. }
  613. /* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
  614. static u32 ipa_header_size_encode(enum ipa_version version,
  615. const struct reg *reg, u32 header_size)
  616. {
  617. u32 field_max = reg_field_max(reg, HDR_LEN);
  618. u32 val;
  619. /* We know field_max can be used as a mask (2^n - 1) */
  620. val = reg_encode(reg, HDR_LEN, header_size & field_max);
  621. if (version < IPA_VERSION_4_5) {
  622. WARN_ON(header_size > field_max);
  623. return val;
  624. }
  625. /* IPA v4.5 adds a few more most-significant bits */
  626. header_size >>= hweight32(field_max);
  627. WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
  628. val |= reg_encode(reg, HDR_LEN_MSB, header_size);
  629. return val;
  630. }
  631. /* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
  632. static u32 ipa_metadata_offset_encode(enum ipa_version version,
  633. const struct reg *reg, u32 offset)
  634. {
  635. u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
  636. u32 val;
  637. /* We know field_max can be used as a mask (2^n - 1) */
  638. val = reg_encode(reg, HDR_OFST_METADATA, offset);
  639. if (version < IPA_VERSION_4_5) {
  640. WARN_ON(offset > field_max);
  641. return val;
  642. }
  643. /* IPA v4.5 adds a few more most-significant bits */
  644. offset >>= hweight32(field_max);
  645. WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
  646. val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
  647. return val;
  648. }
/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_HDR);
	if (endpoint->config.qmap) {
		enum ipa_version version = ipa->version;
		size_t header_size;

		/* Header size depends on version and checksum offload */
		header_size = ipa_qmap_header_size(version, endpoint);
		val = ipa_header_size_encode(version, reg, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encode(version, reg, off);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
				off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);

			val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
			val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= reg_bit(reg, HDR_OFST_METADATA_VALID);

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/* Initialize the ENDP_INIT_HDR_EXT register, which extends the header
 * configuration:  endianness, pad/length semantics, RX pad alignment,
 * and (for IPA v4.5+) the most-significant bits of one HDR field.
 */
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 pad_align = endpoint->config.rx.pad_align;
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
	if (endpoint->config.qmap) {
		/* We have a header, so we must specify its endianness */
		val |= reg_bit(reg, HDR_ENDIANNESS);	/* big endian */

		/* A QMAP header contains a 6 bit pad field at offset 0.
		 * The RMNet driver assumes this field is meaningful in
		 * packets it receives, and assumes the header's payload
		 * length includes that padding.  The RMNet driver does
		 * *not* pad packets it sends, however, so the pad field
		 * (although 0) should be ignored.
		 */
		if (!endpoint->toward_ipa) {
			val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
			val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
		}
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->config.qmap && !endpoint->toward_ipa) {
			u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
			u32 off;	/* Field offset within header */

			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Low bits are in the ENDP_INIT_HDR register */
			off >>= hweight32(mask);
			val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
  750. static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
  751. {
  752. u32 endpoint_id = endpoint->endpoint_id;
  753. struct ipa *ipa = endpoint->ipa;
  754. const struct reg *reg;
  755. u32 val = 0;
  756. u32 offset;
  757. if (endpoint->toward_ipa)
  758. return; /* Register not valid for TX endpoints */
  759. reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
  760. offset = reg_n_offset(reg, endpoint_id);
  761. /* Note that HDR_ENDIANNESS indicates big endian header fields */
  762. if (endpoint->config.qmap)
  763. val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
  764. iowrite32(val, ipa->reg_virt + offset);
  765. }
  766. static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
  767. {
  768. struct ipa *ipa = endpoint->ipa;
  769. const struct reg *reg;
  770. u32 offset;
  771. u32 val;
  772. if (!endpoint->toward_ipa)
  773. return; /* Register not valid for RX endpoints */
  774. reg = ipa_reg(ipa, ENDP_INIT_MODE);
  775. if (endpoint->config.dma_mode) {
  776. enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
  777. u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
  778. val = reg_encode(reg, ENDP_MODE, IPA_DMA);
  779. val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
  780. } else {
  781. val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
  782. }
  783. /* All other bits unspecified (and 0) */
  784. offset = reg_n_offset(reg, endpoint->endpoint_id);
  785. iowrite32(val, ipa->reg_virt + offset);
  786. }
  787. /* For IPA v4.5+, times are expressed using Qtime. A time is represented
  788. * at one of several available granularities, which are configured in
  789. * ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
  790. * generators are set up with different "tick" periods. A Qtime value
  791. * encodes a tick count along with an indication of a pulse generator
  792. * (which has a fixed tick period). Two pulse generators are always
  793. * available to the AP; a third is available starting with IPA v5.0.
  794. * This function determines which pulse generator most accurately
  795. * represents the time period provided, and returns the tick count to
  796. * use to represent that time.
  797. */
  798. static u32
  799. ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
  800. {
  801. u32 which = 0;
  802. u32 ticks;
  803. /* Pulse generator 0 has 100 microsecond granularity */
  804. ticks = DIV_ROUND_CLOSEST(microseconds, 100);
  805. if (ticks <= max)
  806. goto out;
  807. /* Pulse generator 1 has millisecond granularity */
  808. which = 1;
  809. ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
  810. if (ticks <= max)
  811. goto out;
  812. if (ipa->version >= IPA_VERSION_5_0) {
  813. /* Pulse generator 2 has 10 millisecond granularity */
  814. which = 2;
  815. ticks = DIV_ROUND_CLOSEST(microseconds, 100);
  816. }
  817. WARN_ON(ticks > max);
  818. out:
  819. *select = which;
  820. return ticks;
  821. }
  822. /* Encode the aggregation timer limit (microseconds) based on IPA version */
  823. static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
  824. u32 microseconds)
  825. {
  826. u32 ticks;
  827. u32 max;
  828. if (!microseconds)
  829. return 0; /* Nothing to compute if time limit is 0 */
  830. max = reg_field_max(reg, TIME_LIMIT);
  831. if (ipa->version >= IPA_VERSION_4_5) {
  832. u32 select;
  833. ticks = ipa_qtime_val(ipa, microseconds, max, &select);
  834. return reg_encode(reg, AGGR_GRAN_SEL, select) |
  835. reg_encode(reg, TIME_LIMIT, ticks);
  836. }
  837. /* We program aggregation granularity in ipa_hardware_config() */
  838. ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
  839. WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
  840. microseconds, max * IPA_AGGR_GRANULARITY);
  841. return reg_encode(reg, TIME_LIMIT, ticks);
  842. }
/* Program the ENDP_INIT_AGGR register.  Aggregating RX endpoints use
 * generic aggregation with byte and time limits; aggregating TX
 * endpoints use QMAP deaggregation; otherwise aggregation is bypassed.
 */
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_AGGR);
	if (endpoint->config.aggregation) {
		if (!endpoint->toward_ipa) {
			const struct ipa_endpoint_rx *rx_config;
			u32 buffer_size;
			u32 limit;

			rx_config = &endpoint->config.rx;
			val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
			val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);

			/* Byte limit reflects the usable part of the
			 * receive buffer (skb headroom excluded).
			 */
			buffer_size = rx_config->buffer_size;
			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
						 rx_config->aggr_hard_limit);
			val |= reg_encode(reg, BYTE_LIMIT, limit);

			limit = rx_config->aggr_time_limit;
			val |= aggr_time_limit_encode(ipa, reg, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			if (rx_config->aggr_close_eof)
				val |= reg_bit(reg, SW_EOF_ACTIVE);
		} else {
			val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
			val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
		/* other fields ignored */
	}

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
  880. /* The head-of-line blocking timer is defined as a tick count. For
  881. * IPA version 4.5 the tick count is based on the Qtimer, which is
  882. * derived from the 19.2 MHz SoC XO clock. For older IPA versions
  883. * each tick represents 128 cycles of the IPA core clock.
  884. *
  885. * Return the encoded value representing the timeout period provided
  886. * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
  887. */
  888. static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
  889. u32 microseconds)
  890. {
  891. u32 width;
  892. u32 scale;
  893. u64 ticks;
  894. u64 rate;
  895. u32 high;
  896. u32 val;
  897. if (!microseconds)
  898. return 0; /* Nothing to compute if timer period is 0 */
  899. if (ipa->version >= IPA_VERSION_4_5) {
  900. u32 max = reg_field_max(reg, TIMER_LIMIT);
  901. u32 select;
  902. u32 ticks;
  903. ticks = ipa_qtime_val(ipa, microseconds, max, &select);
  904. return reg_encode(reg, TIMER_GRAN_SEL, 1) |
  905. reg_encode(reg, TIMER_LIMIT, ticks);
  906. }
  907. /* Use 64 bit arithmetic to avoid overflow */
  908. rate = ipa_core_clock_rate(ipa);
  909. ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
  910. /* We still need the result to fit into the field */
  911. WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
  912. /* IPA v3.5.1 through v4.1 just record the tick count */
  913. if (ipa->version < IPA_VERSION_4_2)
  914. return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
  915. /* For IPA v4.2, the tick count is represented by base and
  916. * scale fields within the 32-bit timer register, where:
  917. * ticks = base << scale;
  918. * The best precision is achieved when the base value is as
  919. * large as possible. Find the highest set bit in the tick
  920. * count, and extract the number of bits in the base field
  921. * such that high bit is included.
  922. */
  923. high = fls(ticks); /* 1..32 (or warning above) */
  924. width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
  925. scale = high > width ? high - width : 0;
  926. if (scale) {
  927. /* If we're scaling, round up to get a closer result */
  928. ticks += 1 << (scale - 1);
  929. /* High bit was set, so rounding might have affected it */
  930. if (fls(ticks) != high)
  931. scale++;
  932. }
  933. val = reg_encode(reg, TIMER_SCALE, scale);
  934. val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
  935. return val;
  936. }
  937. /* If microseconds is 0, timeout is immediate */
  938. static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
  939. u32 microseconds)
  940. {
  941. u32 endpoint_id = endpoint->endpoint_id;
  942. struct ipa *ipa = endpoint->ipa;
  943. const struct reg *reg;
  944. u32 val;
  945. /* This should only be changed when HOL_BLOCK_EN is disabled */
  946. reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
  947. val = hol_block_timer_encode(ipa, reg, microseconds);
  948. iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
  949. }
/* Enable or disable head-of-line blocking for an endpoint.
 *
 * @endpoint:	Endpoint whose HOL_BLOCK_EN register is written
 * @enable:	Whether HOL blocking should be enabled
 */
static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 offset;
	u32 val;

	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
	offset = reg_n_offset(reg, endpoint_id);
	val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;

	iowrite32(val, ipa->reg_virt + offset);

	/* When enabling, the register must be written twice for IPA v4.5+ */
	if (enable && ipa->version >= IPA_VERSION_4_5)
		iowrite32(val, ipa->reg_virt + offset);
}
/* Set the head-of-line blocking timer, then enable HOL blocking.
 * Assumes HOL_BLOCK is in disabled state.
 */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
{
	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
	ipa_endpoint_init_hol_block_en(endpoint, true);
}
/* Turn off head-of-line blocking for an endpoint */
static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{
	ipa_endpoint_init_hol_block_en(endpoint, false);
}
  977. void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
  978. {
  979. u32 endpoint_id = 0;
  980. while (endpoint_id < ipa->endpoint_count) {
  981. struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
  982. if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
  983. continue;
  984. ipa_endpoint_init_hol_block_disable(endpoint);
  985. ipa_endpoint_init_hol_block_enable(endpoint, 0);
  986. }
  987. }
  988. static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
  989. {
  990. u32 endpoint_id = endpoint->endpoint_id;
  991. struct ipa *ipa = endpoint->ipa;
  992. const struct reg *reg;
  993. u32 val = 0;
  994. if (!endpoint->toward_ipa)
  995. return; /* Register not valid for RX endpoints */
  996. reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
  997. /* DEAGGR_HDR_LEN is 0 */
  998. /* PACKET_OFFSET_VALID is 0 */
  999. /* PACKET_OFFSET_LOCATION is ignored (not valid) */
  1000. /* MAX_PACKET_LEN is 0 (not enforced) */
  1001. iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
  1002. }
  1003. static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
  1004. {
  1005. u32 resource_group = endpoint->config.resource_group;
  1006. u32 endpoint_id = endpoint->endpoint_id;
  1007. struct ipa *ipa = endpoint->ipa;
  1008. const struct reg *reg;
  1009. u32 val;
  1010. reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
  1011. val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);
  1012. iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
  1013. }
  1014. static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
  1015. {
  1016. u32 endpoint_id = endpoint->endpoint_id;
  1017. struct ipa *ipa = endpoint->ipa;
  1018. const struct reg *reg;
  1019. u32 val;
  1020. if (!endpoint->toward_ipa)
  1021. return; /* Register not valid for RX endpoints */
  1022. reg = ipa_reg(ipa, ENDP_INIT_SEQ);
  1023. /* Low-order byte configures primary packet processing */
  1024. val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
  1025. /* Second byte (if supported) configures replicated packet processing */
  1026. if (ipa->version < IPA_VERSION_4_5)
  1027. val |= reg_encode(reg, SEQ_REP_TYPE,
  1028. endpoint->config.tx.seq_rep_type);
  1029. iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
  1030. }
/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * On success the transaction takes ownership of @skb; on failure the
 * caller retains it.
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > endpoint->skb_frag_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;	/* Linearized:  only the linear data left */
	}

	/* One TRE for the linear data, plus one per fragment */
	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}
/* Program the ENDP_STATUS register, which controls whether IPA status
 * descriptors are generated for the endpoint and, for TX endpoints,
 * which endpoint the status descriptors are delivered to.
 */
static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_STATUS);
	if (endpoint->config.status_enable) {
		val |= reg_bit(reg, STATUS_EN);
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			/* TX status goes to the configured endpoint */
			name = endpoint->config.tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= reg_encode(reg, STATUS_ENDP,
					  status_endpoint_id);
		}
		/* STATUS_LOCATION is 0, meaning IPA packet status
		 * precedes the packet (not present for IPA v4.5+)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
	}

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
  1089. static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
  1090. struct gsi_trans *trans)
  1091. {
  1092. struct page *page;
  1093. u32 buffer_size;
  1094. u32 offset;
  1095. u32 len;
  1096. int ret;
  1097. buffer_size = endpoint->config.rx.buffer_size;
  1098. page = dev_alloc_pages(get_order(buffer_size));
  1099. if (!page)
  1100. return -ENOMEM;
  1101. /* Offset the buffer to make space for skb headroom */
  1102. offset = NET_SKB_PAD;
  1103. len = buffer_size - offset;
  1104. ret = gsi_trans_page_add(trans, page, len, offset);
  1105. if (ret)
  1106. put_page(page);
  1107. else
  1108. trans->data = page; /* transaction owns page now */
  1109. return ret;
  1110. }
/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:	Endpoint to be replenished
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case buffers are not queued to
 * the hardware.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Skip it if it's already active */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	/* Queue one buffer per transaction until allocation fails */
	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
		bool doorbell;

		if (ipa_endpoint_replenish_one(endpoint, trans))
			goto try_again_later;

		/* Ring the doorbell if we've got a full batch */
		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
		gsi_trans_commit(trans, doorbell);
	}

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	return;

try_again_later:
	gsi_trans_free(trans);
	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * If the hardware has no receive buffers queued, schedule work to
	 * try replenishing again.
	 */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
/* Allow replenishing, and prime the endpoint if the hardware is empty */
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);

	/* Start replenishing if hardware currently has no buffers */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		ipa_endpoint_replenish(endpoint);
}
/* Stop supplying new receive buffers to the hardware */
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}
  1164. static void ipa_endpoint_replenish_work(struct work_struct *work)
  1165. {
  1166. struct delayed_work *dwork = to_delayed_work(work);
  1167. struct ipa_endpoint *endpoint;
  1168. endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
  1169. ipa_endpoint_replenish(endpoint);
  1170. }
/* Copy received packet data into a freshly-allocated skb and hand it to
 * the network stack.  The "extra" amount is added to the skb's truesize
 * to account for unused receive buffer space (see
 * ipa_endpoint_status_parse()).  If skb allocation fails, a NULL skb is
 * passed to ipa_modem_skb_rx(), which records the drop.
 */
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		/* Copy the data into the socket buffer and receive it */
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	ipa_modem_skb_rx(endpoint->netdev, skb);
}
/* Build an skb around a receive buffer page and pass it up the stack.
 * Returns true if the skb was built (in which case it has taken
 * ownership of the page); false if there is no netdev or the skb
 * could not be built.
 */
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* Received length must fit in the usable part of the buffer */
	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));

	skb = build_skb(page_address(page), buffer_size);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}
  1205. /* The format of an IPA packet status structure is the same for several
  1206. * status types (opcodes). Other types aren't currently supported.
  1207. */
  1208. static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
  1209. {
  1210. switch (opcode) {
  1211. case IPA_STATUS_OPCODE_PACKET:
  1212. case IPA_STATUS_OPCODE_DROPPED_PACKET:
  1213. case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
  1214. case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
  1215. return true;
  1216. default:
  1217. return false;
  1218. }
  1219. }
  1220. static bool
  1221. ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
  1222. {
  1223. struct ipa *ipa = endpoint->ipa;
  1224. enum ipa_status_opcode opcode;
  1225. u32 endpoint_id;
  1226. opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
  1227. if (!ipa_status_format_packet(opcode))
  1228. return true;
  1229. endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
  1230. if (endpoint_id != endpoint->endpoint_id)
  1231. return true;
  1232. return false; /* Don't skip this packet, process it */
  1233. }
  1234. static bool
  1235. ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
  1236. {
  1237. struct ipa_endpoint *command_endpoint;
  1238. enum ipa_status_mask status_mask;
  1239. struct ipa *ipa = endpoint->ipa;
  1240. u32 endpoint_id;
  1241. status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
  1242. if (!status_mask)
  1243. return false; /* No valid tag */
  1244. /* The status contains a valid tag. We know the packet was sent to
  1245. * this endpoint (already verified by ipa_endpoint_status_skip()).
  1246. * If the packet came from the AP->command TX endpoint we know
  1247. * this packet was sent as part of the pipeline clear process.
  1248. */
  1249. endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
  1250. command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
  1251. if (endpoint_id == command_endpoint->endpoint_id) {
  1252. complete(&ipa->completion);
  1253. } else {
  1254. dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n",
  1255. endpoint_id);
  1256. }
  1257. return true;
  1258. }
  1259. /* Return whether the status indicates the packet should be dropped */
  1260. static bool
  1261. ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
  1262. {
  1263. enum ipa_status_exception exception;
  1264. struct ipa *ipa = endpoint->ipa;
  1265. u32 rule;
  1266. /* If the status indicates a tagged transfer, we'll drop the packet */
  1267. if (ipa_endpoint_status_tag_valid(endpoint, data))
  1268. return true;
  1269. /* Deaggregation exceptions we drop; all other types we consume */
  1270. exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
  1271. if (exception)
  1272. return exception == IPA_STATUS_EXCEPTION_DEAGGR;
  1273. /* Drop the packet if it fails to match a routing rule; otherwise no */
  1274. rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
  1275. return rule == IPA_STATUS_RULE_MISS;
  1276. }
/* Walk a receive buffer containing IPA status descriptors.  Each packet
 * in the buffer is preceded by a status structure; packets that aren't
 * skipped or dropped are copied into new skbs and handed to the stack.
 */
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = buffer_size - total_len;
	struct ipa *ipa = endpoint->ipa;
	struct device *dev = ipa->dev;
	u32 resid = total_len;

	while (resid) {
		u32 length;
		u32 align;
		u32 len;

		/* A complete status structure must remain */
		if (resid < IPA_STATUS_SIZE) {
			dev_err(dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, IPA_STATUS_SIZE);
			break;
		}

		/* Skip over status packets that lack packet data */
		length = ipa_status_extract(ipa, data, STATUS_LENGTH);
		if (!length || ipa_endpoint_status_skip(endpoint, data)) {
			data += IPA_STATUS_SIZE;
			resid -= IPA_STATUS_SIZE;
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status.  If the hardware is configured to
		 * pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->config.rx.pad_align ? : 1;
		len = IPA_STATUS_SIZE + ALIGN(length, align);
		if (endpoint->config.checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, data)) {
			void *data2;
			u32 extra;

			/* Client receives only packet data (no status) */
			data2 = data + IPA_STATUS_SIZE;

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
			ipa_endpoint_skb_copy(endpoint, data2, length, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}
/* Complete an RX transaction:  hand its receive buffer to the network
 * stack (parsing status descriptors if enabled), then try to replenish
 * the endpoint's receive buffers.
 */
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	struct page *page;

	/* Only RX transactions are completed here */
	if (endpoint->toward_ipa)
		return;

	if (trans->cancelled)
		goto done;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->config.status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
done:
	ipa_endpoint_replenish(endpoint);
}
  1348. void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
  1349. struct gsi_trans *trans)
  1350. {
  1351. if (endpoint->toward_ipa) {
  1352. struct ipa *ipa = endpoint->ipa;
  1353. /* Nothing to do for command transactions */
  1354. if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
  1355. struct sk_buff *skb = trans->data;
  1356. if (skb)
  1357. dev_kfree_skb_any(skb);
  1358. }
  1359. } else {
  1360. struct page *page = trans->data;
  1361. if (page)
  1362. put_page(page);
  1363. }
  1364. }
/* Program the ROUTE register so the default route (for both normal and
 * fragmented packets) points at the given endpoint, with the header
 * table used and the header retained.
 */
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	const struct reg *reg;
	u32 val;

	reg = ipa_reg(ipa, ROUTE);
	/* ROUTE_DIS is 0 */
	val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
	val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
	/* ROUTE_DEF_HDR_OFST is 0 */
	val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
	val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);

	iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* Clear the default route by pointing it back at endpoint 0 */
void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}
/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct device *dev = ipa->dev;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;	/* single byte, consumed by the dummy read below */
	void *virt;
	int ret;

	/* A one-byte DMA buffer is needed to drain the pipeline */
	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel, sleeping
	 * roughly a millisecond between checks.
	 */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
	} while (retries--);

	/* Check one last time; report (but proceed) if still active */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, true);

	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}
  1461. static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
  1462. {
  1463. u32 channel_id = endpoint->channel_id;
  1464. struct ipa *ipa = endpoint->ipa;
  1465. bool special;
  1466. int ret = 0;
  1467. /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
  1468. * is active, we need to handle things specially to recover.
  1469. * All other cases just need to reset the underlying GSI channel.
  1470. */
  1471. special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
  1472. endpoint->config.aggregation;
  1473. if (special && ipa_endpoint_aggr_active(endpoint))
  1474. ret = ipa_endpoint_reset_rx_aggr(endpoint);
  1475. else
  1476. gsi_channel_reset(&ipa->gsi, channel_id, true);
  1477. if (ret)
  1478. dev_err(ipa->dev,
  1479. "error %d resetting channel %u for endpoint %u\n",
  1480. ret, endpoint->channel_id, endpoint->endpoint_id);
  1481. }
/* Program all configuration registers for an endpoint.  TX endpoints
 * first have DELAY mode cleared (older IPA); RX endpoints have SUSPEND
 * mode cleared.  The remaining init calls each write one register group.
 */
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		/* Newer versions of IPA use GSI channel flow control
		 * instead of endpoint DELAY mode to prevent sending data.
		 * Flow control is disabled for newly-allocated channels,
		 * and we can assume flow control is not (ever) enabled
		 * for AP TX channels.
		 */
		if (endpoint->ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, false);
	} else {
		/* Ensure suspend mode is off on all AP RX endpoints */
		(void)ipa_endpoint_program_suspend(endpoint, false);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_nat(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	if (!endpoint->toward_ipa) {
		/* Enable or disable head-of-line-block drop as configured */
		if (endpoint->config.rx.holb_drop)
			ipa_endpoint_init_hol_block_enable(endpoint, 0);
		else
			ipa_endpoint_init_hol_block_disable(endpoint);
	}
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}
  1515. int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
  1516. {
  1517. u32 endpoint_id = endpoint->endpoint_id;
  1518. struct ipa *ipa = endpoint->ipa;
  1519. struct gsi *gsi = &ipa->gsi;
  1520. int ret;
  1521. ret = gsi_channel_start(gsi, endpoint->channel_id);
  1522. if (ret) {
  1523. dev_err(ipa->dev,
  1524. "error %d starting %cX channel %u for endpoint %u\n",
  1525. ret, endpoint->toward_ipa ? 'T' : 'R',
  1526. endpoint->channel_id, endpoint_id);
  1527. return ret;
  1528. }
  1529. if (!endpoint->toward_ipa) {
  1530. ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
  1531. ipa_endpoint_replenish_enable(endpoint);
  1532. }
  1533. __set_bit(endpoint_id, ipa->enabled);
  1534. return 0;
  1535. }
  1536. void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
  1537. {
  1538. u32 endpoint_id = endpoint->endpoint_id;
  1539. struct ipa *ipa = endpoint->ipa;
  1540. struct gsi *gsi = &ipa->gsi;
  1541. int ret;
  1542. if (!test_bit(endpoint_id, ipa->enabled))
  1543. return;
  1544. __clear_bit(endpoint_id, endpoint->ipa->enabled);
  1545. if (!endpoint->toward_ipa) {
  1546. ipa_endpoint_replenish_disable(endpoint);
  1547. ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
  1548. }
  1549. /* Note that if stop fails, the channel's state is not well-defined */
  1550. ret = gsi_channel_stop(gsi, endpoint->channel_id);
  1551. if (ret)
  1552. dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n",
  1553. ret, endpoint_id);
  1554. }
  1555. void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
  1556. {
  1557. struct device *dev = endpoint->ipa->dev;
  1558. struct gsi *gsi = &endpoint->ipa->gsi;
  1559. int ret;
  1560. if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
  1561. return;
  1562. if (!endpoint->toward_ipa) {
  1563. ipa_endpoint_replenish_disable(endpoint);
  1564. (void)ipa_endpoint_program_suspend(endpoint, true);
  1565. }
  1566. ret = gsi_channel_suspend(gsi, endpoint->channel_id);
  1567. if (ret)
  1568. dev_err(dev, "error %d suspending channel %u\n", ret,
  1569. endpoint->channel_id);
  1570. }
  1571. void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
  1572. {
  1573. struct device *dev = endpoint->ipa->dev;
  1574. struct gsi *gsi = &endpoint->ipa->gsi;
  1575. int ret;
  1576. if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
  1577. return;
  1578. if (!endpoint->toward_ipa)
  1579. (void)ipa_endpoint_program_suspend(endpoint, false);
  1580. ret = gsi_channel_resume(gsi, endpoint->channel_id);
  1581. if (ret)
  1582. dev_err(dev, "error %d resuming channel %u\n", ret,
  1583. endpoint->channel_id);
  1584. else if (!endpoint->toward_ipa)
  1585. ipa_endpoint_replenish_enable(endpoint);
  1586. }
/* Suspend IPA endpoints for system suspend.  The modem netdev is
 * quiesced first, then the LAN RX and command TX endpoints (the
 * reverse of the order used in ipa_endpoint_resume()).
 */
void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}
/* Resume IPA endpoints after system suspend, in the reverse order
 * used in ipa_endpoint_suspend(): command TX and LAN RX endpoints
 * first, then the modem netdev.
 */
void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}
  1605. static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
  1606. {
  1607. struct gsi *gsi = &endpoint->ipa->gsi;
  1608. u32 channel_id = endpoint->channel_id;
  1609. /* Only AP endpoints get set up */
  1610. if (endpoint->ee_id != GSI_EE_AP)
  1611. return;
  1612. endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
  1613. if (!endpoint->toward_ipa) {
  1614. /* RX transactions require a single TRE, so the maximum
  1615. * backlog is the same as the maximum outstanding TREs.
  1616. */
  1617. clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
  1618. clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
  1619. INIT_DELAYED_WORK(&endpoint->replenish_work,
  1620. ipa_endpoint_replenish_work);
  1621. }
  1622. ipa_endpoint_program(endpoint);
  1623. __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
  1624. }
  1625. static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
  1626. {
  1627. __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
  1628. if (!endpoint->toward_ipa)
  1629. cancel_delayed_work_sync(&endpoint->replenish_work);
  1630. ipa_endpoint_reset(endpoint);
  1631. }
  1632. void ipa_endpoint_setup(struct ipa *ipa)
  1633. {
  1634. u32 endpoint_id;
  1635. for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
  1636. ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
  1637. }
  1638. void ipa_endpoint_teardown(struct ipa *ipa)
  1639. {
  1640. u32 endpoint_id;
  1641. for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
  1642. ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
  1643. }
/* Inverse of ipa_endpoint_config(): release the available-endpoint bitmap */
void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available_count = 0;
	bitmap_free(ipa->available);
	ipa->available = NULL;
}
/* Learn the endpoint layout from the hardware FLAVOR_0 register, build
 * the "available" endpoint bitmap, and validate every defined endpoint
 * against it (ID in range, available, correct direction).
 * Returns 0 if successful, or a negative error code.
 */
int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	const struct reg *reg;
	u32 endpoint_id;
	u32 hw_limit;
	u32 tx_count;
	u32 rx_count;
	u32 rx_base;
	u32 limit;
	u32 val;

	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
	 * Furthermore, the endpoints were not grouped such that TX
	 * endpoint numbers started with 0 and RX endpoints had numbers
	 * higher than all TX endpoints, so we can't do the simple
	 * direction check used for newer hardware below.
	 *
	 * For hardware that doesn't support the FLAVOR_0 register,
	 * just set the available mask to support any endpoint, and
	 * assume the configuration is valid.
	 */
	if (ipa->version < IPA_VERSION_3_5) {
		ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
		if (!ipa->available)
			return -ENOMEM;
		ipa->available_count = IPA_ENDPOINT_MAX;

		bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);

		return 0;
	}

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number supported by software.
	 */
	reg = ipa_reg(ipa, FLAVOR_0);
	val = ioread32(ipa->reg_virt + reg_offset(reg));

	/* Our RX is an IPA producer; our TX is an IPA consumer. */
	tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
	rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
	rx_base = reg_decode(reg, PROD_LOWEST, val);

	/* RX endpoints are numbered highest, so this bounds all IDs */
	limit = rx_base + rx_count;
	if (limit > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints, %u > %u\n",
			limit, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}

	/* Until IPA v5.0, the max endpoint ID was 32 */
	hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
	if (limit > hw_limit) {
		dev_err(dev, "unexpected endpoint count, %u > %u\n",
			limit, hw_limit);
		return -EINVAL;
	}

	/* Allocate and initialize the available endpoint bitmap */
	ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
	if (!ipa->available)
		return -ENOMEM;
	ipa->available_count = limit;

	/* Mark all supported RX and TX endpoints as available */
	bitmap_set(ipa->available, 0, tx_count);
	bitmap_set(ipa->available, rx_base, rx_count);

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
		struct ipa_endpoint *endpoint;

		if (endpoint_id >= limit) {
			dev_err(dev, "invalid endpoint id, %u > %u\n",
				endpoint_id, limit - 1);
			goto err_free_bitmap;
		}

		if (!test_bit(endpoint_id, ipa->available)) {
			dev_err(dev, "unavailable endpoint id %u\n",
				endpoint_id);
			goto err_free_bitmap;
		}

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if (endpoint->toward_ipa) {
			if (endpoint_id < tx_count)
				continue;
		} else if (endpoint_id >= rx_base) {
			continue;
		}

		dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
		goto err_free_bitmap;
	}

	return 0;

err_free_bitmap:
	ipa_endpoint_deconfig(ipa);

	return -EINVAL;
}
  1737. static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
  1738. const struct ipa_gsi_endpoint_data *data)
  1739. {
  1740. struct ipa_endpoint *endpoint;
  1741. endpoint = &ipa->endpoint[data->endpoint_id];
  1742. if (data->ee_id == GSI_EE_AP)
  1743. ipa->channel_map[data->channel_id] = endpoint;
  1744. ipa->name_map[name] = endpoint;
  1745. endpoint->ipa = ipa;
  1746. endpoint->ee_id = data->ee_id;
  1747. endpoint->channel_id = data->channel_id;
  1748. endpoint->endpoint_id = data->endpoint_id;
  1749. endpoint->toward_ipa = data->toward_ipa;
  1750. endpoint->config = data->endpoint.config;
  1751. __set_bit(endpoint->endpoint_id, ipa->defined);
  1752. }
  1753. static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
  1754. {
  1755. __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
  1756. memset(endpoint, 0, sizeof(*endpoint));
  1757. }
  1758. void ipa_endpoint_exit(struct ipa *ipa)
  1759. {
  1760. u32 endpoint_id;
  1761. ipa->filtered = 0;
  1762. for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
  1763. ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
  1764. bitmap_free(ipa->enabled);
  1765. ipa->enabled = NULL;
  1766. bitmap_free(ipa->set_up);
  1767. ipa->set_up = NULL;
  1768. bitmap_free(ipa->defined);
  1769. ipa->defined = NULL;
  1770. memset(ipa->name_map, 0, sizeof(ipa->name_map));
  1771. memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
  1772. }
/* Initialize endpoint state from configuration data, recording the set
 * of endpoints that support filtering in ipa->filtered.
 * Returns 0 if successful, or a negative error code.
 * (NOTE: this no longer returns the filter bitmask directly.)
 */
int ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filtered;

	/* Replenish batching assumes a nonzero batch size */
	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);

	/* Number of endpoints is one more than the maximum ID */
	ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
	if (!ipa->endpoint_count)
		return -EINVAL;

	/* Initialize endpoint state bitmaps */
	ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->defined)
		return -ENOMEM;

	ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->set_up)
		goto err_free_defined;

	ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->enabled)
		goto err_free_set_up;

	filtered = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filtered |= BIT(data->endpoint_id);
		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
			ipa->modem_tx_count++;
	}

	/* Make sure the set of filtered endpoints is valid */
	if (!ipa_filtered_valid(ipa, filtered)) {
		ipa_endpoint_exit(ipa);

		return -EINVAL;
	}

	ipa->filtered = filtered;

	return 0;

err_free_set_up:
	bitmap_free(ipa->set_up);
	ipa->set_up = NULL;
err_free_defined:
	bitmap_free(ipa->defined);
	ipa->defined = NULL;

	return -ENOMEM;
}