isp1760-hcd.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Driver for the NXP ISP1760 chip
  4. *
  5. * However, the code might contain some bugs. What doesn't work for sure is:
  6. * - ISO
  7. * - OTG
  8. * The interrupt line is configured as active low, level.
  9. *
  10. * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
  11. *
  12. * (c) 2011 Arvid Brodin <arvid.brodin@enea.com>
  13. *
  14. */
  15. #include <linux/gpio/consumer.h>
  16. #include <linux/module.h>
  17. #include <linux/kernel.h>
  18. #include <linux/slab.h>
  19. #include <linux/list.h>
  20. #include <linux/usb.h>
  21. #include <linux/usb/hcd.h>
  22. #include <linux/debugfs.h>
  23. #include <linux/uaccess.h>
  24. #include <linux/io.h>
  25. #include <linux/mm.h>
  26. #include <linux/timer.h>
  27. #include <asm/unaligned.h>
  28. #include <asm/cacheflush.h>
  29. #include "isp1760-core.h"
  30. #include "isp1760-hcd.h"
  31. #include "isp1760-regs.h"
  32. static struct kmem_cache *qtd_cachep;
  33. static struct kmem_cache *qh_cachep;
  34. static struct kmem_cache *urb_listitem_cachep;
  35. typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
  36. struct isp1760_qtd *qtd);
  37. static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
  38. {
  39. return *(struct isp1760_hcd **)hcd->hcd_priv;
  40. }
  41. /* urb state */
  42. #define DELETE_URB (0x0008)
  43. #define NO_TRANSFER_ACTIVE (0xffffffff)
  44. /* Philips Proprietary Transfer Descriptor (PTD) */
  45. typedef __u32 __bitwise __dw;
  46. struct ptd {
  47. __dw dw0;
  48. __dw dw1;
  49. __dw dw2;
  50. __dw dw3;
  51. __dw dw4;
  52. __dw dw5;
  53. __dw dw6;
  54. __dw dw7;
  55. };
  56. #define PTD_OFFSET 0x0400
  57. #define ISO_PTD_OFFSET 0x0400
  58. #define INT_PTD_OFFSET 0x0800
  59. #define ATL_PTD_OFFSET 0x0c00
  60. #define PAYLOAD_OFFSET 0x1000
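/*
 * Rough sketch of the chip memory map implied by the offsets above (this is
 * how ptd_read()/ptd_write() and the payload allocator below use them):
 *
 *   0x0000..0x03ff  registers
 *   0x0400..0x07ff  ISO PTDs  (32 slots, sizeof(struct ptd) == 32 bytes each)
 *   0x0800..0x0bff  INT PTDs
 *   0x0c00..0x0fff  ATL PTDs
 *   0x1000..        payload area
 *
 * A slot's address is ptd_offset + slot * sizeof(struct ptd), e.g. ATL slot 5
 * lives at 0x0c00 + 5 * 32.
 */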
  61. /* ATL */
  62. /* DW0 */
  63. #define DW0_VALID_BIT 1
  64. #define FROM_DW0_VALID(x) ((x) & 0x01)
  65. #define TO_DW0_LENGTH(x) (((u32) x) << 3)
  66. #define TO_DW0_MAXPACKET(x) (((u32) x) << 18)
  67. #define TO_DW0_MULTI(x) (((u32) x) << 29)
  68. #define TO_DW0_ENDPOINT(x) (((u32) x) << 31)
  69. /* DW1 */
  70. #define TO_DW1_DEVICE_ADDR(x) (((u32) x) << 3)
  71. #define TO_DW1_PID_TOKEN(x) (((u32) x) << 10)
  72. #define DW1_TRANS_BULK ((u32) 2 << 12)
  73. #define DW1_TRANS_INT ((u32) 3 << 12)
  74. #define DW1_TRANS_SPLIT ((u32) 1 << 14)
  75. #define DW1_SE_USB_LOSPEED ((u32) 2 << 16)
  76. #define TO_DW1_PORT_NUM(x) (((u32) x) << 18)
  77. #define TO_DW1_HUB_NUM(x) (((u32) x) << 25)
  78. /* DW2 */
  79. #define TO_DW2_DATA_START_ADDR(x) (((u32) x) << 8)
  80. #define TO_DW2_RL(x) ((x) << 25)
  81. #define FROM_DW2_RL(x) (((x) >> 25) & 0xf)
  82. /* DW3 */
  83. #define FROM_DW3_NRBYTESTRANSFERRED(x) ((x) & 0x7fff)
  84. #define FROM_DW3_SCS_NRBYTESTRANSFERRED(x) ((x) & 0x07ff)
  85. #define TO_DW3_NAKCOUNT(x) ((x) << 19)
  86. #define FROM_DW3_NAKCOUNT(x) (((x) >> 19) & 0xf)
  87. #define TO_DW3_CERR(x) ((x) << 23)
  88. #define FROM_DW3_CERR(x) (((x) >> 23) & 0x3)
  89. #define TO_DW3_DATA_TOGGLE(x) ((x) << 25)
  90. #define FROM_DW3_DATA_TOGGLE(x) (((x) >> 25) & 0x1)
  91. #define TO_DW3_PING(x) ((x) << 26)
  92. #define FROM_DW3_PING(x) (((x) >> 26) & 0x1)
  93. #define DW3_ERROR_BIT (1 << 28)
  94. #define DW3_BABBLE_BIT (1 << 29)
  95. #define DW3_HALT_BIT (1 << 30)
  96. #define DW3_ACTIVE_BIT (1 << 31)
  97. #define FROM_DW3_ACTIVE(x) (((x) >> 31) & 0x01)
  98. #define INT_UNDERRUN (1 << 2)
  99. #define INT_BABBLE (1 << 1)
  100. #define INT_EXACT (1 << 0)
  101. #define SETUP_PID (2)
  102. #define IN_PID (1)
  103. #define OUT_PID (0)
  104. /* Errata 1 */
  105. #define RL_COUNTER (0)
  106. #define NAK_COUNTER (0)
  107. #define ERR_COUNTER (2)
  108. struct isp1760_qtd {
  109. u8 packet_type;
  110. void *data_buffer;
  111. u32 payload_addr;
  112. /* the rest is HCD-private */
  113. struct list_head qtd_list;
  114. struct urb *urb;
  115. size_t length;
  116. size_t actual_length;
  117. /* QTD_ENQUEUED: waiting for transfer (inactive) */
  118. /* QTD_PAYLOAD_ALLOC: chip mem has been allocated for payload */
  119. /* QTD_XFER_STARTED: valid ptd has been written to isp176x - only
  120. interrupt handler may touch this qtd! */
  121. /* QTD_XFER_COMPLETE: payload has been transferred successfully */
  122. /* QTD_RETIRE: transfer error/abort qtd */
  123. #define QTD_ENQUEUED 0
  124. #define QTD_PAYLOAD_ALLOC 1
  125. #define QTD_XFER_STARTED 2
  126. #define QTD_XFER_COMPLETE 3
  127. #define QTD_RETIRE 4
  128. u32 status;
  129. };
  130. /* Queue head, one for each active endpoint */
  131. struct isp1760_qh {
  132. struct list_head qh_list;
  133. struct list_head qtd_list;
  134. u32 toggle;
  135. u32 ping;
  136. int slot;
  137. int tt_buffer_dirty; /* See USB2.0 spec section 11.17.5 */
  138. };
  139. struct urb_listitem {
  140. struct list_head urb_list;
  141. struct urb *urb;
  142. };
  143. /*
  144. * Access functions for isp176x registers (addresses 0..0x03FF).
  145. */
  146. static u32 reg_read32(void __iomem *base, u32 reg)
  147. {
  148. return isp1760_read32(base, reg);
  149. }
  150. static void reg_write32(void __iomem *base, u32 reg, u32 val)
  151. {
  152. isp1760_write32(base, reg, val);
  153. }
  154. /*
  155. * Access functions for isp176x memory (offset >= 0x0400).
  156. *
  157. * bank_reads8() reads memory locations prefetched by an earlier write to
  158. * HC_MEMORY_REG (see isp176x datasheet). Unless you want to do fancy multi-
  159. * bank optimizations, you should use the more generic mem_reads8() below.
  160. *
  161. * For access to ptd memory, use the specialized ptd_read() and ptd_write()
  162. * below.
  163. *
  164. * These functions copy via MMIO data to/from the device. memcpy_{to|from}io()
  165. * doesn't quite work because some platforms have to enforce 32-bit access
  166. */
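/*
 * Typical use in this driver (illustrative only): payload data is copied to
 * chip memory before a PTD is started and read back once it has completed,
 * roughly:
 *
 *   mem_writes8(hcd->regs, qtd->payload_addr, qtd->data_buffer, qtd->length);
 *   ...
 *   mem_reads8(hcd->regs, qtd->payload_addr, qtd->data_buffer,
 *              qtd->actual_length);
 *
 * as done in enqueue_qtds() and collect_qtds() below.
 */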
  167. static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
  168. __u32 *dst, u32 bytes)
  169. {
  170. __u32 __iomem *src;
  171. u32 val;
  172. __u8 *src_byteptr;
  173. __u8 *dst_byteptr;
  174. src = src_base + (bank_addr | src_offset);
  175. if (src_offset < PAYLOAD_OFFSET) {
  176. while (bytes >= 4) {
  177. *dst = le32_to_cpu(__raw_readl(src));
  178. bytes -= 4;
  179. src++;
  180. dst++;
  181. }
  182. } else {
  183. while (bytes >= 4) {
  184. *dst = __raw_readl(src);
  185. bytes -= 4;
  186. src++;
  187. dst++;
  188. }
  189. }
  190. if (!bytes)
  191. return;
  192. /* in case we have 3, 2 or 1 bytes left. The dst buffer may not be fully
  193. * allocated.
  194. */
  195. if (src_offset < PAYLOAD_OFFSET)
  196. val = le32_to_cpu(__raw_readl(src));
  197. else
  198. val = __raw_readl(src);
  199. dst_byteptr = (void *) dst;
  200. src_byteptr = (void *) &val;
  201. while (bytes > 0) {
  202. *dst_byteptr = *src_byteptr;
  203. dst_byteptr++;
  204. src_byteptr++;
  205. bytes--;
  206. }
  207. }
  208. static void mem_reads8(void __iomem *src_base, u32 src_offset, void *dst,
  209. u32 bytes)
  210. {
  211. reg_write32(src_base, HC_MEMORY_REG, src_offset + ISP_BANK(0));
  212. ndelay(90);
  213. bank_reads8(src_base, src_offset, ISP_BANK(0), dst, bytes);
  214. }
  215. static void mem_writes8(void __iomem *dst_base, u32 dst_offset,
  216. __u32 const *src, u32 bytes)
  217. {
  218. __u32 __iomem *dst;
  219. dst = dst_base + dst_offset;
  220. if (dst_offset < PAYLOAD_OFFSET) {
  221. while (bytes >= 4) {
  222. __raw_writel(cpu_to_le32(*src), dst);
  223. bytes -= 4;
  224. src++;
  225. dst++;
  226. }
  227. } else {
  228. while (bytes >= 4) {
  229. __raw_writel(*src, dst);
  230. bytes -= 4;
  231. src++;
  232. dst++;
  233. }
  234. }
  235. if (!bytes)
  236. return;
  237. /* in case we have 3, 2 or 1 bytes left. The buffer is allocated and the
  238. * extra bytes should not be read by the HW.
  239. */
  240. if (dst_offset < PAYLOAD_OFFSET)
  241. __raw_writel(cpu_to_le32(*src), dst);
  242. else
  243. __raw_writel(*src, dst);
  244. }
  245. /*
  246. * Read and write ptds. 'ptd_offset' should be one of ISO_PTD_OFFSET,
  247. * INT_PTD_OFFSET, and ATL_PTD_OFFSET. 'slot' should be less than 32.
  248. */
  249. static void ptd_read(void __iomem *base, u32 ptd_offset, u32 slot,
  250. struct ptd *ptd)
  251. {
  252. reg_write32(base, HC_MEMORY_REG,
  253. ISP_BANK(0) + ptd_offset + slot*sizeof(*ptd));
  254. ndelay(90);
  255. bank_reads8(base, ptd_offset + slot*sizeof(*ptd), ISP_BANK(0),
  256. (void *) ptd, sizeof(*ptd));
  257. }
  258. static void ptd_write(void __iomem *base, u32 ptd_offset, u32 slot,
  259. struct ptd *ptd)
  260. {
  261. mem_writes8(base, ptd_offset + slot*sizeof(*ptd) + sizeof(ptd->dw0),
  262. &ptd->dw1, 7*sizeof(ptd->dw1));
  263. /* Make sure dw0 gets written last (after other dw's and after payload)
  264. since it contains the enable bit */
  265. wmb();
  266. mem_writes8(base, ptd_offset + slot*sizeof(*ptd), &ptd->dw0,
  267. sizeof(ptd->dw0));
  268. }
  269. /* memory management of the 60 KB on the chip from 0x1000 to 0xffff */
  270. static void init_memory(struct isp1760_hcd *priv)
  271. {
  272. int i, curr;
  273. u32 payload_addr;
  274. payload_addr = PAYLOAD_OFFSET;
  275. for (i = 0; i < BLOCK_1_NUM; i++) {
  276. priv->memory_pool[i].start = payload_addr;
  277. priv->memory_pool[i].size = BLOCK_1_SIZE;
  278. priv->memory_pool[i].free = 1;
  279. payload_addr += priv->memory_pool[i].size;
  280. }
  281. curr = i;
  282. for (i = 0; i < BLOCK_2_NUM; i++) {
  283. priv->memory_pool[curr + i].start = payload_addr;
  284. priv->memory_pool[curr + i].size = BLOCK_2_SIZE;
  285. priv->memory_pool[curr + i].free = 1;
  286. payload_addr += priv->memory_pool[curr + i].size;
  287. }
  288. curr = i;
  289. for (i = 0; i < BLOCK_3_NUM; i++) {
  290. priv->memory_pool[curr + i].start = payload_addr;
  291. priv->memory_pool[curr + i].size = BLOCK_3_SIZE;
  292. priv->memory_pool[curr + i].free = 1;
  293. payload_addr += priv->memory_pool[curr + i].size;
  294. }
  295. WARN_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE);
  296. }
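/*
 * alloc_mem()/free_mem() below implement a simple first-fit allocator over
 * the fixed-size blocks set up in init_memory(): the first free block whose
 * size covers qtd->length is claimed; freeing matches on the block's start
 * address.  If no block fits, qtd->payload_addr stays 0 and the qtd is
 * retried on a later scheduling pass (see enqueue_qtds()).
 */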
  297. static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
  298. {
  299. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  300. int i;
  301. WARN_ON(qtd->payload_addr);
  302. if (!qtd->length)
  303. return;
  304. for (i = 0; i < BLOCKS; i++) {
  305. if (priv->memory_pool[i].size >= qtd->length &&
  306. priv->memory_pool[i].free) {
  307. priv->memory_pool[i].free = 0;
  308. qtd->payload_addr = priv->memory_pool[i].start;
  309. return;
  310. }
  311. }
  312. }
  313. static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
  314. {
  315. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  316. int i;
  317. if (!qtd->payload_addr)
  318. return;
  319. for (i = 0; i < BLOCKS; i++) {
  320. if (priv->memory_pool[i].start == qtd->payload_addr) {
  321. WARN_ON(priv->memory_pool[i].free);
  322. priv->memory_pool[i].free = 1;
  323. qtd->payload_addr = 0;
  324. return;
  325. }
  326. }
  327. dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n",
  328. __func__, qtd->payload_addr);
  329. WARN_ON(1);
  330. qtd->payload_addr = 0;
  331. }
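/*
 * Poll 'reg' until (value & mask) == done, giving up after roughly 'usec'
 * microseconds.  Returns 0 on success, -ETIMEDOUT on timeout and -ENODEV if
 * the register reads back as all ones (chip gone).  Used for example as
 * handshake(hcd, HC_USBCMD, CMD_RESET, 0, 250 * 1000) in ehci_reset() below.
 */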
  332. static int handshake(struct usb_hcd *hcd, u32 reg,
  333. u32 mask, u32 done, int usec)
  334. {
  335. u32 result;
  336. do {
  337. result = reg_read32(hcd->regs, reg);
  338. if (result == ~0)
  339. return -ENODEV;
  340. result &= mask;
  341. if (result == done)
  342. return 0;
  343. udelay(1);
  344. usec--;
  345. } while (usec > 0);
  346. return -ETIMEDOUT;
  347. }
  348. /* reset a non-running (STS_HALT == 1) controller */
  349. static int ehci_reset(struct usb_hcd *hcd)
  350. {
  351. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  352. u32 command = reg_read32(hcd->regs, HC_USBCMD);
  353. command |= CMD_RESET;
  354. reg_write32(hcd->regs, HC_USBCMD, command);
  355. hcd->state = HC_STATE_HALT;
  356. priv->next_statechange = jiffies;
  357. return handshake(hcd, HC_USBCMD, CMD_RESET, 0, 250 * 1000);
  358. }
  359. static struct isp1760_qh *qh_alloc(gfp_t flags)
  360. {
  361. struct isp1760_qh *qh;
  362. qh = kmem_cache_zalloc(qh_cachep, flags);
  363. if (!qh)
  364. return NULL;
  365. INIT_LIST_HEAD(&qh->qh_list);
  366. INIT_LIST_HEAD(&qh->qtd_list);
  367. qh->slot = -1;
  368. return qh;
  369. }
  370. static void qh_free(struct isp1760_qh *qh)
  371. {
  372. WARN_ON(!list_empty(&qh->qtd_list));
  373. WARN_ON(qh->slot > -1);
  374. kmem_cache_free(qh_cachep, qh);
  375. }
  376. /* one-time init, only for memory state */
  377. static int priv_init(struct usb_hcd *hcd)
  378. {
  379. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  380. u32 hcc_params;
  381. int i;
  382. spin_lock_init(&priv->lock);
  383. for (i = 0; i < QH_END; i++)
  384. INIT_LIST_HEAD(&priv->qh_list[i]);
  385. /*
  386. * hw default: 1K periodic list heads, one per frame.
  387. * periodic_size can shrink by USBCMD update if hcc_params allows.
  388. */
  389. priv->periodic_size = DEFAULT_I_TDPS;
  390. /* controllers may cache some of the periodic schedule ... */
  391. hcc_params = reg_read32(hcd->regs, HC_HCCPARAMS);
  392. /* full frame cache */
  393. if (HCC_ISOC_CACHE(hcc_params))
  394. priv->i_thresh = 8;
  395. else /* N microframes cached */
  396. priv->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
  397. return 0;
  398. }
  399. static int isp1760_hc_setup(struct usb_hcd *hcd)
  400. {
  401. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  402. int result;
  403. u32 scratch, hwmode;
  404. reg_write32(hcd->regs, HC_SCRATCH_REG, 0xdeadbabe);
  405. /* Change bus pattern */
  406. scratch = reg_read32(hcd->regs, HC_CHIP_ID_REG);
  407. scratch = reg_read32(hcd->regs, HC_SCRATCH_REG);
  408. if (scratch != 0xdeadbabe) {
  409. dev_err(hcd->self.controller, "Scratch test failed.\n");
  410. return -ENODEV;
  411. }
  412. /*
  413. * The RESET_HC bit in the SW_RESET register is supposed to reset the
  414. * host controller without touching the CPU interface registers, but at
  415. * least on the ISP1761 it seems to behave as the RESET_ALL bit and
  416. * reset the whole device. We thus can't use it here, so let's reset
  417. * the host controller through the EHCI USB Command register. The device
  418. * has been reset in core code anyway, so this shouldn't matter.
  419. */
  420. reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
  421. reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
  422. reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
  423. reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
  424. result = ehci_reset(hcd);
  425. if (result)
  426. return result;
  427. /* Step 11 passed */
  428. /* ATL reset */
  429. hwmode = reg_read32(hcd->regs, HC_HW_MODE_CTRL) & ~ALL_ATX_RESET;
  430. reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
  431. mdelay(10);
  432. reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
  433. reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);
  434. priv->hcs_params = reg_read32(hcd->regs, HC_HCSPARAMS);
  435. return priv_init(hcd);
  436. }
  437. static u32 base_to_chip(u32 base)
  438. {
  439. return ((base - 0x400) >> 3);
  440. }
  441. static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh)
  442. {
  443. struct urb *urb;
  444. if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
  445. return 1;
  446. urb = qtd->urb;
  447. qtd = list_entry(qtd->qtd_list.next, typeof(*qtd), qtd_list);
  448. return (qtd->urb != urb);
  449. }
  450. /* magic numbers that can affect system performance */
  451. #define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
  452. #define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
  453. #define EHCI_TUNE_RL_TT 0
  454. #define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
  455. #define EHCI_TUNE_MULT_TT 1
  456. #define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
  457. static void create_ptd_atl(struct isp1760_qh *qh,
  458. struct isp1760_qtd *qtd, struct ptd *ptd)
  459. {
  460. u32 maxpacket;
  461. u32 multi;
  462. u32 rl = RL_COUNTER;
  463. u32 nak = NAK_COUNTER;
  464. memset(ptd, 0, sizeof(*ptd));
  465. /* according to 3.6.2, max packet len can not be > 0x400 */
  466. maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe,
  467. usb_pipeout(qtd->urb->pipe));
  468. multi = 1 + ((maxpacket >> 11) & 0x3);
  469. maxpacket &= 0x7ff;
  470. /* DW0 */
  471. ptd->dw0 = DW0_VALID_BIT;
  472. ptd->dw0 |= TO_DW0_LENGTH(qtd->length);
  473. ptd->dw0 |= TO_DW0_MAXPACKET(maxpacket);
  474. ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));
  475. /* DW1 */
  476. ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1;
  477. ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
  478. ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type);
  479. if (usb_pipebulk(qtd->urb->pipe))
  480. ptd->dw1 |= DW1_TRANS_BULK;
  481. else if (usb_pipeint(qtd->urb->pipe))
  482. ptd->dw1 |= DW1_TRANS_INT;
  483. if (qtd->urb->dev->speed != USB_SPEED_HIGH) {
  484. /* split transaction */
  485. ptd->dw1 |= DW1_TRANS_SPLIT;
  486. if (qtd->urb->dev->speed == USB_SPEED_LOW)
  487. ptd->dw1 |= DW1_SE_USB_LOSPEED;
  488. ptd->dw1 |= TO_DW1_PORT_NUM(qtd->urb->dev->ttport);
  489. ptd->dw1 |= TO_DW1_HUB_NUM(qtd->urb->dev->tt->hub->devnum);
  490. /* SE bit for Split INT transfers */
  491. if (usb_pipeint(qtd->urb->pipe) &&
  492. (qtd->urb->dev->speed == USB_SPEED_LOW))
  493. ptd->dw1 |= 2 << 16;
  494. rl = 0;
  495. nak = 0;
  496. } else {
  497. ptd->dw0 |= TO_DW0_MULTI(multi);
  498. if (usb_pipecontrol(qtd->urb->pipe) ||
  499. usb_pipebulk(qtd->urb->pipe))
  500. ptd->dw3 |= TO_DW3_PING(qh->ping);
  501. }
  502. /* DW2 */
  503. ptd->dw2 = 0;
  504. ptd->dw2 |= TO_DW2_DATA_START_ADDR(base_to_chip(qtd->payload_addr));
  505. ptd->dw2 |= TO_DW2_RL(rl);
  506. /* DW3 */
  507. ptd->dw3 |= TO_DW3_NAKCOUNT(nak);
  508. ptd->dw3 |= TO_DW3_DATA_TOGGLE(qh->toggle);
  509. if (usb_pipecontrol(qtd->urb->pipe)) {
  510. if (qtd->data_buffer == qtd->urb->setup_packet)
  511. ptd->dw3 &= ~TO_DW3_DATA_TOGGLE(1);
  512. else if (last_qtd_of_urb(qtd, qh))
  513. ptd->dw3 |= TO_DW3_DATA_TOGGLE(1);
  514. }
  515. ptd->dw3 |= DW3_ACTIVE_BIT;
  516. /* Cerr */
  517. ptd->dw3 |= TO_DW3_CERR(ERR_COUNTER);
  518. }
  519. static void transform_add_int(struct isp1760_qh *qh,
  520. struct isp1760_qtd *qtd, struct ptd *ptd)
  521. {
  522. u32 usof;
  523. u32 period;
  524. /*
  525. * Most of this is guessing. ISP1761 datasheet is quite unclear, and
  526. * the algorithm from the original Philips driver code, which was
  527. * pretty much used in this driver before as well, is quite horrendous
  528. * and, I believe, incorrect. The code below follows the datasheet and
  529. * USB2.0 spec as far as I can tell, and plug/unplug seems to be much
  530. * more reliable this way (fingers crossed...).
  531. */
  532. if (qtd->urb->dev->speed == USB_SPEED_HIGH) {
  533. /* urb->interval is in units of microframes (1/8 ms) */
  534. period = qtd->urb->interval >> 3;
  535. if (qtd->urb->interval > 4)
  536. usof = 0x01; /* One bit set =>
  537. interval 1 ms * uFrame-match */
  538. else if (qtd->urb->interval > 2)
  539. usof = 0x22; /* Two bits set => interval 1/2 ms */
  540. else if (qtd->urb->interval > 1)
  541. usof = 0x55; /* Four bits set => interval 1/4 ms */
  542. else
  543. usof = 0xff; /* All bits set => interval 1/8 ms */
  544. } else {
  545. /* urb->interval is in units of frames (1 ms) */
  546. period = qtd->urb->interval;
  547. usof = 0x0f; /* Execute Start Split on any of the
  548. four first uFrames */
  549. /*
  550. * First 8 bits in dw5 is uSCS and "specifies which uSOF the
  551. * complete split needs to be sent. Valid only for IN." Also,
  552. * "All bits can be set to one for every transfer." (p 82,
  553. * ISP1761 data sheet.) 0x1c is from Philips driver. Where did
  554. * that number come from? 0xff seems to work fine...
  555. */
  556. /* ptd->dw5 = 0x1c; */
  557. ptd->dw5 = 0xff; /* Execute Complete Split on any uFrame */
  558. }
  559. period = period >> 1; /* Ensure equal or shorter period than requested */
  560. period &= 0xf8; /* Mask off too large values and lowest unused 3 bits */
  561. ptd->dw2 |= period;
  562. ptd->dw4 = usof;
  563. }
  564. static void create_ptd_int(struct isp1760_qh *qh,
  565. struct isp1760_qtd *qtd, struct ptd *ptd)
  566. {
  567. create_ptd_atl(qh, qtd, ptd);
  568. transform_add_int(qh, qtd, ptd);
  569. }
  570. static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb)
  571. __releases(priv->lock)
  572. __acquires(priv->lock)
  573. {
  574. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  575. if (!urb->unlinked) {
  576. if (urb->status == -EINPROGRESS)
  577. urb->status = 0;
  578. }
  579. if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
  580. void *ptr;
  581. for (ptr = urb->transfer_buffer;
  582. ptr < urb->transfer_buffer + urb->transfer_buffer_length;
  583. ptr += PAGE_SIZE)
  584. flush_dcache_page(virt_to_page(ptr));
  585. }
  586. /* complete() can reenter this HCD */
  587. usb_hcd_unlink_urb_from_ep(hcd, urb);
  588. spin_unlock(&priv->lock);
  589. usb_hcd_giveback_urb(hcd, urb, urb->status);
  590. spin_lock(&priv->lock);
  591. }
  592. static struct isp1760_qtd *qtd_alloc(gfp_t flags, struct urb *urb,
  593. u8 packet_type)
  594. {
  595. struct isp1760_qtd *qtd;
  596. qtd = kmem_cache_zalloc(qtd_cachep, flags);
  597. if (!qtd)
  598. return NULL;
  599. INIT_LIST_HEAD(&qtd->qtd_list);
  600. qtd->urb = urb;
  601. qtd->packet_type = packet_type;
  602. qtd->status = QTD_ENQUEUED;
  603. qtd->actual_length = 0;
  604. return qtd;
  605. }
  606. static void qtd_free(struct isp1760_qtd *qtd)
  607. {
  608. WARN_ON(qtd->payload_addr);
  609. kmem_cache_free(qtd_cachep, qtd);
  610. }
  611. static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
  612. struct isp1760_slotinfo *slots,
  613. struct isp1760_qtd *qtd, struct isp1760_qh *qh,
  614. struct ptd *ptd)
  615. {
  616. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  617. int skip_map;
  618. WARN_ON((slot < 0) || (slot > 31));
  619. WARN_ON(qtd->length && !qtd->payload_addr);
  620. WARN_ON(slots[slot].qtd);
  621. WARN_ON(slots[slot].qh);
  622. WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);
  623. /* Make sure done map has not triggered from some unlinked transfer */
  624. if (ptd_offset == ATL_PTD_OFFSET) {
  625. priv->atl_done_map |= reg_read32(hcd->regs,
  626. HC_ATL_PTD_DONEMAP_REG);
  627. priv->atl_done_map &= ~(1 << slot);
  628. } else {
  629. priv->int_done_map |= reg_read32(hcd->regs,
  630. HC_INT_PTD_DONEMAP_REG);
  631. priv->int_done_map &= ~(1 << slot);
  632. }
  633. qh->slot = slot;
  634. qtd->status = QTD_XFER_STARTED;
  635. slots[slot].timestamp = jiffies;
  636. slots[slot].qtd = qtd;
  637. slots[slot].qh = qh;
  638. ptd_write(hcd->regs, ptd_offset, slot, ptd);
  639. if (ptd_offset == ATL_PTD_OFFSET) {
  640. skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
  641. skip_map &= ~(1 << qh->slot);
  642. reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
  643. } else {
  644. skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
  645. skip_map &= ~(1 << qh->slot);
  646. reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
  647. }
  648. }
  649. static int is_short_bulk(struct isp1760_qtd *qtd)
  650. {
  651. return (usb_pipebulk(qtd->urb->pipe) &&
  652. (qtd->actual_length < qtd->length));
  653. }
  654. static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
  655. struct list_head *urb_list)
  656. {
  657. int last_qtd;
  658. struct isp1760_qtd *qtd, *qtd_next;
  659. struct urb_listitem *urb_listitem;
  660. list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) {
  661. if (qtd->status < QTD_XFER_COMPLETE)
  662. break;
  663. last_qtd = last_qtd_of_urb(qtd, qh);
  664. if ((!last_qtd) && (qtd->status == QTD_RETIRE))
  665. qtd_next->status = QTD_RETIRE;
  666. if (qtd->status == QTD_XFER_COMPLETE) {
  667. if (qtd->actual_length) {
  668. switch (qtd->packet_type) {
  669. case IN_PID:
  670. mem_reads8(hcd->regs, qtd->payload_addr,
  671. qtd->data_buffer,
  672. qtd->actual_length);
  673. /* Fall through (?) */
  674. case OUT_PID:
  675. qtd->urb->actual_length +=
  676. qtd->actual_length;
  677. /* Fall through ... */
  678. case SETUP_PID:
  679. break;
  680. }
  681. }
  682. if (is_short_bulk(qtd)) {
  683. if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK)
  684. qtd->urb->status = -EREMOTEIO;
  685. if (!last_qtd)
  686. qtd_next->status = QTD_RETIRE;
  687. }
  688. }
  689. if (qtd->payload_addr)
  690. free_mem(hcd, qtd);
  691. if (last_qtd) {
  692. if ((qtd->status == QTD_RETIRE) &&
  693. (qtd->urb->status == -EINPROGRESS))
  694. qtd->urb->status = -EPIPE;
  695. /* Defer calling of urb_done() since it releases lock */
  696. urb_listitem = kmem_cache_zalloc(urb_listitem_cachep,
  697. GFP_ATOMIC);
  698. if (unlikely(!urb_listitem))
  699. break; /* Try again on next call */
  700. urb_listitem->urb = qtd->urb;
  701. list_add_tail(&urb_listitem->urb_list, urb_list);
  702. }
  703. list_del(&qtd->qtd_list);
  704. qtd_free(qtd);
  705. }
  706. }
  707. #define ENQUEUE_DEPTH 2
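/*
 * Per scheduling pass, at most ENQUEUE_DEPTH qtds of an endpoint get chip
 * payload memory, and one of them is started if the endpoint has no PTD slot
 * in flight yet.  See the scheduling comment in schedule_ptds() for why
 * double buffering (depth 2) was chosen.
 */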
  708. static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
  709. {
  710. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  711. int ptd_offset;
  712. struct isp1760_slotinfo *slots;
  713. int curr_slot, free_slot;
  714. int n;
  715. struct ptd ptd;
  716. struct isp1760_qtd *qtd;
  717. if (unlikely(list_empty(&qh->qtd_list))) {
  718. WARN_ON(1);
  719. return;
  720. }
  721. /* Make sure this endpoint's TT buffer is clean before queueing ptds */
  722. if (qh->tt_buffer_dirty)
  723. return;
  724. if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd,
  725. qtd_list)->urb->pipe)) {
  726. ptd_offset = INT_PTD_OFFSET;
  727. slots = priv->int_slots;
  728. } else {
  729. ptd_offset = ATL_PTD_OFFSET;
  730. slots = priv->atl_slots;
  731. }
  732. free_slot = -1;
  733. for (curr_slot = 0; curr_slot < 32; curr_slot++) {
  734. if ((free_slot == -1) && (slots[curr_slot].qtd == NULL))
  735. free_slot = curr_slot;
  736. if (slots[curr_slot].qh == qh)
  737. break;
  738. }
  739. n = 0;
  740. list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
  741. if (qtd->status == QTD_ENQUEUED) {
  742. WARN_ON(qtd->payload_addr);
  743. alloc_mem(hcd, qtd);
  744. if ((qtd->length) && (!qtd->payload_addr))
  745. break;
  746. if ((qtd->length) &&
  747. ((qtd->packet_type == SETUP_PID) ||
  748. (qtd->packet_type == OUT_PID))) {
  749. mem_writes8(hcd->regs, qtd->payload_addr,
  750. qtd->data_buffer, qtd->length);
  751. }
  752. qtd->status = QTD_PAYLOAD_ALLOC;
  753. }
  754. if (qtd->status == QTD_PAYLOAD_ALLOC) {
  755. /*
  756. if ((curr_slot > 31) && (free_slot == -1))
  757. dev_dbg(hcd->self.controller, "%s: No slot "
  758. "available for transfer\n", __func__);
  759. */
  760. /* Start xfer for this endpoint if not already done */
  761. if ((curr_slot > 31) && (free_slot > -1)) {
  762. if (usb_pipeint(qtd->urb->pipe))
  763. create_ptd_int(qh, qtd, &ptd);
  764. else
  765. create_ptd_atl(qh, qtd, &ptd);
  766. start_bus_transfer(hcd, ptd_offset, free_slot,
  767. slots, qtd, qh, &ptd);
  768. curr_slot = free_slot;
  769. }
  770. n++;
  771. if (n >= ENQUEUE_DEPTH)
  772. break;
  773. }
  774. }
  775. }
  776. static void schedule_ptds(struct usb_hcd *hcd)
  777. {
  778. struct isp1760_hcd *priv;
  779. struct isp1760_qh *qh, *qh_next;
  780. struct list_head *ep_queue;
  781. LIST_HEAD(urb_list);
  782. struct urb_listitem *urb_listitem, *urb_listitem_next;
  783. int i;
  784. if (!hcd) {
  785. WARN_ON(1);
  786. return;
  787. }
  788. priv = hcd_to_priv(hcd);
  789. /*
  790. * check finished/retired xfers, transfer payloads, call urb_done()
  791. */
  792. for (i = 0; i < QH_END; i++) {
  793. ep_queue = &priv->qh_list[i];
  794. list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
  795. collect_qtds(hcd, qh, &urb_list);
  796. if (list_empty(&qh->qtd_list))
  797. list_del(&qh->qh_list);
  798. }
  799. }
  800. list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
  801. urb_list) {
  802. isp1760_urb_done(hcd, urb_listitem->urb);
  803. kmem_cache_free(urb_listitem_cachep, urb_listitem);
  804. }
  805. /*
  806. * Schedule packets for transfer.
  807. *
  808. * According to USB2.0 specification:
  809. *
  810. * 1st prio: interrupt xfers, up to 80 % of bandwidth
  811. * 2nd prio: control xfers
  812. * 3rd prio: bulk xfers
  813. *
  814. * ... but let's use a simpler scheme here (mostly because ISP1761 doc
  815. * is very unclear on how to prioritize traffic):
  816. *
  817. * 1) Enqueue any queued control transfers, as long as payload chip mem
  818. * and PTD ATL slots are available.
  819. * 2) Enqueue any queued INT transfers, as long as payload chip mem
  820. * and PTD INT slots are available.
  821. * 3) Enqueue any queued bulk transfers, as long as payload chip mem
  822. * and PTD ATL slots are available.
  823. *
  824. * Use double buffering (ENQUEUE_DEPTH==2) as a compromise between
  825. * conservation of chip mem and performance.
  826. *
  827. * I'm sure this scheme could be improved upon!
  828. */
  829. for (i = 0; i < QH_END; i++) {
  830. ep_queue = &priv->qh_list[i];
  831. list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
  832. enqueue_qtds(hcd, qh);
  833. }
  834. }
  835. #define PTD_STATE_QTD_DONE 1
  836. #define PTD_STATE_QTD_RELOAD 2
  837. #define PTD_STATE_URB_RETIRE 3
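/*
 * Return values of check_int_transfer()/check_atl_transfer(), acted upon in
 * handle_done_ptds():
 *   PTD_STATE_QTD_DONE   - qtd completed, continue with the urb's next qtd
 *   PTD_STATE_QTD_RELOAD - transient error or NAK limit, resubmit the same
 *                          ptd (ATL transfers only)
 *   PTD_STATE_URB_RETIRE - fatal error, retire the urb's remaining qtds
 */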
  838. static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd,
  839. struct urb *urb)
  840. {
  841. __dw dw4;
  842. int i;
  843. dw4 = ptd->dw4;
  844. dw4 >>= 8;
  845. /* FIXME: ISP1761 datasheet does not say what to do with these. Do we
  846. need to handle these errors? Is it done in hardware? */
  847. if (ptd->dw3 & DW3_HALT_BIT) {
  848. urb->status = -EPROTO; /* Default unknown error */
  849. for (i = 0; i < 8; i++) {
  850. switch (dw4 & 0x7) {
  851. case INT_UNDERRUN:
  852. dev_dbg(hcd->self.controller, "%s: underrun "
  853. "during uFrame %d\n",
  854. __func__, i);
  855. urb->status = -ECOMM; /* Could not write data */
  856. break;
  857. case INT_EXACT:
  858. dev_dbg(hcd->self.controller, "%s: transaction "
  859. "error during uFrame %d\n",
  860. __func__, i);
  861. urb->status = -EPROTO; /* timeout, bad CRC, PID
  862. error etc. */
  863. break;
  864. case INT_BABBLE:
  865. dev_dbg(hcd->self.controller, "%s: babble "
  866. "error during uFrame %d\n",
  867. __func__, i);
  868. urb->status = -EOVERFLOW;
  869. break;
  870. }
  871. dw4 >>= 3;
  872. }
  873. return PTD_STATE_URB_RETIRE;
  874. }
  875. return PTD_STATE_QTD_DONE;
  876. }
  877. static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
  878. struct urb *urb)
  879. {
  880. WARN_ON(!ptd);
  881. if (ptd->dw3 & DW3_HALT_BIT) {
  882. if (ptd->dw3 & DW3_BABBLE_BIT)
  883. urb->status = -EOVERFLOW;
  884. else if (FROM_DW3_CERR(ptd->dw3))
  885. urb->status = -EPIPE; /* Stall */
  886. else if (ptd->dw3 & DW3_ERROR_BIT)
  887. urb->status = -EPROTO; /* XactErr */
  888. else
  889. urb->status = -EPROTO; /* Unknown */
  890. /*
  891. dev_dbg(hcd->self.controller, "%s: ptd error:\n"
  892. " dw0: %08x dw1: %08x dw2: %08x dw3: %08x\n"
  893. " dw4: %08x dw5: %08x dw6: %08x dw7: %08x\n",
  894. __func__,
  895. ptd->dw0, ptd->dw1, ptd->dw2, ptd->dw3,
  896. ptd->dw4, ptd->dw5, ptd->dw6, ptd->dw7);
  897. */
  898. return PTD_STATE_URB_RETIRE;
  899. }
  900. if ((ptd->dw3 & DW3_ERROR_BIT) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
  901. /* Transfer Error, *but* active and no HALT -> reload */
  902. dev_dbg(hcd->self.controller, "PID error; reloading ptd\n");
  903. return PTD_STATE_QTD_RELOAD;
  904. }
  905. if (!FROM_DW3_NAKCOUNT(ptd->dw3) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
  906. /*
  907. * NAKs are handled in HW by the chip. Usually if the
  908. * device is not able to send data fast enough.
  909. * This happens mostly on slower hardware.
  910. */
  911. return PTD_STATE_QTD_RELOAD;
  912. }
  913. return PTD_STATE_QTD_DONE;
  914. }
  915. static void handle_done_ptds(struct usb_hcd *hcd)
  916. {
  917. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  918. struct ptd ptd;
  919. struct isp1760_qh *qh;
  920. int slot;
  921. int state;
  922. struct isp1760_slotinfo *slots;
  923. u32 ptd_offset;
  924. struct isp1760_qtd *qtd;
  925. int modified;
  926. int skip_map;
  927. skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
  928. priv->int_done_map &= ~skip_map;
  929. skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
  930. priv->atl_done_map &= ~skip_map;
  931. modified = priv->int_done_map || priv->atl_done_map;
  932. while (priv->int_done_map || priv->atl_done_map) {
  933. if (priv->int_done_map) {
  934. /* INT ptd */
  935. slot = __ffs(priv->int_done_map);
  936. priv->int_done_map &= ~(1 << slot);
  937. slots = priv->int_slots;
  938. /* This should not trigger, and could be removed if
  939. no one has any problems with it triggering: */
  940. if (!slots[slot].qh) {
  941. WARN_ON(1);
  942. continue;
  943. }
  944. ptd_offset = INT_PTD_OFFSET;
  945. ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
  946. state = check_int_transfer(hcd, &ptd,
  947. slots[slot].qtd->urb);
  948. } else {
  949. /* ATL ptd */
  950. slot = __ffs(priv->atl_done_map);
  951. priv->atl_done_map &= ~(1 << slot);
  952. slots = priv->atl_slots;
  953. /* This should not trigger, and could be removed if
  954. no one has any problems with it triggering: */
  955. if (!slots[slot].qh) {
  956. WARN_ON(1);
  957. continue;
  958. }
  959. ptd_offset = ATL_PTD_OFFSET;
  960. ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
  961. state = check_atl_transfer(hcd, &ptd,
  962. slots[slot].qtd->urb);
  963. }
  964. qtd = slots[slot].qtd;
  965. slots[slot].qtd = NULL;
  966. qh = slots[slot].qh;
  967. slots[slot].qh = NULL;
  968. qh->slot = -1;
  969. WARN_ON(qtd->status != QTD_XFER_STARTED);
  970. switch (state) {
  971. case PTD_STATE_QTD_DONE:
  972. if ((usb_pipeint(qtd->urb->pipe)) &&
  973. (qtd->urb->dev->speed != USB_SPEED_HIGH))
  974. qtd->actual_length =
  975. FROM_DW3_SCS_NRBYTESTRANSFERRED(ptd.dw3);
  976. else
  977. qtd->actual_length =
  978. FROM_DW3_NRBYTESTRANSFERRED(ptd.dw3);
  979. qtd->status = QTD_XFER_COMPLETE;
  980. if (list_is_last(&qtd->qtd_list, &qh->qtd_list) ||
  981. is_short_bulk(qtd))
  982. qtd = NULL;
  983. else
  984. qtd = list_entry(qtd->qtd_list.next,
  985. typeof(*qtd), qtd_list);
  986. qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
  987. qh->ping = FROM_DW3_PING(ptd.dw3);
  988. break;
  989. case PTD_STATE_QTD_RELOAD: /* QTD_RETRY, for atls only */
  990. qtd->status = QTD_PAYLOAD_ALLOC;
  991. ptd.dw0 |= DW0_VALID_BIT;
  992. /* RL counter = ERR counter */
  993. ptd.dw3 &= ~TO_DW3_NAKCOUNT(0xf);
  994. ptd.dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(ptd.dw2));
  995. ptd.dw3 &= ~TO_DW3_CERR(3);
  996. ptd.dw3 |= TO_DW3_CERR(ERR_COUNTER);
  997. qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
  998. qh->ping = FROM_DW3_PING(ptd.dw3);
  999. break;
  1000. case PTD_STATE_URB_RETIRE:
  1001. qtd->status = QTD_RETIRE;
  1002. if ((qtd->urb->dev->speed != USB_SPEED_HIGH) &&
  1003. (qtd->urb->status != -EPIPE) &&
  1004. (qtd->urb->status != -EREMOTEIO)) {
  1005. qh->tt_buffer_dirty = 1;
  1006. if (usb_hub_clear_tt_buffer(qtd->urb))
  1007. /* Clear failed; let's hope things work
  1008. anyway */
  1009. qh->tt_buffer_dirty = 0;
  1010. }
  1011. qtd = NULL;
  1012. qh->toggle = 0;
  1013. qh->ping = 0;
  1014. break;
  1015. default:
  1016. WARN_ON(1);
  1017. continue;
  1018. }
  1019. if (qtd && (qtd->status == QTD_PAYLOAD_ALLOC)) {
  1020. if (slots == priv->int_slots) {
  1021. if (state == PTD_STATE_QTD_RELOAD)
  1022. dev_err(hcd->self.controller,
  1023. "%s: PTD_STATE_QTD_RELOAD on "
  1024. "interrupt packet\n", __func__);
  1025. if (state != PTD_STATE_QTD_RELOAD)
  1026. create_ptd_int(qh, qtd, &ptd);
  1027. } else {
  1028. if (state != PTD_STATE_QTD_RELOAD)
  1029. create_ptd_atl(qh, qtd, &ptd);
  1030. }
  1031. start_bus_transfer(hcd, ptd_offset, slot, slots, qtd,
  1032. qh, &ptd);
  1033. }
  1034. }
  1035. if (modified)
  1036. schedule_ptds(hcd);
  1037. }
  1038. static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
  1039. {
  1040. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  1041. u32 imask;
  1042. irqreturn_t irqret = IRQ_NONE;
  1043. spin_lock(&priv->lock);
  1044. if (!(hcd->state & HC_STATE_RUNNING))
  1045. goto leave;
  1046. imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
  1047. if (unlikely(!imask))
  1048. goto leave;
  1049. reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
  1050. priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
  1051. priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
  1052. handle_done_ptds(hcd);
  1053. irqret = IRQ_HANDLED;
  1054. leave:
  1055. spin_unlock(&priv->lock);
  1056. return irqret;
  1057. }
  1058. /*
  1059. * Workaround for problem described in chip errata 2:
  1060. *
  1061. * Sometimes interrupts are not generated when ATL (not INT?) completion occurs.
  1062. * One solution suggested in the errata is to use SOF interrupts _instead_of_
  1063. * ATL done interrupts (the "instead of" might be important since it seems
  1064. * enabling ATL interrupts also causes the chip to sometimes - rarely - "forget"
  1065. * to set the PTD's done bit in addition to not generating an interrupt!).
  1066. *
  1067. * So if we use SOF + ATL interrupts, we sometimes get stale PTDs since their
  1068. * done bit is not being set. This is bad - it blocks the endpoint until reboot.
  1069. *
  1070. * If we use SOF interrupts only, we get latency between ptd completion and the
  1071. * actual handling. This is very noticeable in testusb runs, which take several
  1072. * minutes longer without ATL interrupts.
  1073. *
  1074. * A better solution is to run the code below every SLOT_CHECK_PERIOD ms. If it
  1075. * finds active ATL slots which are older than SLOT_TIMEOUT ms, it checks the
  1076. * slot's ACTIVE and VALID bits. If these are not set, the ptd is considered
  1077. * completed and its done map bit is set.
  1078. *
  1079. * The values of SLOT_TIMEOUT and SLOT_CHECK_PERIOD have been arbitrarily chosen
  1080. * not to cause too much lag when this HW bug occurs, while still hopefully
  1081. * ensuring that the check does not falsely trigger.
  1082. */
  1083. #define SLOT_TIMEOUT 300
  1084. #define SLOT_CHECK_PERIOD 200
  1085. static struct timer_list errata2_timer;
  1086. static struct usb_hcd *errata2_timer_hcd;
  1087. static void errata2_function(struct timer_list *unused)
  1088. {
  1089. struct usb_hcd *hcd = errata2_timer_hcd;
  1090. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  1091. int slot;
  1092. struct ptd ptd;
  1093. unsigned long spinflags;
  1094. spin_lock_irqsave(&priv->lock, spinflags);
  1095. for (slot = 0; slot < 32; slot++)
  1096. if (priv->atl_slots[slot].qh && time_after(jiffies,
  1097. priv->atl_slots[slot].timestamp +
  1098. msecs_to_jiffies(SLOT_TIMEOUT))) {
  1099. ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
  1100. if (!FROM_DW0_VALID(ptd.dw0) &&
  1101. !FROM_DW3_ACTIVE(ptd.dw3))
  1102. priv->atl_done_map |= 1 << slot;
  1103. }
  1104. if (priv->atl_done_map)
  1105. handle_done_ptds(hcd);
  1106. spin_unlock_irqrestore(&priv->lock, spinflags);
  1107. errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
  1108. add_timer(&errata2_timer);
  1109. }
  1110. static int isp1760_run(struct usb_hcd *hcd)
  1111. {
  1112. int retval;
  1113. u32 temp;
  1114. u32 command;
  1115. u32 chipid;
  1116. hcd->uses_new_polling = 1;
  1117. hcd->state = HC_STATE_RUNNING;
  1118. /* Set PTD interrupt AND & OR maps */
  1119. reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
  1120. reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff);
  1121. reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
  1122. reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff);
  1123. reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
  1124. reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
  1125. /* step 23 passed */
  1126. temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
  1127. reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp | HW_GLOBAL_INTR_EN);
  1128. command = reg_read32(hcd->regs, HC_USBCMD);
  1129. command &= ~(CMD_LRESET|CMD_RESET);
  1130. command |= CMD_RUN;
  1131. reg_write32(hcd->regs, HC_USBCMD, command);
  1132. retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
  1133. if (retval)
  1134. return retval;
  1135. /*
  1136. * XXX
  1137. * Spec says to write FLAG_CF as last config action, priv code grabs
  1138. * the semaphore while doing so.
  1139. */
  1140. down_write(&ehci_cf_port_reset_rwsem);
  1141. reg_write32(hcd->regs, HC_CONFIGFLAG, FLAG_CF);
  1142. retval = handshake(hcd, HC_CONFIGFLAG, FLAG_CF, FLAG_CF, 250 * 1000);
  1143. up_write(&ehci_cf_port_reset_rwsem);
  1144. if (retval)
  1145. return retval;
  1146. errata2_timer_hcd = hcd;
  1147. timer_setup(&errata2_timer, errata2_function, 0);
  1148. errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
  1149. add_timer(&errata2_timer);
  1150. chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
  1151. dev_info(hcd->self.controller, "USB ISP %04x HW rev. %d started\n",
  1152. chipid & 0xffff, chipid >> 16);
  1153. /* PTD Register Init Part 2, Step 28 */
  1154. /* Setup registers controlling PTD checking */
  1155. reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
  1156. reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
  1157. reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
  1158. reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff);
  1159. reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff);
  1160. reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff);
  1161. reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
  1162. ATL_BUF_FILL | INT_BUF_FILL);
  1163. /* GRR this is run-once init(), being done every time the HC starts.
  1164. * So long as they're part of class devices, we can't do it in init()
  1165. * since the class device isn't created that early.
  1166. */
  1167. return 0;
  1168. }
  1169. static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
  1170. {
  1171. qtd->data_buffer = databuffer;
  1172. if (len > MAX_PAYLOAD_SIZE)
  1173. len = MAX_PAYLOAD_SIZE;
  1174. qtd->length = len;
  1175. return qtd->length;
  1176. }
  1177. static void qtd_list_free(struct list_head *qtd_list)
  1178. {
  1179. struct isp1760_qtd *qtd, *qtd_next;
  1180. list_for_each_entry_safe(qtd, qtd_next, qtd_list, qtd_list) {
  1181. list_del(&qtd->qtd_list);
  1182. qtd_free(qtd);
  1183. }
  1184. }
  1185. /*
  1186. * Packetize urb->transfer_buffer into list of packets of size wMaxPacketSize.
  1187. * Also calculate the PID type (SETUP/IN/OUT) for each packet.
  1188. */
  1189. #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
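/*
 * Illustrative example: a control transfer with a data stage becomes a
 * SETUP_PID qtd for the setup packet, one or more IN_PID/OUT_PID qtds for
 * the data (split up by qtd_fill()), and a final zero-length qtd in the
 * opposite direction for the status stage.  Bulk urbs with URB_ZERO_PACKET
 * whose length is a multiple of wMaxPacketSize get a trailing zero-length
 * qtd as well.
 */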
  1190. static void packetize_urb(struct usb_hcd *hcd,
  1191. struct urb *urb, struct list_head *head, gfp_t flags)
  1192. {
  1193. struct isp1760_qtd *qtd;
  1194. void *buf;
  1195. int len, maxpacketsize;
  1196. u8 packet_type;
  1197. /*
  1198. * URBs map to sequences of QTDs: one logical transaction
  1199. */
  1200. if (!urb->transfer_buffer && urb->transfer_buffer_length) {
  1201. /* XXX This looks like usb storage / SCSI bug */
  1202. dev_err(hcd->self.controller,
  1203. "buf is null, dma is %08lx len is %d\n",
  1204. (long unsigned)urb->transfer_dma,
  1205. urb->transfer_buffer_length);
  1206. WARN_ON(1);
  1207. }
  1208. if (usb_pipein(urb->pipe))
  1209. packet_type = IN_PID;
  1210. else
  1211. packet_type = OUT_PID;
  1212. if (usb_pipecontrol(urb->pipe)) {
  1213. qtd = qtd_alloc(flags, urb, SETUP_PID);
  1214. if (!qtd)
  1215. goto cleanup;
  1216. qtd_fill(qtd, urb->setup_packet, sizeof(struct usb_ctrlrequest));
  1217. list_add_tail(&qtd->qtd_list, head);
  1218. /* for zero length DATA stages, STATUS is always IN */
  1219. if (urb->transfer_buffer_length == 0)
  1220. packet_type = IN_PID;
  1221. }
  1222. maxpacketsize = max_packet(usb_maxpacket(urb->dev, urb->pipe,
  1223. usb_pipeout(urb->pipe)));
  1224. /*
  1225. * buffer gets wrapped in one or more qtds;
  1226. * last one may be "short" (including zero len)
  1227. * and may serve as a control status ack
  1228. */
  1229. buf = urb->transfer_buffer;
  1230. len = urb->transfer_buffer_length;
  1231. for (;;) {
  1232. int this_qtd_len;
  1233. qtd = qtd_alloc(flags, urb, packet_type);
  1234. if (!qtd)
  1235. goto cleanup;
  1236. this_qtd_len = qtd_fill(qtd, buf, len);
  1237. list_add_tail(&qtd->qtd_list, head);
  1238. len -= this_qtd_len;
  1239. buf += this_qtd_len;
  1240. if (len <= 0)
  1241. break;
  1242. }
  1243. /*
  1244. * control requests may need a terminating data "status" ack;
  1245. * bulk ones may need a terminating short packet (zero length).
  1246. */
  1247. if (urb->transfer_buffer_length != 0) {
  1248. int one_more = 0;
  1249. if (usb_pipecontrol(urb->pipe)) {
  1250. one_more = 1;
  1251. if (packet_type == IN_PID)
  1252. packet_type = OUT_PID;
  1253. else
  1254. packet_type = IN_PID;
  1255. } else if (usb_pipebulk(urb->pipe)
  1256. && (urb->transfer_flags & URB_ZERO_PACKET)
  1257. && !(urb->transfer_buffer_length %
  1258. maxpacketsize)) {
  1259. one_more = 1;
  1260. }
  1261. if (one_more) {
  1262. qtd = qtd_alloc(flags, urb, packet_type);
  1263. if (!qtd)
  1264. goto cleanup;
  1265. /* never any data in such packets */
  1266. qtd_fill(qtd, NULL, 0);
  1267. list_add_tail(&qtd->qtd_list, head);
  1268. }
  1269. }
  1270. return;
  1271. cleanup:
  1272. qtd_list_free(head);
  1273. }
  1274. static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
  1275. gfp_t mem_flags)
  1276. {
  1277. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  1278. struct list_head *ep_queue;
  1279. struct isp1760_qh *qh, *qhit;
  1280. unsigned long spinflags;
  1281. LIST_HEAD(new_qtds);
  1282. int retval;
  1283. int qh_in_queue;
  1284. switch (usb_pipetype(urb->pipe)) {
  1285. case PIPE_CONTROL:
  1286. ep_queue = &priv->qh_list[QH_CONTROL];
  1287. break;
  1288. case PIPE_BULK:
  1289. ep_queue = &priv->qh_list[QH_BULK];
  1290. break;
  1291. case PIPE_INTERRUPT:
  1292. if (urb->interval < 0)
  1293. return -EINVAL;
  1294. /* FIXME: Check bandwidth */
  1295. ep_queue = &priv->qh_list[QH_INTERRUPT];
  1296. break;
  1297. case PIPE_ISOCHRONOUS:
  1298. dev_err(hcd->self.controller, "%s: isochronous USB packets "
  1299. "not yet supported\n",
  1300. __func__);
  1301. return -EPIPE;
  1302. default:
  1303. dev_err(hcd->self.controller, "%s: unknown pipe type\n",
  1304. __func__);
  1305. return -EPIPE;
  1306. }
  1307. if (usb_pipein(urb->pipe))
  1308. urb->actual_length = 0;
  1309. packetize_urb(hcd, urb, &new_qtds, mem_flags);
  1310. if (list_empty(&new_qtds))
  1311. return -ENOMEM;
  1312. retval = 0;
  1313. spin_lock_irqsave(&priv->lock, spinflags);
  1314. if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
  1315. retval = -ESHUTDOWN;
  1316. qtd_list_free(&new_qtds);
  1317. goto out;
  1318. }
  1319. retval = usb_hcd_link_urb_to_ep(hcd, urb);
  1320. if (retval) {
  1321. qtd_list_free(&new_qtds);
  1322. goto out;
  1323. }
  1324. qh = urb->ep->hcpriv;
  1325. if (qh) {
  1326. qh_in_queue = 0;
  1327. list_for_each_entry(qhit, ep_queue, qh_list) {
  1328. if (qhit == qh) {
  1329. qh_in_queue = 1;
  1330. break;
  1331. }
  1332. }
  1333. if (!qh_in_queue)
  1334. list_add_tail(&qh->qh_list, ep_queue);
  1335. } else {
  1336. qh = qh_alloc(GFP_ATOMIC);
  1337. if (!qh) {
  1338. retval = -ENOMEM;
  1339. usb_hcd_unlink_urb_from_ep(hcd, urb);
  1340. qtd_list_free(&new_qtds);
  1341. goto out;
  1342. }
  1343. list_add_tail(&qh->qh_list, ep_queue);
  1344. urb->ep->hcpriv = qh;
  1345. }
  1346. list_splice_tail(&new_qtds, &qh->qtd_list);
  1347. schedule_ptds(hcd);
  1348. out:
  1349. spin_unlock_irqrestore(&priv->lock, spinflags);
  1350. return retval;
  1351. }
static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
		struct isp1760_qh *qh)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	int skip_map;

	WARN_ON(qh->slot == -1);

	/* We need to forcefully reclaim the slot since some transfers never
	   return, e.g. interrupt transfers and NAKed bulk transfers. */
	if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
		skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
		skip_map |= (1 << qh->slot);
		reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
		priv->atl_slots[qh->slot].qh = NULL;
		priv->atl_slots[qh->slot].qtd = NULL;
	} else {
		skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
		skip_map |= (1 << qh->slot);
		reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
		priv->int_slots[qh->slot].qh = NULL;
		priv->int_slots[qh->slot].qtd = NULL;
	}

	qh->slot = -1;
}
/*
 * Retire the qtds beginning at 'qtd' that all belong to the same URB,
 * killing any transfer still active on the chip in the process.
 */
static void dequeue_urb_from_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
		struct isp1760_qtd *qtd)
{
	struct urb *urb;
	int urb_was_running;

	urb = qtd->urb;
	urb_was_running = 0;
	list_for_each_entry_from(qtd, &qh->qtd_list, qtd_list) {
		if (qtd->urb != urb)
			break;

		if (qtd->status >= QTD_XFER_STARTED)
			urb_was_running = 1;
		if (last_qtd_of_urb(qtd, qh) &&
					(qtd->status >= QTD_XFER_COMPLETE))
			urb_was_running = 0;

		if (qtd->status == QTD_XFER_STARTED)
			kill_transfer(hcd, urb, qh);
		qtd->status = QTD_RETIRE;
	}

	if ((urb->dev->speed != USB_SPEED_HIGH) && urb_was_running) {
		qh->tt_buffer_dirty = 1;
		if (usb_hub_clear_tt_buffer(urb))
			/* Clear failed; let's hope things work anyway */
			qh->tt_buffer_dirty = 0;
	}
}
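/*
 * Unlink a single URB.  usb_hcd_check_unlink_urb() rejects unlink requests
 * for URBs that are no longer queued here; otherwise every qtd belonging to
 * the URB is marked QTD_RETIRE and schedule_ptds() completes the URB.
 */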
static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
		int status)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	unsigned long spinflags;
	struct isp1760_qh *qh;
	struct isp1760_qtd *qtd;
	int retval = 0;

	spin_lock_irqsave(&priv->lock, spinflags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto out;

	qh = urb->ep->hcpriv;
	if (!qh) {
		retval = -EINVAL;
		goto out;
	}

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
		if (qtd->urb == urb) {
			dequeue_urb_from_qtd(hcd, qh, qtd);
			list_move(&qtd->qtd_list, &qh->qtd_list);
			break;
		}

	urb->status = status;
	schedule_ptds(hcd);

out:
	spin_unlock_irqrestore(&priv->lock, spinflags);
	return retval;
}
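/*
 * By the time the USB core calls endpoint_disable, all URBs for the endpoint
 * are expected to have been given back, hence the WARN_ON on a non-empty
 * qtd_list; the qh only has to be unlinked from its queue and freed.
 */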
static void isp1760_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	unsigned long spinflags;
	struct isp1760_qh *qh, *qh_iter;
	int i;

	spin_lock_irqsave(&priv->lock, spinflags);

	qh = ep->hcpriv;
	if (!qh)
		goto out;

	WARN_ON(!list_empty(&qh->qtd_list));

	for (i = 0; i < QH_END; i++)
		list_for_each_entry(qh_iter, &priv->qh_list[i], qh_list)
			if (qh_iter == qh) {
				list_del(&qh_iter->qh_list);
				i = QH_END;
				break;
			}
	qh_free(qh);
	ep->hcpriv = NULL;

	schedule_ptds(hcd);

out:
	spin_unlock_irqrestore(&priv->lock, spinflags);
}
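/*
 * Root hub emulation.  hub_status_data fills in the usual hub change bitmap:
 * bit 0 would be the hub itself, so the single root port handled by this code
 * uses bit 1.  Only connect-status changes (and a pending resume that has
 * timed out) are reported.
 */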
static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	u32 temp, status = 0;
	u32 mask;
	int retval = 1;
	unsigned long flags;

	/* if !PM, root hub timers won't get shut down ... */
	if (!HC_IS_RUNNING(hcd->state))
		return 0;

	/* init status to no-changes */
	buf[0] = 0;
	mask = PORT_CSC;

	spin_lock_irqsave(&priv->lock, flags);
	temp = reg_read32(hcd->regs, HC_PORTSC1);

	if (temp & PORT_OWNER) {
		if (temp & PORT_CSC) {
			temp &= ~PORT_CSC;
			reg_write32(hcd->regs, HC_PORTSC1, temp);
			goto done;
		}
	}

	/*
	 * Return status information even for ports with OWNER set.
	 * Otherwise hub_wq wouldn't see the disconnect event when a
	 * high-speed device is switched over to the companion
	 * controller by the user.
	 */
	if ((temp & mask) != 0
			|| ((temp & PORT_RESUME) != 0
				&& time_after_eq(jiffies, priv->reset_done))) {
		buf[0] |= 1 << (0 + 1);
		status = STS_PCD;
	}
	/* FIXME autosuspend idle root hubs */
done:
	spin_unlock_irqrestore(&priv->lock, flags);
	return status ? retval : 0;
}
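/*
 * Build the descriptor for the root hub.  bDescLength follows the USB 2.0
 * hub descriptor layout: 7 fixed bytes plus two variable-length bitmaps of
 * (1 + ports / 8) bytes each (DeviceRemovable and the legacy
 * PortPwrCtrlMask).
 */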
static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
		struct usb_hub_descriptor *desc)
{
	int ports = HCS_N_PORTS(priv->hcs_params);
	u16 temp;

	desc->bDescriptorType = USB_DT_HUB;
	/* EHCI 1.0, 2.3.9 says 20 ms max */
	desc->bPwrOn2PwrGood = 10;
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = ports;
	temp = 1 + (ports / 8);
	desc->bDescLength = 7 + 2 * temp;

	/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);

	/* per-port overcurrent reporting */
	temp = HUB_CHAR_INDV_PORT_OCPM;
	if (HCS_PPC(priv->hcs_params))
		/* per-port power control */
		temp |= HUB_CHAR_INDV_PORT_LPSM;
	else
		/* no power switching */
		temp |= HUB_CHAR_NO_LPSM;
	desc->wHubCharacteristics = cpu_to_le16(temp);
}
#define	PORT_WAKE_BITS	(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
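/*
 * After a port reset, a device that still isn't enabled can only be a
 * full-speed device: ownership of the port is handed to the companion
 * controller by setting PORT_OWNER, mirroring the EHCI handoff.
 */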
static int check_reset_complete(struct usb_hcd *hcd, int index,
		int port_status)
{
	if (!(port_status & PORT_CONNECT))
		return port_status;

	/* if reset finished and it's still not enabled -- handoff */
	if (!(port_status & PORT_PE)) {
		dev_info(hcd->self.controller,
				"port %d full speed --> companion\n",
				index + 1);

		port_status |= PORT_OWNER;
		port_status &= ~PORT_RWC_BITS;
		reg_write32(hcd->regs, HC_PORTSC1, port_status);
	} else
		dev_info(hcd->self.controller, "port %d high speed\n",
				index + 1);

	return port_status;
}
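/*
 * Root hub control requests, modeled on the EHCI root-hub emulation: hub and
 * port features are mapped directly onto bits in the PORTSC register, and
 * GetPortStatus is also where pending resets and resumes are completed.
 */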
static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
		u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	int ports = HCS_N_PORTS(priv->hcs_params);
	u32 temp, status;
	unsigned long flags;
	int retval = 0;

	/*
	 * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
	 * (track current state ourselves) ... blink for diagnostics,
	 * power, "this is the one", etc.  EHCI spec supports this.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = reg_read32(hcd->regs, HC_PORTSC1);

		/*
		 * Even if OWNER is set, so the port is owned by the
		 * companion controller, hub_wq needs to be able to clear
		 * the port-change status bits (especially
		 * USB_PORT_STAT_C_CONNECTION).
		 */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_PE);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			/* XXX error? */
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (temp & PORT_RESET)
				goto error;

			if (temp & PORT_SUSPEND) {
				if ((temp & PORT_PE) == 0)
					goto error;
				/* resume signaling for USB_RESUME_TIMEOUT ms */
				temp &= ~(PORT_RWC_BITS);
				reg_write32(hcd->regs, HC_PORTSC1,
							temp | PORT_RESUME);
				priv->reset_done = jiffies +
					msecs_to_jiffies(USB_RESUME_TIMEOUT);
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			/* we auto-clear this feature */
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(priv->hcs_params))
				reg_write32(hcd->regs, HC_PORTSC1,
							temp & ~PORT_POWER);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_CSC);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			/* XXX error? */
			break;
		case USB_PORT_FEAT_C_RESET:
			/* GetPortStatus clears reset */
			break;
		default:
			goto error;
		}
		reg_read32(hcd->regs, HC_USBCMD);
		break;
	case GetHubDescriptor:
		isp1760_hub_descriptor(priv,
				(struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		/* no hub-wide feature/status flags */
		memset(buf, 0, 4);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		status = 0;
		temp = reg_read32(hcd->regs, HC_PORTSC1);

		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;

		/* whoever resumes must GetPortStatus to complete it!! */
		if (temp & PORT_RESUME) {
			dev_err(hcd->self.controller,
					"Port resume should be skipped.\n");

			/* Remote Wakeup received? */
			if (!priv->reset_done) {
				/* resume signaling for 20 msec */
				priv->reset_done = jiffies
						+ msecs_to_jiffies(20);
				/* check the port again */
				mod_timer(&hcd->rh_timer, priv->reset_done);
			}

			/* resume completed? */
			else if (time_after_eq(jiffies, priv->reset_done)) {
				status |= USB_PORT_STAT_C_SUSPEND << 16;
				priv->reset_done = 0;

				/* stop resume signaling */
				temp = reg_read32(hcd->regs, HC_PORTSC1);
				reg_write32(hcd->regs, HC_PORTSC1, temp &
						~(PORT_RWC_BITS | PORT_RESUME));
				retval = handshake(hcd, HC_PORTSC1,
					PORT_RESUME, 0, 2000 /* 2 msec */);
				if (retval != 0) {
					dev_err(hcd->self.controller,
						"port %d resume error %d\n",
						wIndex + 1, retval);
					goto error;
				}
				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
			}
		}

		/* whoever resets must GetPortStatus to complete it!! */
		if ((temp & PORT_RESET)
				&& time_after_eq(jiffies, priv->reset_done)) {
			status |= USB_PORT_STAT_C_RESET << 16;
			priv->reset_done = 0;

			/* force reset to complete */
			reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_RESET);
			/* REVISIT:  some hardware needs 550+ usec to clear
			 * this bit; seems too long to spin routinely...
			 */
			retval = handshake(hcd, HC_PORTSC1,
					PORT_RESET, 0, 750);
			if (retval != 0) {
				dev_err(hcd->self.controller,
						"port %d reset error %d\n",
						wIndex + 1, retval);
				goto error;
			}

			/* see what we found out */
			temp = check_reset_complete(hcd, wIndex,
					reg_read32(hcd->regs, HC_PORTSC1));
		}
		/*
		 * Even if OWNER is set, there's no harm letting hub_wq
		 * see the wPortStatus values (they should all be 0 except
		 * for PORT_POWER anyway).
		 */
		if (temp & PORT_OWNER)
			dev_err(hcd->self.controller, "PORT_OWNER is set\n");

		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			/* status may be from integrated TT */
			status |= USB_PORT_STAT_HIGH_SPEED;
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		if (temp & (PORT_SUSPEND|PORT_RESUME))
			status |= USB_PORT_STAT_SUSPEND;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (temp & PORT_POWER)
			status |= USB_PORT_STAT_POWER;

		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		wIndex &= 0xff;
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = reg_read32(hcd->regs, HC_PORTSC1);
		if (temp & PORT_OWNER)
			break;

/*		temp &= ~PORT_RWC_BITS; */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_PE);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if ((temp & PORT_PE) == 0
					|| (temp & PORT_RESET) != 0)
				goto error;

			reg_write32(hcd->regs, HC_PORTSC1,
					temp | PORT_SUSPEND);
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(priv->hcs_params))
				reg_write32(hcd->regs, HC_PORTSC1,
							temp | PORT_POWER);
			break;
		case USB_PORT_FEAT_RESET:
			if (temp & PORT_RESUME)
				goto error;
			/* line status bits may report this as low speed,
			 * which can be fine if this root hub has a
			 * transaction translator built in.
			 */
			if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
					&& PORT_USB11(temp)) {
				temp |= PORT_OWNER;
			} else {
				temp |= PORT_RESET;
				temp &= ~PORT_PE;

				/*
				 * caller must wait, then call GetPortStatus
				 * usb 2.0 spec says 50 ms resets on root
				 */
				priv->reset_done = jiffies +
						msecs_to_jiffies(50);
			}
			reg_write32(hcd->regs, HC_PORTSC1, temp);
			break;
		default:
			goto error;
		}
		reg_read32(hcd->regs, HC_USBCMD);
		break;

	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	return retval;
}
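/*
 * HC_FRINDEX counts micro-frames; shifting right by three converts it to a
 * 1 ms frame number before it is wrapped to the periodic schedule size.
 */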
static int isp1760_get_frame(struct usb_hcd *hcd)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	u32 fr;

	fr = reg_read32(hcd->regs, HC_FRINDEX);
	return (fr >> 3) % priv->periodic_size;
}
static void isp1760_stop(struct usb_hcd *hcd)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	u32 temp;

	del_timer(&errata2_timer);

	isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1,
			NULL, 0);
	msleep(20);

	spin_lock_irq(&priv->lock);
	ehci_reset(hcd);
	/* Disable IRQ */
	temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
	reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
	spin_unlock_irq(&priv->lock);

	reg_write32(hcd->regs, HC_CONFIGFLAG, 0);
}
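/*
 * isp1760_shutdown runs on system shutdown/reboot: beyond the regular stop
 * path it also clears CMD_RUN so the host controller stops executing its
 * schedule before the machine goes down.
 */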
static void isp1760_shutdown(struct usb_hcd *hcd)
{
	u32 command, temp;

	isp1760_stop(hcd);
	temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
	reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);

	command = reg_read32(hcd->regs, HC_USBCMD);
	command &= ~CMD_RUN;
	reg_write32(hcd->regs, HC_USBCMD, command);
}
static void isp1760_clear_tt_buffer_complete(struct usb_hcd *hcd,
						struct usb_host_endpoint *ep)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	struct isp1760_qh *qh = ep->hcpriv;
	unsigned long spinflags;

	if (!qh)
		return;

	spin_lock_irqsave(&priv->lock, spinflags);
	qh->tt_buffer_dirty = 0;
	schedule_ptds(hcd);
	spin_unlock_irqrestore(&priv->lock, spinflags);
}
static const struct hc_driver isp1760_hc_driver = {
	.description		= "isp1760-hcd",
	.product_desc		= "NXP ISP1760 USB Host Controller",
	.hcd_priv_size		= sizeof(struct isp1760_hcd *),
	.irq			= isp1760_irq,
	.flags			= HCD_MEMORY | HCD_USB2,
	.reset			= isp1760_hc_setup,
	.start			= isp1760_run,
	.stop			= isp1760_stop,
	.shutdown		= isp1760_shutdown,
	.urb_enqueue		= isp1760_urb_enqueue,
	.urb_dequeue		= isp1760_urb_dequeue,
	.endpoint_disable	= isp1760_endpoint_disable,
	.get_frame_number	= isp1760_get_frame,
	.hub_status_data	= isp1760_hub_status_data,
	.hub_control		= isp1760_hub_control,
	.clear_tt_buffer_complete	= isp1760_clear_tt_buffer_complete,
};
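/*
 * The driver carves its bookkeeping structures out of three dedicated slab
 * caches (urb list items, qtds and qhs); they are created once at init time
 * and torn down again in isp1760_deinit_kmem_cache().
 */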
int __init isp1760_init_kmem_once(void)
{
	urb_listitem_cachep = kmem_cache_create("isp1760_urb_listitem",
			sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
			SLAB_MEM_SPREAD, NULL);

	if (!urb_listitem_cachep)
		return -ENOMEM;

	qtd_cachep = kmem_cache_create("isp1760_qtd",
			sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
			SLAB_MEM_SPREAD, NULL);

	if (!qtd_cachep) {
		kmem_cache_destroy(urb_listitem_cachep);
		return -ENOMEM;
	}

	qh_cachep = kmem_cache_create("isp1760_qh", sizeof(struct isp1760_qh),
			0, SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);

	if (!qh_cachep) {
		kmem_cache_destroy(qtd_cachep);
		kmem_cache_destroy(urb_listitem_cachep);
		return -ENOMEM;
	}

	return 0;
}
void isp1760_deinit_kmem_cache(void)
{
	kmem_cache_destroy(qtd_cachep);
	kmem_cache_destroy(qh_cachep);
	kmem_cache_destroy(urb_listitem_cachep);
}
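/*
 * Register the HCD with the USB core on behalf of the bus glue.  A minimal,
 * purely illustrative caller (the 'pdev' platform device and the ioremap of
 * the register window are assumptions, not part of this file) might look
 * roughly like:
 *
 *	void __iomem *regs = devm_ioremap_resource(&pdev->dev, mem);
 *
 *	if (IS_ERR(regs))
 *		return PTR_ERR(regs);
 *	ret = isp1760_hcd_register(priv, regs, mem, irq, IRQF_SHARED,
 *				   &pdev->dev);
 *
 * On failure the half-initialized hcd is released with usb_put_hcd().
 */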
int isp1760_hcd_register(struct isp1760_hcd *priv, void __iomem *regs,
			 struct resource *mem, int irq, unsigned long irqflags,
			 struct device *dev)
{
	struct usb_hcd *hcd;
	int ret;

	hcd = usb_create_hcd(&isp1760_hc_driver, dev, dev_name(dev));
	if (!hcd)
		return -ENOMEM;

	*(struct isp1760_hcd **)hcd->hcd_priv = priv;

	priv->hcd = hcd;

	init_memory(priv);

	hcd->irq = irq;
	hcd->regs = regs;
	hcd->rsrc_start = mem->start;
	hcd->rsrc_len = resource_size(mem);

	/* This driver doesn't support wakeup requests */
	hcd->cant_recv_wakeups = 1;

	ret = usb_add_hcd(hcd, irq, irqflags);
	if (ret)
		goto error;

	device_wakeup_enable(hcd->self.controller);

	return 0;

error:
	usb_put_hcd(hcd);
	return ret;
}
void isp1760_hcd_unregister(struct isp1760_hcd *priv)
{
	if (!priv->hcd)
		return;

	usb_remove_hcd(priv->hcd);
	usb_put_hcd(priv->hcd);
}