/* rtsx_pcr.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* Driver for Realtek PCI-Express card reader
  3. *
  4. * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  5. *
  6. * Author:
  7. * Wei WANG <wei_wang@realsil.com.cn>
  8. */
  9. #include <linux/pci.h>
  10. #include <linux/module.h>
  11. #include <linux/slab.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/highmem.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/delay.h>
  16. #include <linux/idr.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/mfd/core.h>
  19. #include <linux/rtsx_pci.h>
  20. #include <linux/mmc/card.h>
  21. #include <linux/unaligned.h>
  22. #include <linux/pm.h>
  23. #include <linux/pm_runtime.h>
  24. #include "rtsx_pcr.h"
  25. #include "rts5261.h"
  26. #include "rts5228.h"
  27. #include "rts5264.h"
/* Module option: allow MSI interrupts (writable at runtime via sysfs). */
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "Enable MSI");

/* IDR handing out a unique id per controller instance; guarded by rtsx_pci_lock. */
static DEFINE_IDR(rtsx_pci_idr);
static DEFINE_SPINLOCK(rtsx_pci_lock);

/* MFD sub-devices spawned per controller; only the SD/MMC cell is declared here. */
static struct mfd_cell rtsx_pcr_cells[] = {
	[RTSX_SD_CARD] = {
		.name = DRV_NAME_RTSX_PCI_SDMMC,
	},
};
/*
 * Supported Realtek card-reader PCI functions.  Matching is restricted to
 * class PCI_CLASS_OTHERS (mask 0xFF0000 checks the base-class byte only),
 * since these chips expose several PCI functions.
 */
static const struct pci_device_id rtsx_pci_ids[] = {
	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
/*
 * rtsx_comm_set_ltr_latency - program a software LTR latency value
 * @pcr:     PCR instance
 * @latency: 32-bit latency value, written little-endian-style one byte at a
 *           time into MSGTXDATA0..3
 *
 * After loading the four data bytes, LTR_CTL is switched to software
 * latency mode with LTR transmit enabled so the new value takes effect.
 * Always returns 0; individual register-write errors are not propagated.
 */
static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
	rtsx_pci_write_register(pcr, MSGTXDATA0,
				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA1,
				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA2,
				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA3,
				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);

	return 0;
}
/* Public wrapper around rtsx_comm_set_ltr_latency(); always returns 0. */
int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
	return rtsx_comm_set_ltr_latency(pcr, latency);
}
/*
 * rtsx_comm_set_aspm - enable or disable ASPM on the card-reader link
 * @pcr:    PCR instance
 * @enable: true to allow ASPM, false to force the link out of it
 *
 * No-op when the requested state is already cached in pcr->aspm_enabled.
 * ASPM_MODE_CFG applies the change through the standard PCIe link-control
 * capability; ASPM_MODE_REG goes through the vendor ASPM_FORCE_CTL register.
 * NOTE(review): bit 0x02 of aspm_en appears to gate L1-related handling
 * (force value and the 10 ms settle delay on disable) — confirm against
 * the chip datasheet.
 */
static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
	if (pcr->aspm_enabled == enable)
		return;

	if (pcr->aspm_mode == ASPM_MODE_CFG) {
		pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
						PCI_EXP_LNKCTL_ASPMC,
						enable ? pcr->aspm_en : 0);
	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
		if (pcr->aspm_en & 0x02)
			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
				FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
		else
			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
				FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
	}

	/* Busy-wait so the link has actually left the low-power state */
	if (!enable && (pcr->aspm_en & 0x02))
		mdelay(10);

	pcr->aspm_enabled = enable;
}
  94. static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
  95. {
  96. if (pcr->ops->set_aspm)
  97. pcr->ops->set_aspm(pcr, false);
  98. else
  99. rtsx_comm_set_aspm(pcr, false);
  100. }
/*
 * rtsx_set_l1off_sub - write the L1SUB_CONFIG3 register wholesale
 * @pcr: PCR instance
 * @val: value to store (full 0xFF mask, all bits replaced)
 *
 * Always returns 0; the underlying register-write result is ignored.
 */
int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
{
	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);

	return 0;
}
  106. static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
  107. {
  108. if (pcr->ops->set_l1off_cfg_sub_d0)
  109. pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
  110. }
/*
 * rtsx_comm_pm_full_on - transition power management to the fully-on state
 * @pcr: PCR instance
 *
 * Disables ASPM first, waits for the link to settle, then restores the
 * active LTR latency and the L1-off sub-state for D0 when those features
 * are enabled.  Order matters: LTR/L1SS reprogramming assumes ASPM is
 * already off.
 */
static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &pcr->option;

	rtsx_disable_aspm(pcr);

	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
	msleep(1);

	if (option->ltr_enabled)
		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);

	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
}
/* Thin indirection kept so chip-independent callers have a stable name. */
static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
{
	rtsx_comm_pm_full_on(pcr);
}
  126. void rtsx_pci_start_run(struct rtsx_pcr *pcr)
  127. {
  128. /* If pci device removed, don't queue idle work any more */
  129. if (pcr->remove_pci)
  130. return;
  131. if (pcr->state != PDEV_STAT_RUN) {
  132. pcr->state = PDEV_STAT_RUN;
  133. if (pcr->ops->enable_auto_blink)
  134. pcr->ops->enable_auto_blink(pcr);
  135. rtsx_pm_full_on(pcr);
  136. }
  137. }
  138. EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
/*
 * rtsx_pci_write_register - write one 8-bit internal register via HAIMR
 * @pcr:  PCR instance
 * @addr: 14-bit internal register address
 * @mask: which bits of the register to update
 * @data: value to write under @mask
 *
 * The write command packs address/mask/data into one 32-bit HAIMR store,
 * then polls HAIMR until the controller clears HAIMR_TRANS_END.  On
 * completion the low byte of HAIMR is compared against @data; a mismatch
 * yields -EIO.
 *
 * Returns 0 on success, -EIO on readback mismatch, -ETIMEDOUT if the
 * transaction is still pending after MAX_RW_REG_CNT polls.
 */
int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
{
	int i;
	u32 val = HAIMR_WRITE_START;

	/* [29:16] address, [15:8] mask, [7:0] data */
	val |= (u32)(addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;
	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0) {
			if (data != (u8)val)
				return -EIO;
			return 0;
		}
	}

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
/*
 * rtsx_pci_read_register - read one 8-bit internal register via HAIMR
 * @pcr:  PCR instance
 * @addr: 14-bit internal register address
 * @data: out parameter for the value; may be NULL to just issue the read
 *
 * Issues a HAIMR read command and polls until HAIMR_TRANS_END clears;
 * the register value is returned in the low byte of HAIMR.
 *
 * Returns 0 on success, -ETIMEDOUT if the transaction does not finish
 * within MAX_RW_REG_CNT polls.
 */
int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
{
	u32 val = HAIMR_READ_START;
	int i;

	val |= (u32)(addr & 0x3FFF) << 16;
	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0)
			break;
	}

	if (i >= MAX_RW_REG_CNT)
		return -ETIMEDOUT;

	if (data)
		*data = (u8)(val & 0xFF);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
  176. int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
  177. {
  178. int err, i, finished = 0;
  179. u8 tmp;
  180. rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
  181. rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
  182. rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
  183. rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
  184. for (i = 0; i < 100000; i++) {
  185. err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
  186. if (err < 0)
  187. return err;
  188. if (!(tmp & 0x80)) {
  189. finished = 1;
  190. break;
  191. }
  192. }
  193. if (!finished)
  194. return -ETIMEDOUT;
  195. return 0;
  196. }
  197. int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
  198. {
  199. if (pcr->ops->write_phy)
  200. return pcr->ops->write_phy(pcr, addr, val);
  201. return __rtsx_pci_write_phy_register(pcr, addr, val);
  202. }
  203. EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
  204. int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
  205. {
  206. int err, i, finished = 0;
  207. u16 data;
  208. u8 tmp, val1, val2;
  209. rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
  210. rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
  211. for (i = 0; i < 100000; i++) {
  212. err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
  213. if (err < 0)
  214. return err;
  215. if (!(tmp & 0x80)) {
  216. finished = 1;
  217. break;
  218. }
  219. }
  220. if (!finished)
  221. return -ETIMEDOUT;
  222. rtsx_pci_read_register(pcr, PHYDATA0, &val1);
  223. rtsx_pci_read_register(pcr, PHYDATA1, &val2);
  224. data = val1 | (val2 << 8);
  225. if (val)
  226. *val = data;
  227. return 0;
  228. }
  229. int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
  230. {
  231. if (pcr->ops->read_phy)
  232. return pcr->ops->read_phy(pcr, addr, val);
  233. return __rtsx_pci_read_phy_register(pcr, addr, val);
  234. }
  235. EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
/*
 * rtsx_pci_stop_cmd - abort any in-flight command/DMA transfer
 * @pcr: PCR instance
 *
 * Defers to the chip-specific stop_cmd hook when available.  The generic
 * path stops the command and DMA engines via their control registers and
 * then resets the DMA controller and ring buffer (bit 0x80 of DMACTL and
 * RBCTL respectively).
 */
void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
	if (pcr->ops->stop_cmd)
		return pcr->ops->stop_cmd(pcr);

	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);

	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
}
EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
  246. void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
  247. u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
  248. {
  249. unsigned long flags;
  250. u32 val = 0;
  251. u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
  252. val |= (u32)(cmd_type & 0x03) << 30;
  253. val |= (u32)(reg_addr & 0x3FFF) << 16;
  254. val |= (u32)mask << 8;
  255. val |= (u32)data;
  256. spin_lock_irqsave(&pcr->lock, flags);
  257. ptr += pcr->ci;
  258. if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
  259. put_unaligned_le32(val, ptr);
  260. ptr++;
  261. pcr->ci++;
  262. }
  263. spin_unlock_irqrestore(&pcr->lock, flags);
  264. }
  265. EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
/*
 * rtsx_pci_send_cmd_no_wait - kick off the queued command batch, fire-and-forget
 * @pcr: PCR instance
 *
 * Programs the command buffer base address, then writes HCBCTLR with
 * bit 31 (start), bit 30 (hardware auto response) and the batch length
 * in bytes (pcr->ci entries of 4 bytes, low 24 bits).  Completion is not
 * awaited here — callers must handle it themselves.
 */
void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
{
	u32 val = 1 << 31;

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
/*
 * rtsx_pci_send_cmd - send the queued command batch and wait for completion
 * @pcr:     PCR instance
 * @timeout: completion timeout in milliseconds
 *
 * Publishes a completion in pcr->done (signalled by the IRQ handler on
 * TRANS_OK/TRANS_FAIL), starts the batch exactly as
 * rtsx_pci_send_cmd_no_wait() does, then sleeps interruptibly for the
 * result.
 *
 * Returns 0 on success, -ETIMEDOUT on timeout or signal, -EINVAL if the
 * hardware reported failure, -ENODEV if the device disappeared.  On any
 * failure other than -ENODEV the transfer is aborted via
 * rtsx_pci_stop_cmd().  A waiter parked in pcr->finish_me (see
 * rtsx_pci_complete_unfinished_transfer()) is completed on exit.
 */
int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
{
	struct completion trans_done;
	u32 val = 1 << 31;
	long timeleft;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcr->lock, flags);

	/* set up data structures for the wakeup system */
	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL)
		err = -EINVAL;
	else if (pcr->trans_result == TRANS_RESULT_OK)
		err = 0;
	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

finish_send_cmd:
	/* Clear pcr->done under the lock so the IRQ path can't race us */
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
/*
 * rtsx_pci_add_sg_tbl - append one entry to the hardware scatter-gather table
 * @pcr:  PCR instance
 * @addr: DMA bus address of the segment
 * @len:  segment length in bytes
 * @end:  non-zero marks this as the final descriptor (RTSX_SG_END)
 *
 * Descriptors are 64-bit little-endian words with the address in the
 * upper 32 bits.  RTS5261/RTS5228 use a 16-bit length field at bit 16
 * with overflow bits at bit 6 for lengths above 0xFFFF; other chips put
 * the length at bit 12.  The option bits always occupy the low byte.
 */
static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
		dma_addr_t addr, unsigned int len, int end)
{
	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
	u64 val;
	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);

	if (end)
		option |= RTSX_SG_END;

	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
		if (len > 0xFFFF)
			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
				| (((u64)len >> 16) << 6) | option;
		else
			val = ((u64)addr << 32) | ((u64)len << 16) | option;
	} else {
		val = ((u64)addr << 32) | ((u64)len << 12) | option;
	}
	put_unaligned_le64(val, ptr);
	pcr->sgi++;
}
  342. int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
  343. int num_sg, bool read, int timeout)
  344. {
  345. int err = 0, count;
  346. pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
  347. count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
  348. if (count < 1)
  349. return -EINVAL;
  350. pcr_dbg(pcr, "DMA mapping count: %d\n", count);
  351. err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
  352. rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
  353. return err;
  354. }
  355. EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
  356. int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
  357. int num_sg, bool read)
  358. {
  359. enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  360. if (pcr->remove_pci)
  361. return -EINVAL;
  362. if ((sglist == NULL) || (num_sg <= 0))
  363. return -EINVAL;
  364. return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
  365. }
  366. EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
  367. void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
  368. int num_sg, bool read)
  369. {
  370. enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  371. dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
  372. }
  373. EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
/*
 * rtsx_pci_dma_transfer - run an ADMA transfer over a mapped scatterlist
 * @pcr:     PCR instance
 * @sglist:  scatterlist already mapped via rtsx_pci_dma_map_sg()
 * @count:   number of mapped segments (dma_map_sg() return value)
 * @read:    true for device-to-host, false for host-to-device
 * @timeout: completion timeout in milliseconds
 *
 * Builds the hardware scatter-gather table, starts the DMA engine
 * (direction in bit 29 of HDBCTLR, plus TRIG_DMA | ADMA_MODE) and waits
 * on a completion signalled from the IRQ handler.
 *
 * Returns 0 on success, -ETIMEDOUT on timeout/signal, -EILSEQ on a
 * hardware transfer failure (also bumps dma_error_count, capped at
 * RTS_MAX_TIMES_FREQ_REDUCTION, which later throttles the card clock),
 * -ENODEV if the device vanished.  Non-ENODEV failures abort the engine
 * via rtsx_pci_stop_cmd(); a pcr->finish_me waiter is completed on exit.
 */
int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int count, bool read, int timeout)
{
	struct completion trans_done;
	struct scatterlist *sg;
	dma_addr_t addr;
	long timeleft;
	unsigned long flags;
	unsigned int len;
	int i, err = 0;
	u32 val;
	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;

	if (pcr->remove_pci)
		return -ENODEV;

	if ((sglist == NULL) || (count < 1))
		return -EINVAL;

	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
	pcr->sgi = 0;
	/* Translate each mapped segment into a hardware SG descriptor */
	for_each_sg(sglist, sg, count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
	}

	spin_lock_irqsave(&pcr->lock, flags);

	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL) {
		err = -EILSEQ;
		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
			pcr->dma_error_count++;
	}

	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

out:
	/* Clear pcr->done under the lock so the IRQ path can't race us */
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
  431. int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
  432. {
  433. int err;
  434. int i, j;
  435. u16 reg;
  436. u8 *ptr;
  437. if (buf_len > 512)
  438. buf_len = 512;
  439. ptr = buf;
  440. reg = PPBUF_BASE2;
  441. for (i = 0; i < buf_len / 256; i++) {
  442. rtsx_pci_init_cmd(pcr);
  443. for (j = 0; j < 256; j++)
  444. rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
  445. err = rtsx_pci_send_cmd(pcr, 250);
  446. if (err < 0)
  447. return err;
  448. memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
  449. ptr += 256;
  450. }
  451. if (buf_len % 256) {
  452. rtsx_pci_init_cmd(pcr);
  453. for (j = 0; j < buf_len % 256; j++)
  454. rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
  455. err = rtsx_pci_send_cmd(pcr, 250);
  456. if (err < 0)
  457. return err;
  458. }
  459. memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
  460. return 0;
  461. }
  462. EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
  463. int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
  464. {
  465. int err;
  466. int i, j;
  467. u16 reg;
  468. u8 *ptr;
  469. if (buf_len > 512)
  470. buf_len = 512;
  471. ptr = buf;
  472. reg = PPBUF_BASE2;
  473. for (i = 0; i < buf_len / 256; i++) {
  474. rtsx_pci_init_cmd(pcr);
  475. for (j = 0; j < 256; j++) {
  476. rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
  477. reg++, 0xFF, *ptr);
  478. ptr++;
  479. }
  480. err = rtsx_pci_send_cmd(pcr, 250);
  481. if (err < 0)
  482. return err;
  483. }
  484. if (buf_len % 256) {
  485. rtsx_pci_init_cmd(pcr);
  486. for (j = 0; j < buf_len % 256; j++) {
  487. rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
  488. reg++, 0xFF, *ptr);
  489. ptr++;
  490. }
  491. err = rtsx_pci_send_cmd(pcr, 250);
  492. if (err < 0)
  493. return err;
  494. }
  495. return 0;
  496. }
  497. EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
  498. static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
  499. {
  500. rtsx_pci_init_cmd(pcr);
  501. while (*tbl & 0xFFFF0000) {
  502. rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
  503. (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
  504. tbl++;
  505. }
  506. return rtsx_pci_send_cmd(pcr, 100);
  507. }
  508. int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
  509. {
  510. const u32 *tbl;
  511. if (card == RTSX_SD_CARD)
  512. tbl = pcr->sd_pull_ctl_enable_tbl;
  513. else if (card == RTSX_MS_CARD)
  514. tbl = pcr->ms_pull_ctl_enable_tbl;
  515. else
  516. return -EINVAL;
  517. return rtsx_pci_set_pull_ctl(pcr, tbl);
  518. }
  519. EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
  520. int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
  521. {
  522. const u32 *tbl;
  523. if (card == RTSX_SD_CARD)
  524. tbl = pcr->sd_pull_ctl_disable_tbl;
  525. else if (card == RTSX_MS_CARD)
  526. tbl = pcr->ms_pull_ctl_disable_tbl;
  527. else
  528. return -EINVAL;
  529. return rtsx_pci_set_pull_ctl(pcr, tbl);
  530. }
  531. EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
/*
 * rtsx_pci_enable_bus_int - compute and enable the bus interrupt mask
 * @pcr: PCR instance
 *
 * Enables transfer-complete/fail and SD card-detect interrupts, plus any
 * chip-specific bits from hw_param->interrupt_en; MS card detect is added
 * only on multi-slot readers.  The mask is cached in pcr->bier and written
 * to RTSX_BIER.
 */
static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
{
	struct rtsx_hw_param *hw_param = &pcr->hw_param;

	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
		| hw_param->interrupt_en;

	if (pcr->num_slots > 1)
		pcr->bier |= MS_INT_EN;

	/* Enable Bus Interrupt */
	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);

	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
}
  543. static inline u8 double_ssc_depth(u8 depth)
  544. {
  545. return ((depth > 1) ? (depth - 1) : depth);
  546. }
  547. static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
  548. {
  549. if (div > CLK_DIV_1) {
  550. if (ssc_depth > (div - 1))
  551. ssc_depth -= (div - 1);
  552. else
  553. ssc_depth = SSC_DEPTH_4M;
  554. }
  555. return ssc_depth;
  556. }
/*
 * rtsx_pci_switch_clock - program the card clock / SSC PLL
 * @pcr:          PCR instance
 * @card_clock:   requested card clock in Hz
 * @ssc_depth:    RTSX_SSC_DEPTH_* selector (translated via the depth[] map)
 * @initial_mode: use the fixed 128 divider for card initialization
 * @double_clk:   run the internal clock at twice the card clock
 * @vpclk:        pulse the SD_VPCLK0 phase reset around the change
 *
 * RTS5261/5228/5264 have their own implementations and are dispatched
 * early.  For the rest, the divider register is set, the requested clock
 * is converted to the divider value N (via the optional per-chip
 * conv_clk_and_div_n hook, default N = clk - 2), N is doubled until it
 * reaches MIN_DIV_N_PCR (compensated by the CLK_DIV prescaler), and the
 * whole change is applied as one command batch under CLK_LOW_FREQ.
 *
 * Returns 0 on success (including the no-op when the clock is already
 * pcr->cur_clock), -EINVAL for an unachievable clock, or a negative
 * errno from register access.
 */
int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
	int err, clk;
	u8 n, clk_divider, mcu_cnt, div;
	static const u8 depth[] = {
		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
	};

	/* Chips with their own clock-switch logic */
	if (PCI_PID(pcr) == PID_5261)
		return rts5261_pci_switch_clock(pcr, card_clock,
				ssc_depth, initial_mode, double_clk, vpclk);
	if (PCI_PID(pcr) == PID_5228)
		return rts5228_pci_switch_clock(pcr, card_clock,
				ssc_depth, initial_mode, double_clk, vpclk);
	if (PCI_PID(pcr) == PID_5264)
		return rts5264_pci_switch_clock(pcr, card_clock,
				ssc_depth, initial_mode, double_clk, vpclk);

	if (initial_mode) {
		/* We use 250k(around) here, in initial stage */
		clk_divider = SD_CLK_DIVIDE_128;
		card_clock = 30000000;
	} else {
		clk_divider = SD_CLK_DIVIDE_0;
	}
	err = rtsx_pci_write_register(pcr, SD_CFG1,
			SD_CLK_DIVIDE_MASK, clk_divider);
	if (err < 0)
		return err;

	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
	if (card_clock == UHS_SDR104_MAX_DTR &&
	    pcr->dma_error_count &&
	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
		card_clock = UHS_SDR104_MAX_DTR -
			(pcr->dma_error_count * 20000000);

	card_clock /= 1000000;
	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);

	clk = card_clock;
	if (!initial_mode && double_clk)
		clk = card_clock * 2;
	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
		clk, pcr->cur_clock);

	/* Already running at the target clock: nothing to do */
	if (clk == pcr->cur_clock)
		return 0;

	if (pcr->ops->conv_clk_and_div_n)
		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
	else
		n = (u8)(clk - 2);
	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
		return -EINVAL;

	mcu_cnt = (u8)(125/clk + 3);
	if (mcu_cnt > 15)
		mcu_cnt = 15;

	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
	div = CLK_DIV_1;
	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
		if (pcr->ops->conv_clk_and_div_n) {
			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
					DIV_N_TO_CLK) * 2;
			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
					CLK_TO_DIV_N);
		} else {
			n = (n + 2) * 2 - 2;
		}
		div++;
	}
	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);

	ssc_depth = depth[ssc_depth];
	if (double_clk)
		ssc_depth = double_ssc_depth(ssc_depth);
	ssc_depth = revise_ssc_depth(ssc_depth, div);
	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);

	/* Reprogram the PLL with the clock forced low during the change */
	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
			CLK_LOW_FREQ, CLK_LOW_FREQ);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
			0xFF, (div << 4) | mcu_cnt);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
			SSC_DEPTH_MASK, ssc_depth);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
	if (vpclk) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, 0);
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, PHASE_NOT_RESET);
	}
	err = rtsx_pci_send_cmd(pcr, 2000);
	if (err < 0)
		return err;

	/* Wait SSC clock stable */
	udelay(SSC_CLOCK_STABLE_WAIT);
	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
	if (err < 0)
		return err;

	pcr->cur_clock = clk;

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
  660. int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
  661. {
  662. if (pcr->ops->card_power_on)
  663. return pcr->ops->card_power_on(pcr, card);
  664. return 0;
  665. }
  666. EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
  667. int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
  668. {
  669. if (pcr->ops->card_power_off)
  670. return pcr->ops->card_power_off(pcr, card);
  671. return 0;
  672. }
  673. EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
  674. int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
  675. {
  676. static const unsigned int cd_mask[] = {
  677. [RTSX_SD_CARD] = SD_EXIST,
  678. [RTSX_MS_CARD] = MS_EXIST
  679. };
  680. if (!(pcr->flags & PCR_MS_PMOS)) {
  681. /* When using single PMOS, accessing card is not permitted
  682. * if the existing card is not the designated one.
  683. */
  684. if (pcr->card_exist & (~cd_mask[card]))
  685. return -EIO;
  686. }
  687. return 0;
  688. }
  689. EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
  690. int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
  691. {
  692. if (pcr->ops->switch_output_voltage)
  693. return pcr->ops->switch_output_voltage(pcr, voltage);
  694. return 0;
  695. }
  696. EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
  697. unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
  698. {
  699. unsigned int val;
  700. val = rtsx_pci_readl(pcr, RTSX_BIPR);
  701. if (pcr->ops->cd_deglitch)
  702. val = pcr->ops->cd_deglitch(pcr);
  703. return val;
  704. }
  705. EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
/*
 * rtsx_pci_complete_unfinished_transfer - kick a stuck transfer loose
 * @pcr: PCR instance
 *
 * Wakes any thread currently sleeping in rtsx_pci_send_cmd() /
 * rtsx_pci_dma_transfer() by completing pcr->done, stops the hardware
 * engines (unless the device is being removed), then waits briefly on
 * pcr->finish_me, which those paths complete on their way out.
 */
void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
{
	struct completion finish;

	pcr->finish_me = &finish;
	init_completion(&finish);

	if (pcr->done)
		complete(pcr->done);

	if (!pcr->remove_pci)
		rtsx_pci_stop_cmd(pcr);

	/* 2 ms grace period for the woken thread to exit its transfer path */
	wait_for_completion_interruptible_timeout(&finish,
			msecs_to_jiffies(2));
	pcr->finish_me = NULL;
}
EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
/*
 * Delayed-work handler that consumes the card insert/remove events
 * recorded by the ISR and notifies the registered slot drivers.
 */
static void rtsx_pci_card_detect(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct rtsx_pcr *pcr;
	unsigned long flags;
	unsigned int card_detect = 0, card_inserted, card_removed;
	u32 irq_status;

	dwork = to_delayed_work(work);
	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);
	spin_lock_irqsave(&pcr->lock, flags);

	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);

	irq_status &= CARD_EXIST;
	/* Count an insertion only if the card is still present now;
	 * snapshot and reset the ISR-recorded events under the lock.
	 */
	card_inserted = pcr->card_inserted & irq_status;
	card_removed = pcr->card_removed;
	pcr->card_inserted = 0;
	pcr->card_removed = 0;

	spin_unlock_irqrestore(&pcr->lock, flags);

	if (card_inserted || card_removed) {
		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
				card_inserted, card_removed);

		/* Optional chip-specific debounce of the insert bits. */
		if (pcr->ops->cd_deglitch)
			card_inserted = pcr->ops->cd_deglitch(pcr);

		card_detect = card_inserted | card_removed;

		pcr->card_exist |= card_inserted;
		pcr->card_exist &= ~card_removed;
	}

	mutex_unlock(&pcr->pcr_mutex);

	/* Notify slot drivers after all locks have been dropped. */
	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
		pcr->slots[RTSX_MS_CARD].card_event(
				pcr->slots[RTSX_MS_CARD].p_dev);
}
  757. static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
  758. {
  759. if (pcr->ops->process_ocp) {
  760. pcr->ops->process_ocp(pcr);
  761. } else {
  762. if (!pcr->option.ocp_en)
  763. return;
  764. rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
  765. if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
  766. rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
  767. rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
  768. rtsx_pci_clear_ocpstat(pcr);
  769. pcr->ocp_stat = 0;
  770. }
  771. }
  772. }
  773. static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
  774. {
  775. if (pcr->option.ocp_en)
  776. rtsx_pci_process_ocp(pcr);
  777. return 0;
  778. }
/*
 * Primary interrupt handler.  Reads and write-clears RTSX_BIPR,
 * records card insert/remove events, signals transfer completion,
 * and defers debounced card-detect processing to delayed work.
 * Runs in hardirq context under pcr->lock.
 */
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
	struct rtsx_pcr *pcr = dev_id;
	u32 int_reg;

	if (!pcr)
		return IRQ_NONE;

	spin_lock(&pcr->lock);

	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
	/* Clear interrupt flag */
	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);

	/* None of the interrupts we enabled fired: not our interrupt. */
	if ((int_reg & pcr->bier) == 0) {
		spin_unlock(&pcr->lock);
		return IRQ_NONE;
	}

	/* All-ones read: device inaccessible (e.g. surprise removal). */
	if (int_reg == 0xFFFFFFFF) {
		spin_unlock(&pcr->lock);
		return IRQ_HANDLED;
	}

	int_reg &= (pcr->bier | 0x7FFFFF);

	/* Over-current; on RTS5264 also over-voltage. */
	if ((int_reg & SD_OC_INT) ||
			((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == PID_5264)))
		rtsx_pci_process_ocp_interrupt(pcr);

	if (int_reg & SD_INT) {
		if (int_reg & SD_EXIST) {
			pcr->card_inserted |= SD_EXIST;
		} else {
			pcr->card_removed |= SD_EXIST;
			pcr->card_inserted &= ~SD_EXIST;
		}
		if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
			/* NOTE(review): presumably clears the express-link
			 * failure status so SD Express can be retried —
			 * confirm against the 5261/5264 datasheet.
			 */
			rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
					RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
			pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
		}
		pcr->dma_error_count = 0;
	}

	if (int_reg & MS_INT) {
		if (int_reg & MS_EXIST) {
			pcr->card_inserted |= MS_EXIST;
		} else {
			pcr->card_removed |= MS_EXIST;
			pcr->card_inserted &= ~MS_EXIST;
		}
	}

	/* Transfer finished or link dropped: record result, wake waiter. */
	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
			pcr->trans_result = TRANS_RESULT_FAIL;
			if (pcr->done)
				complete(pcr->done);
		} else if (int_reg & TRANS_OK_INT) {
			pcr->trans_result = TRANS_RESULT_OK;
			if (pcr->done)
				complete(pcr->done);
		}
	}

	/* Debounce card events in delayed work (skipped while handling OC). */
	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
		schedule_delayed_work(&pcr->carddet_work,
				msecs_to_jiffies(200));

	spin_unlock(&pcr->lock);
	return IRQ_HANDLED;
}
  840. static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
  841. {
  842. pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
  843. __func__, pcr->msi_en, pcr->pci->irq);
  844. if (request_irq(pcr->pci->irq, rtsx_pci_isr,
  845. pcr->msi_en ? 0 : IRQF_SHARED,
  846. DRV_NAME_RTSX_PCI, pcr)) {
  847. dev_err(&(pcr->pci->dev),
  848. "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
  849. pcr->pci->irq);
  850. return -1;
  851. }
  852. pcr->irq = pcr->pci->irq;
  853. pci_intx(pcr->pci, !pcr->msi_en);
  854. return 0;
  855. }
/*
 * Generic force-power-down sequence used when a chip supplies no
 * ->force_power_down hook: zero the relink time, enter D3 delink
 * mode, then power down the whole chip.
 */
static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
{
	/* Set relink_time to 0 */
	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
			RELINK_TIME_MASK, 0);

	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
			D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);

	rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
}
/*
 * Power the controller down for suspend/shutdown: turn off the LED,
 * mask all interrupts, set the requested host sleep state, then run
 * the chip-specific (or generic) force-power-down sequence.
 * @pm_state: HOST_ENTER_S1/S3 value written to HOST_SLEEP_STATE
 * @runtime:  true when called from runtime PM, forwarded to the hook
 */
static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	/* Mask and forget all interrupt sources. */
	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;

	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);

	if (pcr->ops->force_power_down)
		pcr->ops->force_power_down(pcr, pm_state, runtime);
	else
		rtsx_base_force_power_down(pcr);
}
  880. void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
  881. {
  882. u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
  883. if (pcr->ops->enable_ocp) {
  884. pcr->ops->enable_ocp(pcr);
  885. } else {
  886. rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
  887. rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
  888. }
  889. }
  890. void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
  891. {
  892. u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
  893. if (pcr->ops->disable_ocp) {
  894. pcr->ops->disable_ocp(pcr);
  895. } else {
  896. rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
  897. rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
  898. OC_POWER_DOWN);
  899. }
  900. }
  901. void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
  902. {
  903. if (pcr->ops->init_ocp) {
  904. pcr->ops->init_ocp(pcr);
  905. } else {
  906. struct rtsx_cr_option *option = &(pcr->option);
  907. if (option->ocp_en) {
  908. u8 val = option->sd_800mA_ocp_thd;
  909. rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
  910. rtsx_pci_write_register(pcr, REG_OCPPARA1,
  911. SD_OCP_TIME_MASK, SD_OCP_TIME_800);
  912. rtsx_pci_write_register(pcr, REG_OCPPARA2,
  913. SD_OCP_THD_MASK, val);
  914. rtsx_pci_write_register(pcr, REG_OCPGLITCH,
  915. SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
  916. rtsx_pci_enable_ocp(pcr);
  917. }
  918. }
  919. }
  920. int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
  921. {
  922. if (pcr->ops->get_ocpstat)
  923. return pcr->ops->get_ocpstat(pcr, val);
  924. else
  925. return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
  926. }
  927. void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
  928. {
  929. if (pcr->ops->clear_ocpstat) {
  930. pcr->ops->clear_ocpstat(pcr);
  931. } else {
  932. u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
  933. u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
  934. rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
  935. udelay(100);
  936. rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
  937. }
  938. }
/*
 * Enable out-of-band signalling (OOBS) polling.  Chips other than
 * 525A/5260/5264 additionally set bit 9 in PHY register 0x01.
 * NOTE(review): the timer/polling constants below look like tuned
 * hardware values from the vendor — confirm against the datasheet
 * before changing them.
 */
void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
{
	u16 val;

	if ((PCI_PID(pcr) != PID_525A) &&
		(PCI_PID(pcr) != PID_5260) &&
		(PCI_PID(pcr) != PID_5264)) {
		rtsx_pci_read_phy_register(pcr, 0x01, &val);
		val |= 1<<9;
		rtsx_pci_write_phy_register(pcr, 0x01, val);
	}

	rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
}
/*
 * Disable out-of-band signalling (OOBS) polling; mirror image of
 * rtsx_pci_enable_oobs_polling() — clears PHY 0x01 bit 9 on chips
 * other than 525A/5260/5264 and turns polling off.
 */
void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
{
	u16 val;

	if ((PCI_PID(pcr) != PID_525A) &&
		(PCI_PID(pcr) != PID_5260) &&
		(PCI_PID(pcr) != PID_5264)) {
		rtsx_pci_read_phy_register(pcr, 0x01, &val);
		val &= ~(1<<9);
		rtsx_pci_write_phy_register(pcr, 0x01, val);
	}

	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
}
/*
 * Power down a 3.3V SD card: stop all card clocks, disable the SD
 * output, cut slot power, wait 50 ms for the rail to discharge, then
 * release the pull control.  Always returns 0.
 */
int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);
	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);

	/* Give the power rail time to discharge. */
	msleep(50);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);

	return 0;
}
/*
 * Power down a 3.3V Memory Stick card: stop all card clocks, release
 * pull control, disable the MS output, then cut slot power.
 * Always returns 0.
 */
int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);

	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);

	return 0;
}
/*
 * Bring the controller hardware to its operational state.  Called at
 * probe time and again on every (system or runtime) resume.
 * Returns 0 on success or a negative error code.
 */
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
	struct pci_dev *pdev = pcr->pci;
	int err;

	if (PCI_PID(pcr) == PID_5228)
		rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
				RTS5228_LDO1_SR_0_5);

	/* Tell the chip where the host command buffer lives. */
	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	rtsx_pci_enable_bus_int(pcr);

	/* Power on SSC */
	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
		/* Gating real mcu clock */
		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
				RTS5261_MCU_CLOCK_GATING, 0);
		/* NOTE(review): err from the write above is overwritten
		 * here, so a failure of the first write goes unnoticed.
		 */
		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
				SSC_POWER_DOWN, 0);
	} else {
		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
	}
	if (err < 0)
		return err;

	/* Wait SSC power stable */
	udelay(200);

	rtsx_disable_aspm(pcr);
	if (pcr->ops->optimize_phy) {
		err = pcr->ops->optimize_phy(pcr);
		if (err < 0)
			return err;
	}

	/* Queue the common init writes and send them in one batch. */
	rtsx_pci_init_cmd(pcr);

	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
	/* Disable card clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
	/* Reset delink mode */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
	/* Card driving select */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
			0xFF, pcr->card_drive_sel);
	/* Enable SSC Clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
			0xFF, SSC_8X_EN | SSC_SEL_4M);
	/* Per-chip SSC depth; 5264 rev A clears SSC_RSTB instead. */
	if (PCI_PID(pcr) == PID_5261)
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
				RTS5261_SSC_DEPTH_2M);
	else if (PCI_PID(pcr) == PID_5228)
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
				RTS5228_SSC_DEPTH_2M);
	else if (is_version(pcr, 0x5264, IC_VER_A))
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
	else if (PCI_PID(pcr) == PID_5264)
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
				RTS5264_SSC_DEPTH_2M);
	else
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);

	/* Disable cd_pwr_save */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
	/* Clear Link Ready Interrupt */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
			LINK_RDY_INT, LINK_RDY_INT);
	/* Enlarge the estimation window of PERST# glitch
	 * to reduce the chance of invalid card interrupt
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
	/* Update RC oscillator to 400k
	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
	 * 1: 2M 0: 400k
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
	/* Set interrupt write clear
	 * bit 1: U_elbi_if_rd_clr_en
	 * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
	 * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	switch (PCI_PID(pcr)) {
	case PID_5250:
	case PID_524A:
	case PID_525A:
	case PID_5260:
	case PID_5261:
	case PID_5228:
	case PID_5264:
		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
		break;
	default:
		break;
	}

	/*init ocp*/
	rtsx_pci_init_ocp(pcr);

	/* Enable clk_request_n to enable clock power management */
	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
			0, PCI_EXP_LNKCTL_CLKREQ_EN);
	/* Enter L1 when host tx idle */
	pci_write_config_byte(pdev, 0x70F, 0x5B);

	if (pcr->ops->extra_init_hw) {
		err = pcr->ops->extra_init_hw(pcr);
		if (err < 0)
			return err;
	}

	if (pcr->aspm_mode == ASPM_MODE_REG)
		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);

	/* No CD interrupt if probing driver with card inserted.
	 * So we need to initialize pcr->card_exist here.
	 */
	if (pcr->ops->cd_deglitch)
		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
	else
		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;

	return 0;
}
/*
 * One-time per-device software initialization: select the chip-family
 * parameter set, allocate the slot array, read the host's ASPM / L1
 * substate / LTR configuration into driver state, fetch vendor
 * settings, then run the hardware bring-up.
 * Returns 0 on success or a negative error code.
 */
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &(pcr->option);
	int err, l1ss;
	u32 lval;
	u16 cfg_val;
	u8 val;

	spin_lock_init(&pcr->lock);
	mutex_init(&pcr->pcr_mutex);

	/* Dispatch on the PCI product ID to fill in chip parameters. */
	switch (PCI_PID(pcr)) {
	default:
	case 0x5209:
		rts5209_init_params(pcr);
		break;

	case 0x5229:
		rts5229_init_params(pcr);
		break;

	case 0x5289:
		rtl8411_init_params(pcr);
		break;

	case 0x5227:
		rts5227_init_params(pcr);
		break;

	case 0x522A:
		rts522a_init_params(pcr);
		break;

	case 0x5249:
		rts5249_init_params(pcr);
		break;

	case 0x524A:
		rts524a_init_params(pcr);
		break;

	case 0x525A:
		rts525a_init_params(pcr);
		break;

	case 0x5287:
		rtl8411b_init_params(pcr);
		break;

	case 0x5286:
		rtl8402_init_params(pcr);
		break;

	case 0x5260:
		rts5260_init_params(pcr);
		break;

	case 0x5261:
		rts5261_init_params(pcr);
		break;

	case 0x5228:
		rts5228_init_params(pcr);
		break;

	case 0x5264:
		rts5264_init_params(pcr);
		break;
	}

	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
			PCI_PID(pcr), pcr->ic_version);

	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
			GFP_KERNEL);
	if (!pcr->slots)
		return -ENOMEM;

	/* Record whether ASPM L1 is currently enabled, per aspm_mode. */
	if (pcr->aspm_mode == ASPM_MODE_CFG) {
		pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
		if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
			pcr->aspm_enabled = true;
		else
			pcr->aspm_enabled = false;

	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
		rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
		if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
			pcr->aspm_enabled = false;
		else
			pcr->aspm_enabled = true;
	}

	/* Mirror the L1 substate and LTR configuration into dev flags. */
	l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
	if (l1ss) {
		pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);

		if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
			rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
		else
			rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);

		if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
			rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
		else
			rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);

		if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
			rtsx_set_dev_flag(pcr, PM_L1_1_EN);
		else
			rtsx_clear_dev_flag(pcr, PM_L1_1_EN);

		if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
			rtsx_set_dev_flag(pcr, PM_L1_2_EN);
		else
			rtsx_clear_dev_flag(pcr, PM_L1_2_EN);

		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
		if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
			option->ltr_enabled = true;
			option->ltr_active = true;
		} else {
			option->ltr_enabled = false;
		}

		/* Only force CLKREQ# low when no L1 substate is enabled. */
		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
				| PM_L1_1_EN | PM_L1_2_EN))
			option->force_clkreq_0 = false;
		else
			option->force_clkreq_0 = true;
	} else {
		option->ltr_enabled = false;
		option->force_clkreq_0 = true;
	}

	if (pcr->ops->fetch_vendor_settings)
		pcr->ops->fetch_vendor_settings(pcr);

	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
			pcr->sd30_drive_sel_1v8);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
			pcr->sd30_drive_sel_3v3);
	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
			pcr->card_drive_sel);
	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);

	pcr->state = PDEV_STAT_IDLE;
	err = rtsx_pci_init_hw(pcr);
	if (err < 0) {
		kfree(pcr->slots);
		return err;
	}

	return 0;
}
/*
 * PCI probe: enable the device, map its BAR, allocate the shared DMA
 * buffer (host commands + scatter/gather table), grab the interrupt,
 * initialize the chip and register the MFD child devices.  Resources
 * are unwound through the goto-cleanup chain on any failure.
 */
static int rtsx_pci_probe(struct pci_dev *pcidev,
			const struct pci_device_id *id)
{
	struct rtsx_pcr *pcr;
	struct pcr_handle *handle;
	u32 base, len;
	int ret, i, bar = 0;

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)pcidev->revision);

	ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
	if (ret)
		goto disable;

	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr) {
		ret = -ENOMEM;
		goto release_pci;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto free_pcr;
	}
	handle->pcr = pcr;

	/* Allocate a unique id for this controller instance. */
	idr_preload(GFP_KERNEL);
	spin_lock(&rtsx_pci_lock);
	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		pcr->id = ret;
	spin_unlock(&rtsx_pci_lock);
	idr_preload_end();
	if (ret < 0)
		goto free_handle;

	pcr->pci = pcidev;
	dev_set_drvdata(&pcidev->dev, handle);

	/* 525A and 5264 expose their registers through BAR 1, not BAR 0. */
	if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264)))
		bar = 1;
	len = pci_resource_len(pcidev, bar);
	base = pci_resource_start(pcidev, bar);
	pcr->remap_addr = ioremap(base, len);
	if (!pcr->remap_addr) {
		ret = -ENOMEM;
		goto free_idr;
	}

	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
			GFP_KERNEL);
	if (pcr->rtsx_resv_buf == NULL) {
		ret = -ENXIO;
		goto unmap;
	}
	/* Split the coherent buffer: commands first, then the SG table. */
	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	pcr->card_inserted = 0;
	pcr->card_removed = 0;
	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);

	/* Prefer MSI, but fall back to legacy interrupts if it fails. */
	pcr->msi_en = msi_en;
	if (pcr->msi_en) {
		ret = pci_enable_msi(pcidev);
		if (ret)
			pcr->msi_en = false;
	}

	ret = rtsx_pci_acquire_irq(pcr);
	if (ret < 0)
		goto disable_msi;

	pci_set_master(pcidev);
	synchronize_irq(pcr->irq);

	ret = rtsx_pci_init_chip(pcr);
	if (ret < 0)
		goto disable_irq;

	/* Hand every MFD cell a pointer back to this controller. */
	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
		rtsx_pcr_cells[i].platform_data = handle;
		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
	}

	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
	if (ret < 0)
		goto free_slots;

	pm_runtime_allow(&pcidev->dev);
	pm_runtime_put(&pcidev->dev);

	return 0;

free_slots:
	kfree(pcr->slots);
disable_irq:
	free_irq(pcr->irq, (void *)pcr);
disable_msi:
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
	iounmap(pcr->remap_addr);
free_idr:
	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);
free_handle:
	kfree(handle);
free_pcr:
	kfree(pcr);
release_pci:
	pci_release_regions(pcidev);
disable:
	pci_disable_device(pcidev);

	return ret;
}
/*
 * PCI remove: quiesce interrupts, stop the card-detect work, tear
 * down the MFD children and release all resources acquired in probe,
 * in reverse order.
 */
static void rtsx_pci_remove(struct pci_dev *pcidev)
{
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	/* Tell in-flight paths (e.g. transfer abort) we are going away. */
	pcr->remove_pci = true;

	pm_runtime_get_sync(&pcidev->dev);
	pm_runtime_forbid(&pcidev->dev);

	/* Disable interrupts at the pcr level */
	spin_lock_irq(&pcr->lock);
	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;
	spin_unlock_irq(&pcr->lock);

	cancel_delayed_work_sync(&pcr->carddet_work);

	mfd_remove_devices(&pcidev->dev);

	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	iounmap(pcr->remap_addr);

	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);
	kfree(pcr->slots);
	kfree(pcr);
	kfree(handle);

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}
/* System-sleep suspend: stop card-detect work, then power the chip down. */
static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
{
	struct pci_dev *pcidev = to_pci_dev(dev_d);
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	cancel_delayed_work_sync(&pcr->carddet_work);

	mutex_lock(&pcr->pcr_mutex);
	rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
	mutex_unlock(&pcr->pcr_mutex);

	return 0;
}
  1386. static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
  1387. {
  1388. struct pci_dev *pcidev = to_pci_dev(dev_d);
  1389. struct pcr_handle *handle = pci_get_drvdata(pcidev);
  1390. struct rtsx_pcr *pcr = handle->pcr;
  1391. int ret = 0;
  1392. dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
  1393. mutex_lock(&pcr->pcr_mutex);
  1394. ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
  1395. if (ret)
  1396. goto out;
  1397. ret = rtsx_pci_init_hw(pcr);
  1398. if (ret)
  1399. goto out;
  1400. out:
  1401. mutex_unlock(&pcr->pcr_mutex);
  1402. return ret;
  1403. }
  1404. #ifdef CONFIG_PM
  1405. static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
  1406. {
  1407. if (pcr->ops->set_aspm)
  1408. pcr->ops->set_aspm(pcr, true);
  1409. else
  1410. rtsx_comm_set_aspm(pcr, true);
  1411. }
/*
 * Common idle-time power saving: program the L1-off LTR latency when
 * LTR is enabled, apply the L1-off sub-config, then enable ASPM.
 */
static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &pcr->option;

	if (option->ltr_enabled) {
		u32 latency = option->ltr_l1off_latency;

		/* Optional test delay before switching the LTR latency. */
		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
			mdelay(option->l1_snooze_delay);

		rtsx_set_ltr_latency(pcr, latency);
	}

	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 0);

	rtsx_enable_aspm(pcr);
}
/* Thin wrapper around the common power-saving path. */
static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
{
	rtsx_comm_pm_power_saving(pcr);
}
/*
 * PCI shutdown hook: power the chip off (S1 sleep state), disable the
 * device and release the interrupt/MSI resources.
 */
static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
	pci_disable_device(pcidev);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
}
/*
 * Runtime-PM idle callback: apply power savings (LED off, ASPM on)
 * but keep the device up.  Returns -EBUSY so the PM core does not
 * suspend immediately; when RTD3 is enabled a delayed suspend is
 * scheduled instead.
 */
static int rtsx_pci_runtime_idle(struct device *device)
{
	struct pci_dev *pcidev = to_pci_dev(device);
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	dev_dbg(device, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);

	pcr->state = PDEV_STAT_IDLE;

	if (pcr->ops->disable_auto_blink)
		pcr->ops->disable_auto_blink(pcr);
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pm_power_saving(pcr);

	mutex_unlock(&pcr->pcr_mutex);

	/* Suspend after a 10 s grace period when RTD3 is supported. */
	if (pcr->rtd3_en)
		pm_schedule_suspend(device, 10000);

	return -EBUSY;
}
/* Runtime suspend: stop card-detect work, power the chip down (runtime). */
static int rtsx_pci_runtime_suspend(struct device *device)
{
	struct pci_dev *pcidev = to_pci_dev(device);
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	dev_dbg(device, "--> %s\n", __func__);

	cancel_delayed_work_sync(&pcr->carddet_work);

	mutex_lock(&pcr->pcr_mutex);
	rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
	mutex_unlock(&pcr->pcr_mutex);

	return 0;
}
/*
 * Runtime resume: wake the chip, re-run hardware init, then poke the
 * SD slot so a card change while suspended gets noticed.
 * NOTE(review): errors from the register write and init_hw are
 * ignored here, unlike rtsx_pci_resume() — confirm intentional.
 */
static int rtsx_pci_runtime_resume(struct device *device)
{
	struct pci_dev *pcidev = to_pci_dev(device);
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	dev_dbg(device, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);
	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);

	rtsx_pci_init_hw(pcr);

	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	}

	mutex_unlock(&pcr->pcr_mutex);
	return 0;
}
  1486. #else /* CONFIG_PM */
  1487. #define rtsx_pci_shutdown NULL
  1488. #define rtsx_pci_runtime_suspend NULL
  1489. #define rtsx_pic_runtime_resume NULL
  1490. #endif /* CONFIG_PM */
/* System-sleep and runtime PM callbacks for the PCI core. */
static const struct dev_pm_ops rtsx_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
};
/* PCI driver registration and module boilerplate. */
static struct pci_driver rtsx_pci_driver = {
	.name = DRV_NAME_RTSX_PCI,
	.id_table = rtsx_pci_ids,
	.probe = rtsx_pci_probe,
	.remove = rtsx_pci_remove,
	.driver.pm = &rtsx_pci_pm_ops,
	.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");