exynos5422-dmc.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2019 Samsung Electronics Co., Ltd.
  4. * Author: Lukasz Luba <l.luba@partner.samsung.com>
  5. */
  6. #include <linux/cleanup.h>
  7. #include <linux/clk.h>
  8. #include <linux/devfreq.h>
  9. #include <linux/devfreq-event.h>
  10. #include <linux/device.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/io.h>
  13. #include <linux/mfd/syscon.h>
  14. #include <linux/module.h>
  15. #include <linux/moduleparam.h>
  16. #include <linux/of.h>
  17. #include <linux/pm_opp.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/regmap.h>
  20. #include <linux/regulator/consumer.h>
  21. #include <linux/slab.h>
  22. #include "../jedec_ddr.h"
  23. #include "../of_memory.h"
/* Select IRQ-driven (1) or devfreq polling (0, default) operation at load time */
static int irqmode;
module_param(irqmode, int, 0644);
MODULE_PARM_DESC(irqmode, "Enable IRQ mode (0=off [default], 1=on)");
/* DREX controller timing registers; bank set 0 (ROW0/DATA0/POWER0) is used
 * with the main PLL, bank set 1 (ROW1/DATA1/POWER1) with the bypass clock.
 */
#define EXYNOS5_DREXI_TIMINGAREF		(0x0030)
#define EXYNOS5_DREXI_TIMINGROW0		(0x0034)
#define EXYNOS5_DREXI_TIMINGDATA0		(0x0038)
#define EXYNOS5_DREXI_TIMINGPOWER0		(0x003C)
#define EXYNOS5_DREXI_TIMINGROW1		(0x00E4)
#define EXYNOS5_DREXI_TIMINGDATA1		(0x00E8)
#define EXYNOS5_DREXI_TIMINGPOWER1		(0x00EC)

/* Offsets into the clock-controller regmap (dmc->clk_regmap) */
#define CDREX_PAUSE				(0x2091c)
#define CDREX_LPDDR3PHY_CON3			(0x20a20)
#define CDREX_LPDDR3PHY_CLKM_SRC		(0x20700)

/* Bit in CDREX_LPDDR3PHY_CON3 selecting which timing bank set is active */
#define EXYNOS5_TIMING_SET_SWI			BIT(28)
#define USE_MX_MSPLL_TIMINGS			(1)
#define USE_BPLL_TIMINGS			(0)
#define EXYNOS5_AREF_NORMAL			(0x2e)

/* Performance-counter (PPC) register block of each DREX channel */
#define DREX_PPCCLKCON				(0x0130)
#define DREX_PEREV2CONFIG			(0x013c)
#define DREX_PMNC_PPC				(0xE000)
#define DREX_CNTENS_PPC				(0xE010)
#define DREX_CNTENC_PPC				(0xE020)
#define DREX_INTENS_PPC				(0xE030)
#define DREX_INTENC_PPC				(0xE040)
#define DREX_FLAG_PPC				(0xE050)
#define DREX_PMCNT2_PPC				(0xE130)

/*
 * A value for register DREX_PMNC_PPC which should be written to reset
 * the cycle counter CCNT (a reference wall clock). It sets zero to the
 * CCNT counter.
 */
#define CC_RESET				BIT(2)

/*
 * A value for register DREX_PMNC_PPC which does the reset of all performance
 * counters to zero.
 */
#define PPC_COUNTER_RESET			BIT(1)

/*
 * Enables all configured counters (including cycle counter). The value should
 * be written to the register DREX_PMNC_PPC.
 */
#define PPC_ENABLE				BIT(0)

/*
 * A value for register DREX_PPCCLKCON which enables performance events clock.
 * Must be written before first access to the performance counters register
 * set, otherwise it could crash.
 */
#define PEREV_CLK_EN				BIT(0)

/*
 * Values which are used to enable counters, interrupts or configure flags of
 * the performance counters. They configure counter 2 and cycle counter.
 */
#define PERF_CNT2				BIT(2)
#define PERF_CCNT				BIT(31)

/*
 * Performance event types which are used for setting the preferred event
 * to track in the counters.
 * There is a set of different types, the values are from range 0 to 0x6f.
 * These settings should be written to the configuration register which manages
 * the type of the event (register DREX_PEREV2CONFIG).
 */
#define READ_TRANSFER_CH0			(0x6d)
#define READ_TRANSFER_CH1			(0x6f)

/* Counter start value; samples gathered until overflow = 0xffffffff - start */
#define PERF_COUNTER_START_VALUE		0xff000000
/* Time (ns) between overflows below which load is considered high */
#define PERF_EVENT_UP_DOWN_THRESHOLD		900000000ULL
/**
 * struct dmc_opp_table - Operating level description
 * @freq_hz: target frequency in Hz
 * @volt_uv: target voltage in uV
 *
 * Covers frequency and voltage settings of the DMC operating mode.
 */
struct dmc_opp_table {
	u32 freq_hz;
	u32 volt_uv;
};
/**
 * struct exynos5_dmc - main structure describing DMC device
 * @dev: DMC device
 * @df: devfreq device structure returned by devfreq framework
 * @gov_data: configuration of devfreq governor
 * @base_drexi0: DREX0 registers mapping
 * @base_drexi1: DREX1 registers mapping
 * @clk_regmap: regmap for clock controller registers
 * @lock: protects curr_rate and frequency/voltage setting section
 * @curr_rate: current frequency
 * @curr_volt: current voltage
 * @opp: OPP table
 * @opp_count: number of 'opp' elements
 * @timings_arr_size: number of 'timings' elements
 * @timing_row: values for timing row register, for each OPP
 * @timing_data: values for timing data register, for each OPP
 * @timing_power: values for timing power register, for each OPP
 * @timings: DDR memory timings, from device tree
 * @min_tck: DDR memory minimum timing values, from device tree
 * @bypass_timing_row: value for timing row register for bypass timings
 * @bypass_timing_data: value for timing data register for bypass timings
 * @bypass_timing_power: value for timing power register for bypass
 *		timings
 * @vdd_mif: Memory interface regulator
 * @fout_spll: clock: SPLL
 * @fout_bpll: clock: BPLL
 * @mout_spll: clock: mux SPLL
 * @mout_bpll: clock: mux BPLL
 * @mout_mclk_cdrex: clock: mux mclk_cdrex
 * @mout_mx_mspll_ccore: clock: mux mx_mspll_ccore
 * @counter: devfreq events
 * @num_counters: number of 'counter' elements
 * @last_overflow_ts: time (in ns) of last overflow of each DREX
 * @load: utilization in percents
 * @total: total time between devfreq events
 * @in_irq_mode: whether running in interrupt mode (true)
 *		or polling (false)
 *
 * The main structure for the Dynamic Memory Controller which covers clocks,
 * memory regions, HW information, parameters and current operating mode.
 */
struct exynos5_dmc {
	struct device *dev;
	struct devfreq *df;
	struct devfreq_simple_ondemand_data gov_data;
	void __iomem *base_drexi0;
	void __iomem *base_drexi1;
	struct regmap *clk_regmap;
	/* Protects curr_rate and frequency/voltage setting section */
	struct mutex lock;
	unsigned long curr_rate;
	unsigned long curr_volt;
	struct dmc_opp_table *opp;
	int opp_count;
	u32 timings_arr_size;
	u32 *timing_row;
	u32 *timing_data;
	u32 *timing_power;
	const struct lpddr3_timings *timings;
	const struct lpddr3_min_tck *min_tck;
	u32 bypass_timing_row;
	u32 bypass_timing_data;
	u32 bypass_timing_power;
	struct regulator *vdd_mif;
	struct clk *fout_spll;
	struct clk *fout_bpll;
	struct clk *mout_spll;
	struct clk *mout_bpll;
	struct clk *mout_mclk_cdrex;
	struct clk *mout_mx_mspll_ccore;
	struct devfreq_event_dev **counter;
	int num_counters;
	u64 last_overflow_ts[2];
	unsigned long load;
	unsigned long total;
	bool in_irq_mode;
};
/* Build a struct timing_reg initializer for a named register bit-field */
#define TIMING_FIELD(t_name, t_bit_beg, t_bit_end) \
	{ .name = t_name, .bit_beg = t_bit_beg, .bit_end = t_bit_end }

/*
 * Shift a raw timing value into its register position.
 * NOTE(review): the value is only shifted to bit_beg, not masked against
 * bit_end — callers are presumably trusted to pass values that fit; confirm.
 */
#define TIMING_VAL2REG(timing, t_val)			\
({							\
		u32 __val;				\
		__val = (t_val) << (timing)->bit_beg;	\
		__val;					\
})

/* Description of one timing bit-field within a DREX timing register */
struct timing_reg {
	char *name;		/* timing parameter name (e.g. "tRFC") */
	int bit_beg;		/* lowest bit of the field */
	int bit_end;		/* highest bit of the field */
	unsigned int val;
};
/* Bit-field layout of the TIMINGROW registers */
static const struct timing_reg timing_row_reg_fields[] = {
	TIMING_FIELD("tRFC", 24, 31),
	TIMING_FIELD("tRRD", 20, 23),
	TIMING_FIELD("tRP", 16, 19),
	TIMING_FIELD("tRCD", 12, 15),
	TIMING_FIELD("tRC", 6, 11),
	TIMING_FIELD("tRAS", 0, 5),
};

/* Bit-field layout of the TIMINGDATA registers */
static const struct timing_reg timing_data_reg_fields[] = {
	TIMING_FIELD("tWTR", 28, 31),
	TIMING_FIELD("tWR", 24, 27),
	TIMING_FIELD("tRTP", 20, 23),
	TIMING_FIELD("tW2W-C2C", 14, 14),
	TIMING_FIELD("tR2R-C2C", 12, 12),
	TIMING_FIELD("WL", 8, 11),
	TIMING_FIELD("tDQSCK", 4, 7),
	TIMING_FIELD("RL", 0, 3),
};

/* Bit-field layout of the TIMINGPOWER registers */
static const struct timing_reg timing_power_reg_fields[] = {
	TIMING_FIELD("tFAW", 26, 31),
	TIMING_FIELD("tXSR", 16, 25),
	TIMING_FIELD("tXP", 8, 15),
	TIMING_FIELD("tCKE", 4, 7),
	TIMING_FIELD("tMRD", 0, 3),
};

/* Total number of timing fields across all three register layouts */
#define TIMING_COUNT (ARRAY_SIZE(timing_row_reg_fields) + \
		      ARRAY_SIZE(timing_data_reg_fields) + \
		      ARRAY_SIZE(timing_power_reg_fields))
  218. static int exynos5_counters_set_event(struct exynos5_dmc *dmc)
  219. {
  220. int i, ret;
  221. for (i = 0; i < dmc->num_counters; i++) {
  222. if (!dmc->counter[i])
  223. continue;
  224. ret = devfreq_event_set_event(dmc->counter[i]);
  225. if (ret < 0)
  226. return ret;
  227. }
  228. return 0;
  229. }
  230. static int exynos5_counters_enable_edev(struct exynos5_dmc *dmc)
  231. {
  232. int i, ret;
  233. for (i = 0; i < dmc->num_counters; i++) {
  234. if (!dmc->counter[i])
  235. continue;
  236. ret = devfreq_event_enable_edev(dmc->counter[i]);
  237. if (ret < 0)
  238. return ret;
  239. }
  240. return 0;
  241. }
  242. static int exynos5_counters_disable_edev(struct exynos5_dmc *dmc)
  243. {
  244. int i, ret;
  245. for (i = 0; i < dmc->num_counters; i++) {
  246. if (!dmc->counter[i])
  247. continue;
  248. ret = devfreq_event_disable_edev(dmc->counter[i]);
  249. if (ret < 0)
  250. return ret;
  251. }
  252. return 0;
  253. }
  254. /**
  255. * find_target_freq_idx() - Finds requested frequency in local DMC configuration
  256. * @dmc: device for which the information is checked
  257. * @target_rate: requested frequency in KHz
  258. *
  259. * Seeks in the local DMC driver structure for the requested frequency value
  260. * and returns index or error value.
  261. */
  262. static int find_target_freq_idx(struct exynos5_dmc *dmc,
  263. unsigned long target_rate)
  264. {
  265. int i;
  266. for (i = dmc->opp_count - 1; i >= 0; i--)
  267. if (dmc->opp[i].freq_hz <= target_rate)
  268. return i;
  269. return -EINVAL;
  270. }
  271. /**
  272. * exynos5_switch_timing_regs() - Changes bank register set for DRAM timings
  273. * @dmc: device for which the new settings is going to be applied
  274. * @set: boolean variable passing set value
  275. *
  276. * Changes the register set, which holds timing parameters.
  277. * There is two register sets: 0 and 1. The register set 0
  278. * is used in normal operation when the clock is provided from main PLL.
  279. * The bank register set 1 is used when the main PLL frequency is going to be
  280. * changed and the clock is taken from alternative, stable source.
  281. * This function switches between these banks according to the
  282. * currently used clock source.
  283. */
  284. static int exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
  285. {
  286. unsigned int reg;
  287. int ret;
  288. ret = regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, &reg);
  289. if (ret)
  290. return ret;
  291. if (set)
  292. reg |= EXYNOS5_TIMING_SET_SWI;
  293. else
  294. reg &= ~EXYNOS5_TIMING_SET_SWI;
  295. regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, reg);
  296. return 0;
  297. }
  298. /**
  299. * exynos5_init_freq_table() - Initialized PM OPP framework
  300. * @dmc: DMC device for which the frequencies are used for OPP init
  301. * @profile: devfreq device's profile
  302. *
  303. * Populate the devfreq device's OPP table based on current frequency, voltage.
  304. */
  305. static int exynos5_init_freq_table(struct exynos5_dmc *dmc,
  306. struct devfreq_dev_profile *profile)
  307. {
  308. struct device *dev = dmc->dev;
  309. int i, ret;
  310. int idx;
  311. unsigned long freq;
  312. ret = devm_pm_opp_of_add_table(dev);
  313. if (ret < 0) {
  314. dev_err(dev, "Failed to get OPP table\n");
  315. return ret;
  316. }
  317. dmc->opp_count = dev_pm_opp_get_opp_count(dev);
  318. dmc->opp = devm_kmalloc_array(dev, dmc->opp_count,
  319. sizeof(struct dmc_opp_table), GFP_KERNEL);
  320. if (!dmc->opp)
  321. return -ENOMEM;
  322. idx = dmc->opp_count - 1;
  323. for (i = 0, freq = ULONG_MAX; i < dmc->opp_count; i++, freq--) {
  324. struct dev_pm_opp *opp;
  325. opp = dev_pm_opp_find_freq_floor(dev, &freq);
  326. if (IS_ERR(opp))
  327. return PTR_ERR(opp);
  328. dmc->opp[idx - i].freq_hz = freq;
  329. dmc->opp[idx - i].volt_uv = dev_pm_opp_get_voltage(opp);
  330. dev_pm_opp_put(opp);
  331. }
  332. return 0;
  333. }
  334. /**
  335. * exynos5_set_bypass_dram_timings() - Low-level changes of the DRAM timings
  336. * @dmc: device for which the new settings is going to be applied
  337. *
  338. * Low-level function for changing timings for DRAM memory clocking from
  339. * 'bypass' clock source (fixed frequency @400MHz).
  340. * It uses timing bank registers set 1.
  341. */
  342. static void exynos5_set_bypass_dram_timings(struct exynos5_dmc *dmc)
  343. {
  344. writel(EXYNOS5_AREF_NORMAL,
  345. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);
  346. writel(dmc->bypass_timing_row,
  347. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW1);
  348. writel(dmc->bypass_timing_row,
  349. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW1);
  350. writel(dmc->bypass_timing_data,
  351. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA1);
  352. writel(dmc->bypass_timing_data,
  353. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA1);
  354. writel(dmc->bypass_timing_power,
  355. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER1);
  356. writel(dmc->bypass_timing_power,
  357. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER1);
  358. }
  359. /**
  360. * exynos5_dram_change_timings() - Low-level changes of the DRAM final timings
  361. * @dmc: device for which the new settings is going to be applied
  362. * @target_rate: target frequency of the DMC
  363. *
  364. * Low-level function for changing timings for DRAM memory operating from main
  365. * clock source (BPLL), which can have different frequencies. Thus, each
  366. * frequency must have corresponding timings register values in order to keep
  367. * the needed delays.
  368. * It uses timing bank registers set 0.
  369. */
  370. static int exynos5_dram_change_timings(struct exynos5_dmc *dmc,
  371. unsigned long target_rate)
  372. {
  373. int idx;
  374. for (idx = dmc->opp_count - 1; idx >= 0; idx--)
  375. if (dmc->opp[idx].freq_hz <= target_rate)
  376. break;
  377. if (idx < 0)
  378. return -EINVAL;
  379. writel(EXYNOS5_AREF_NORMAL,
  380. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);
  381. writel(dmc->timing_row[idx],
  382. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW0);
  383. writel(dmc->timing_row[idx],
  384. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW0);
  385. writel(dmc->timing_data[idx],
  386. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA0);
  387. writel(dmc->timing_data[idx],
  388. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA0);
  389. writel(dmc->timing_power[idx],
  390. dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER0);
  391. writel(dmc->timing_power[idx],
  392. dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER0);
  393. return 0;
  394. }
  395. /**
  396. * exynos5_dmc_align_target_voltage() - Sets the final voltage for the DMC
  397. * @dmc: device for which it is going to be set
  398. * @target_volt: new voltage which is chosen to be final
  399. *
  400. * Function tries to align voltage to the safe level for 'normal' mode.
  401. * It checks the need of higher voltage and changes the value. The target
  402. * voltage might be lower that currently set and still the system will be
  403. * stable.
  404. */
  405. static int exynos5_dmc_align_target_voltage(struct exynos5_dmc *dmc,
  406. unsigned long target_volt)
  407. {
  408. int ret = 0;
  409. if (dmc->curr_volt <= target_volt)
  410. return 0;
  411. ret = regulator_set_voltage(dmc->vdd_mif, target_volt,
  412. target_volt);
  413. if (!ret)
  414. dmc->curr_volt = target_volt;
  415. return ret;
  416. }
  417. /**
  418. * exynos5_dmc_align_bypass_voltage() - Sets the voltage for the DMC
  419. * @dmc: device for which it is going to be set
  420. * @target_volt: new voltage which is chosen to be final
  421. *
  422. * Function tries to align voltage to the safe level for the 'bypass' mode.
  423. * It checks the need of higher voltage and changes the value.
  424. * The target voltage must not be less than currently needed, because
  425. * for current frequency the device might become unstable.
  426. */
  427. static int exynos5_dmc_align_bypass_voltage(struct exynos5_dmc *dmc,
  428. unsigned long target_volt)
  429. {
  430. int ret = 0;
  431. if (dmc->curr_volt >= target_volt)
  432. return 0;
  433. ret = regulator_set_voltage(dmc->vdd_mif, target_volt,
  434. target_volt);
  435. if (!ret)
  436. dmc->curr_volt = target_volt;
  437. return ret;
  438. }
  439. /**
  440. * exynos5_dmc_align_bypass_dram_timings() - Chooses and sets DRAM timings
  441. * @dmc: device for which it is going to be set
  442. * @target_rate: new frequency which is chosen to be final
  443. *
  444. * Function changes the DRAM timings for the temporary 'bypass' mode.
  445. */
  446. static int exynos5_dmc_align_bypass_dram_timings(struct exynos5_dmc *dmc,
  447. unsigned long target_rate)
  448. {
  449. int idx = find_target_freq_idx(dmc, target_rate);
  450. if (idx < 0)
  451. return -EINVAL;
  452. exynos5_set_bypass_dram_timings(dmc);
  453. return 0;
  454. }
  455. /**
  456. * exynos5_dmc_switch_to_bypass_configuration() - Switching to temporary clock
  457. * @dmc: DMC device for which the switching is going to happen
  458. * @target_rate: new frequency which is going to be set as a final
  459. * @target_volt: new voltage which is going to be set as a final
  460. *
  461. * Function configures DMC and clocks for operating in temporary 'bypass' mode.
  462. * This mode is used only temporary but if required, changes voltage and timings
  463. * for DRAM chips. It switches the main clock to stable clock source for the
  464. * period of the main PLL reconfiguration.
  465. */
  466. static int
  467. exynos5_dmc_switch_to_bypass_configuration(struct exynos5_dmc *dmc,
  468. unsigned long target_rate,
  469. unsigned long target_volt)
  470. {
  471. int ret;
  472. /*
  473. * Having higher voltage for a particular frequency does not harm
  474. * the chip. Use it for the temporary frequency change when one
  475. * voltage manipulation might be avoided.
  476. */
  477. ret = exynos5_dmc_align_bypass_voltage(dmc, target_volt);
  478. if (ret)
  479. return ret;
  480. /*
  481. * Longer delays for DRAM does not cause crash, the opposite does.
  482. */
  483. ret = exynos5_dmc_align_bypass_dram_timings(dmc, target_rate);
  484. if (ret)
  485. return ret;
  486. /*
  487. * Delays are long enough, so use them for the new coming clock.
  488. */
  489. ret = exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
  490. return ret;
  491. }
/**
 * exynos5_dmc_change_freq_and_volt() - Changes voltage and frequency of the DMC
 * using safe procedure
 * @dmc: device for which the frequency is going to be changed
 * @target_rate: requested new frequency
 * @target_volt: requested voltage which corresponds to the new frequency
 *
 * The DMC frequency change procedure requires a few steps.
 * The main requirement is to change the clock source in the clk mux
 * for the time of main clock PLL locking. The assumption is that the
 * alternative clock source set as parent is stable.
 * The second parent's clock frequency is fixed to 400MHz, it is named 'bypass'
 * clock. This requires alignment in DRAM timing parameters for the new
 * T-period. There is two bank sets for keeping DRAM
 * timings: set 0 and set 1. The set 0 is used when main clock source is
 * chosen. The 2nd set of regs is used for 'bypass' clock. Switching between
 * the two bank sets is part of the process.
 * The voltage must also be aligned to the minimum required level. There is
 * this intermediate step with switching to 'bypass' parent clock source.
 * if the old voltage is lower, it requires an increase of the voltage level.
 * The complexity of the voltage manipulation is hidden in low level function.
 * In this function there is last alignment of the voltage level at the end.
 */
static int
exynos5_dmc_change_freq_and_volt(struct exynos5_dmc *dmc,
				 unsigned long target_rate,
				 unsigned long target_volt)
{
	int ret;

	ret = exynos5_dmc_switch_to_bypass_configuration(dmc, target_rate,
							 target_volt);
	if (ret)
		return ret;

	/*
	 * Voltage is set at least to a level needed for this frequency,
	 * so switching clock source is safe now.
	 *
	 * NOTE(review): clk_prepare_enable() return values are ignored in
	 * this sequence - confirm whether a failure here is acceptable or
	 * should unwind.
	 */
	clk_prepare_enable(dmc->fout_spll);
	clk_prepare_enable(dmc->mout_spll);
	clk_prepare_enable(dmc->mout_mx_mspll_ccore);

	/* Re-parent the DMC clock onto the stable 'bypass' (SPLL) path */
	ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_mx_mspll_ccore);
	if (ret)
		goto disable_clocks;

	/*
	 * We are safe to increase the timings for current bypass frequency.
	 * Thanks to this the settings will be ready for the upcoming clock
	 * source change.
	 */
	exynos5_dram_change_timings(dmc, target_rate);

	/* Reprogram the main PLL while the DMC runs from the bypass clock */
	clk_set_rate(dmc->fout_bpll, target_rate);

	/* Switch back to timing bank set 0 (main-PLL timings) */
	ret = exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
	if (ret)
		goto disable_clocks;

	ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_bpll);
	if (ret)
		goto disable_clocks;

	/*
	 * Make sure if the voltage is not from 'bypass' settings and align to
	 * the right level for power efficiency.
	 */
	ret = exynos5_dmc_align_target_voltage(dmc, target_volt);

disable_clocks:
	/* Drop the references taken above, in reverse order */
	clk_disable_unprepare(dmc->mout_mx_mspll_ccore);
	clk_disable_unprepare(dmc->mout_spll);
	clk_disable_unprepare(dmc->fout_spll);

	return ret;
}
  559. /**
  560. * exynos5_dmc_get_volt_freq() - Gets the frequency and voltage from the OPP
  561. * table.
  562. * @dmc: device for which the frequency is going to be changed
  563. * @freq: requested frequency in KHz
  564. * @target_rate: returned frequency which is the same or lower than
  565. * requested
  566. * @target_volt: returned voltage which corresponds to the returned
  567. * frequency
  568. * @flags: devfreq flags provided for this frequency change request
  569. *
  570. * Function gets requested frequency and checks OPP framework for needed
  571. * frequency and voltage. It populates the values 'target_rate' and
  572. * 'target_volt' or returns error value when OPP framework fails.
  573. */
  574. static int exynos5_dmc_get_volt_freq(struct exynos5_dmc *dmc,
  575. unsigned long *freq,
  576. unsigned long *target_rate,
  577. unsigned long *target_volt, u32 flags)
  578. {
  579. struct dev_pm_opp *opp;
  580. opp = devfreq_recommended_opp(dmc->dev, freq, flags);
  581. if (IS_ERR(opp))
  582. return PTR_ERR(opp);
  583. *target_rate = dev_pm_opp_get_freq(opp);
  584. *target_volt = dev_pm_opp_get_voltage(opp);
  585. dev_pm_opp_put(opp);
  586. return 0;
  587. }
  588. /**
  589. * exynos5_dmc_target() - Function responsible for changing frequency of DMC
  590. * @dev: device for which the frequency is going to be changed
  591. * @freq: requested frequency in KHz
  592. * @flags: flags provided for this frequency change request
  593. *
  594. * An entry function provided to the devfreq framework which provides frequency
  595. * change of the DMC. The function gets the possible rate from OPP table based
  596. * on requested frequency. It calls the next function responsible for the
  597. * frequency and voltage change. In case of failure, does not set 'curr_rate'
  598. * and returns error value to the framework.
  599. */
  600. static int exynos5_dmc_target(struct device *dev, unsigned long *freq,
  601. u32 flags)
  602. {
  603. struct exynos5_dmc *dmc = dev_get_drvdata(dev);
  604. unsigned long target_rate = 0;
  605. unsigned long target_volt = 0;
  606. int ret;
  607. ret = exynos5_dmc_get_volt_freq(dmc, freq, &target_rate, &target_volt,
  608. flags);
  609. if (ret)
  610. return ret;
  611. if (target_rate == dmc->curr_rate)
  612. return 0;
  613. mutex_lock(&dmc->lock);
  614. ret = exynos5_dmc_change_freq_and_volt(dmc, target_rate, target_volt);
  615. if (ret) {
  616. mutex_unlock(&dmc->lock);
  617. return ret;
  618. }
  619. dmc->curr_rate = target_rate;
  620. mutex_unlock(&dmc->lock);
  621. return 0;
  622. }
  623. /**
  624. * exynos5_counters_get() - Gets the performance counters values.
  625. * @dmc: device for which the counters are going to be checked
  626. * @load_count: variable which is populated with counter value
  627. * @total_count: variable which is used as 'wall clock' reference
  628. *
  629. * Function which provides performance counters values. It sums up counters for
  630. * two DMC channels. The 'total_count' is used as a reference and max value.
  631. * The ratio 'load_count/total_count' shows the busy percentage [0%, 100%].
  632. */
  633. static int exynos5_counters_get(struct exynos5_dmc *dmc,
  634. unsigned long *load_count,
  635. unsigned long *total_count)
  636. {
  637. unsigned long total = 0;
  638. struct devfreq_event_data event;
  639. int ret, i;
  640. *load_count = 0;
  641. /* Take into account only read+write counters, but stop all */
  642. for (i = 0; i < dmc->num_counters; i++) {
  643. if (!dmc->counter[i])
  644. continue;
  645. ret = devfreq_event_get_event(dmc->counter[i], &event);
  646. if (ret < 0)
  647. return ret;
  648. *load_count += event.load_count;
  649. if (total < event.total_count)
  650. total = event.total_count;
  651. }
  652. *total_count = total;
  653. return 0;
  654. }
/**
 * exynos5_dmc_start_perf_events() - Setup and start performance event counters
 * @dmc: device for which the counters are going to be checked
 * @beg_value: initial value for the counter
 *
 * Function which enables needed counters, interrupts and sets initial values
 * then starts the counters. Every register write is mirrored on both DREX
 * channels (drexi0 and drexi1).
 */
static void exynos5_dmc_start_perf_events(struct exynos5_dmc *dmc,
					  u32 beg_value)
{
	/* Enable interrupts for counter 2 */
	writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENS_PPC);
	writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENS_PPC);

	/* Enable counter 2 and CCNT */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENS_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENS_PPC);

	/* Clear overflow flag for all counters */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);

	/* Reset all counters */
	writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi1 + DREX_PMNC_PPC);

	/*
	 * Set start value for the counters, the number of samples that
	 * will be gathered is calculated as: 0xffffffff - beg_value
	 * (i.e. the counters count up from beg_value; presumably the
	 * enabled counter-2 interrupt fires on overflow — confirm with
	 * the DREX PPC documentation).
	 */
	writel(beg_value, dmc->base_drexi0 + DREX_PMCNT2_PPC);
	writel(beg_value, dmc->base_drexi1 + DREX_PMCNT2_PPC);

	/* Start all counters */
	writel(PPC_ENABLE, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(PPC_ENABLE, dmc->base_drexi1 + DREX_PMNC_PPC);
}
  688. /**
  689. * exynos5_dmc_perf_events_calc() - Calculate utilization
  690. * @dmc: device for which the counters are going to be checked
  691. * @diff_ts: time between last interrupt and current one
  692. *
  693. * Function which calculates needed utilization for the devfreq governor.
  694. * It prepares values for 'busy_time' and 'total_time' based on elapsed time
  695. * between interrupts, which approximates utilization.
  696. */
  697. static void exynos5_dmc_perf_events_calc(struct exynos5_dmc *dmc, u64 diff_ts)
  698. {
  699. /*
  700. * This is a simple algorithm for managing traffic on DMC.
  701. * When there is almost no load the counters overflow every 4s,
  702. * no mater the DMC frequency.
  703. * The high load might be approximated using linear function.
  704. * Knowing that, simple calculation can provide 'busy_time' and
  705. * 'total_time' to the devfreq governor which picks up target
  706. * frequency.
  707. * We want a fast ramp up and slow decay in frequency change function.
  708. */
  709. if (diff_ts < PERF_EVENT_UP_DOWN_THRESHOLD) {
  710. /*
  711. * Set higher utilization for the simple_ondemand governor.
  712. * The governor should increase the frequency of the DMC.
  713. */
  714. dmc->load = 70;
  715. dmc->total = 100;
  716. } else {
  717. /*
  718. * Set low utilization for the simple_ondemand governor.
  719. * The governor should decrease the frequency of the DMC.
  720. */
  721. dmc->load = 35;
  722. dmc->total = 100;
  723. }
  724. dev_dbg(dmc->dev, "diff_ts=%llu\n", diff_ts);
  725. }
/**
 * exynos5_dmc_perf_events_check() - Checks the status of the counters
 * @dmc: device for which the counters are going to be checked
 *
 * Function which is called from threaded IRQ to check the counters state
 * and to call approximation for the needed utilization. It stops the
 * counters, determines which DREX channel overflowed, computes the time
 * since that channel's previous overflow and restarts the counters.
 */
static void exynos5_dmc_perf_events_check(struct exynos5_dmc *dmc)
{
	u32 val;
	u64 diff_ts, ts;

	/* Timestamp taken before touching the hardware. */
	ts = ktime_get_ns();

	/* Stop all counters */
	writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);

	/*
	 * Check the source in interrupt flag registers (which channel).
	 * If drexi0 shows no flag, the interrupt is attributed to drexi1
	 * without re-checking its flag value for zero.
	 */
	val = readl(dmc->base_drexi0 + DREX_FLAG_PPC);
	if (val) {
		diff_ts = ts - dmc->last_overflow_ts[0];
		dmc->last_overflow_ts[0] = ts;
		dev_dbg(dmc->dev, "drex0 0xE050 val= 0x%08x\n", val);
	} else {
		val = readl(dmc->base_drexi1 + DREX_FLAG_PPC);
		diff_ts = ts - dmc->last_overflow_ts[1];
		dmc->last_overflow_ts[1] = ts;
		dev_dbg(dmc->dev, "drex1 0xE050 val= 0x%08x\n", val);
	}

	/* Translate the overflow interval into a utilization estimate ... */
	exynos5_dmc_perf_events_calc(dmc, diff_ts);

	/* ... and re-arm the counters for the next measurement window. */
	exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
}
/**
 * exynos5_dmc_enable_perf_events() - Enable performance events
 * @dmc: device for which the counters are going to be checked
 *
 * Function which sets up the needed environment for the counters:
 * enables the performance event clock, selects the monitored event and
 * initializes the overflow timestamps and utilization defaults.
 */
static void exynos5_dmc_enable_perf_events(struct exynos5_dmc *dmc)
{
	u64 ts;

	/* Enable Performance Event Clock */
	writel(PEREV_CLK_EN, dmc->base_drexi0 + DREX_PPCCLKCON);
	writel(PEREV_CLK_EN, dmc->base_drexi1 + DREX_PPCCLKCON);

	/* Select read transfers as performance event2 */
	writel(READ_TRANSFER_CH0, dmc->base_drexi0 + DREX_PEREV2CONFIG);
	writel(READ_TRANSFER_CH1, dmc->base_drexi1 + DREX_PEREV2CONFIG);

	/* Seed both channels' last-overflow timestamps with 'now'. */
	ts = ktime_get_ns();
	dmc->last_overflow_ts[0] = ts;
	dmc->last_overflow_ts[1] = ts;

	/* Devfreq shouldn't be faster than initialization, play safe though. */
	dmc->load = 99;
	dmc->total = 100;
}
/**
 * exynos5_dmc_disable_perf_events() - Disable performance events
 * @dmc: device for which the counters are going to be checked
 *
 * Function which stops, disables performance event counters and interrupts.
 * Mirror of exynos5_dmc_start_perf_events(): every write targets both
 * DREX channels.
 */
static void exynos5_dmc_disable_perf_events(struct exynos5_dmc *dmc)
{
	/* Stop all counters */
	writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);

	/* Disable interrupts for counter 2 */
	writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENC_PPC);
	writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENC_PPC);

	/* Disable counter 2 and CCNT */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENC_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENC_PPC);

	/* Clear overflow flag for all counters so none is left pending */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);
}
  799. /**
  800. * exynos5_dmc_get_status() - Read current DMC performance statistics.
  801. * @dev: device for which the statistics are requested
  802. * @stat: structure which has statistic fields
  803. *
  804. * Function reads the DMC performance counters and calculates 'busy_time'
  805. * and 'total_time'. To protect from overflow, the values are shifted right
  806. * by 10. After read out the counters are setup to count again.
  807. */
  808. static int exynos5_dmc_get_status(struct device *dev,
  809. struct devfreq_dev_status *stat)
  810. {
  811. struct exynos5_dmc *dmc = dev_get_drvdata(dev);
  812. unsigned long load, total;
  813. int ret;
  814. if (dmc->in_irq_mode) {
  815. mutex_lock(&dmc->lock);
  816. stat->current_frequency = dmc->curr_rate;
  817. mutex_unlock(&dmc->lock);
  818. stat->busy_time = dmc->load;
  819. stat->total_time = dmc->total;
  820. } else {
  821. ret = exynos5_counters_get(dmc, &load, &total);
  822. if (ret < 0)
  823. return -EINVAL;
  824. /* To protect from overflow, divide by 1024 */
  825. stat->busy_time = load >> 10;
  826. stat->total_time = total >> 10;
  827. ret = exynos5_counters_set_event(dmc);
  828. if (ret < 0) {
  829. dev_err(dev, "could not set event counter\n");
  830. return ret;
  831. }
  832. }
  833. return 0;
  834. }
  835. /**
  836. * exynos5_dmc_get_cur_freq() - Function returns current DMC frequency
  837. * @dev: device for which the framework checks operating frequency
  838. * @freq: returned frequency value
  839. *
  840. * It returns the currently used frequency of the DMC. The real operating
  841. * frequency might be lower when the clock source value could not be divided
  842. * to the requested value.
  843. */
  844. static int exynos5_dmc_get_cur_freq(struct device *dev, unsigned long *freq)
  845. {
  846. struct exynos5_dmc *dmc = dev_get_drvdata(dev);
  847. mutex_lock(&dmc->lock);
  848. *freq = dmc->curr_rate;
  849. mutex_unlock(&dmc->lock);
  850. return 0;
  851. }
/*
 * exynos5_dmc_df_profile - Devfreq governor's profile structure
 *
 * It provides to the devfreq framework needed functions and polling period.
 * Note: .initial_freq is filled in by exynos5_dmc_init_clks() and
 * .polling_ms is set at probe time when running in polling (non-IRQ) mode.
 */
static struct devfreq_dev_profile exynos5_dmc_df_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.target = exynos5_dmc_target,
	.get_dev_status = exynos5_dmc_get_status,
	.get_cur_freq = exynos5_dmc_get_cur_freq,
};
  863. /**
  864. * exynos5_dmc_align_init_freq() - Align initial frequency value
  865. * @dmc: device for which the frequency is going to be set
  866. * @bootloader_init_freq: initial frequency set by the bootloader in KHz
  867. *
  868. * The initial bootloader frequency, which is present during boot, might be
  869. * different that supported frequency values in the driver. It is possible
  870. * due to different PLL settings or used PLL as a source.
  871. * This function provides the 'initial_freq' for the devfreq framework
  872. * statistics engine which supports only registered values. Thus, some alignment
  873. * must be made.
  874. */
  875. static unsigned long
  876. exynos5_dmc_align_init_freq(struct exynos5_dmc *dmc,
  877. unsigned long bootloader_init_freq)
  878. {
  879. unsigned long aligned_freq;
  880. int idx;
  881. idx = find_target_freq_idx(dmc, bootloader_init_freq);
  882. if (idx >= 0)
  883. aligned_freq = dmc->opp[idx].freq_hz;
  884. else
  885. aligned_freq = dmc->opp[dmc->opp_count - 1].freq_hz;
  886. return aligned_freq;
  887. }
  888. /**
  889. * create_timings_aligned() - Create register values and align with standard
  890. * @dmc: device for which the frequency is going to be set
  891. * @reg_timing_row: array to fill with values for timing row register
  892. * @reg_timing_data: array to fill with values for timing data register
  893. * @reg_timing_power: array to fill with values for timing power register
  894. * @clk_period_ps: the period of the clock, known as tCK
  895. *
  896. * The function calculates timings and creates a register value ready for
  897. * a frequency transition. The register contains a few timings. They are
  898. * shifted by a known offset. The timing value is calculated based on memory
  899. * specyfication: minimal time required and minimal cycles required.
  900. */
  901. static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row,
  902. u32 *reg_timing_data, u32 *reg_timing_power,
  903. u32 clk_period_ps)
  904. {
  905. u32 val;
  906. const struct timing_reg *reg;
  907. if (clk_period_ps == 0)
  908. return -EINVAL;
  909. *reg_timing_row = 0;
  910. *reg_timing_data = 0;
  911. *reg_timing_power = 0;
  912. val = dmc->timings->tRFC / clk_period_ps;
  913. val += dmc->timings->tRFC % clk_period_ps ? 1 : 0;
  914. val = max(val, dmc->min_tck->tRFC);
  915. reg = &timing_row_reg_fields[0];
  916. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  917. val = dmc->timings->tRRD / clk_period_ps;
  918. val += dmc->timings->tRRD % clk_period_ps ? 1 : 0;
  919. val = max(val, dmc->min_tck->tRRD);
  920. reg = &timing_row_reg_fields[1];
  921. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  922. val = dmc->timings->tRPab / clk_period_ps;
  923. val += dmc->timings->tRPab % clk_period_ps ? 1 : 0;
  924. val = max(val, dmc->min_tck->tRPab);
  925. reg = &timing_row_reg_fields[2];
  926. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  927. val = dmc->timings->tRCD / clk_period_ps;
  928. val += dmc->timings->tRCD % clk_period_ps ? 1 : 0;
  929. val = max(val, dmc->min_tck->tRCD);
  930. reg = &timing_row_reg_fields[3];
  931. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  932. val = dmc->timings->tRC / clk_period_ps;
  933. val += dmc->timings->tRC % clk_period_ps ? 1 : 0;
  934. val = max(val, dmc->min_tck->tRC);
  935. reg = &timing_row_reg_fields[4];
  936. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  937. val = dmc->timings->tRAS / clk_period_ps;
  938. val += dmc->timings->tRAS % clk_period_ps ? 1 : 0;
  939. val = max(val, dmc->min_tck->tRAS);
  940. reg = &timing_row_reg_fields[5];
  941. *reg_timing_row |= TIMING_VAL2REG(reg, val);
  942. /* data related timings */
  943. val = dmc->timings->tWTR / clk_period_ps;
  944. val += dmc->timings->tWTR % clk_period_ps ? 1 : 0;
  945. val = max(val, dmc->min_tck->tWTR);
  946. reg = &timing_data_reg_fields[0];
  947. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  948. val = dmc->timings->tWR / clk_period_ps;
  949. val += dmc->timings->tWR % clk_period_ps ? 1 : 0;
  950. val = max(val, dmc->min_tck->tWR);
  951. reg = &timing_data_reg_fields[1];
  952. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  953. val = dmc->timings->tRTP / clk_period_ps;
  954. val += dmc->timings->tRTP % clk_period_ps ? 1 : 0;
  955. val = max(val, dmc->min_tck->tRTP);
  956. reg = &timing_data_reg_fields[2];
  957. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  958. val = dmc->timings->tW2W_C2C / clk_period_ps;
  959. val += dmc->timings->tW2W_C2C % clk_period_ps ? 1 : 0;
  960. val = max(val, dmc->min_tck->tW2W_C2C);
  961. reg = &timing_data_reg_fields[3];
  962. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  963. val = dmc->timings->tR2R_C2C / clk_period_ps;
  964. val += dmc->timings->tR2R_C2C % clk_period_ps ? 1 : 0;
  965. val = max(val, dmc->min_tck->tR2R_C2C);
  966. reg = &timing_data_reg_fields[4];
  967. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  968. val = dmc->timings->tWL / clk_period_ps;
  969. val += dmc->timings->tWL % clk_period_ps ? 1 : 0;
  970. val = max(val, dmc->min_tck->tWL);
  971. reg = &timing_data_reg_fields[5];
  972. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  973. val = dmc->timings->tDQSCK / clk_period_ps;
  974. val += dmc->timings->tDQSCK % clk_period_ps ? 1 : 0;
  975. val = max(val, dmc->min_tck->tDQSCK);
  976. reg = &timing_data_reg_fields[6];
  977. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  978. val = dmc->timings->tRL / clk_period_ps;
  979. val += dmc->timings->tRL % clk_period_ps ? 1 : 0;
  980. val = max(val, dmc->min_tck->tRL);
  981. reg = &timing_data_reg_fields[7];
  982. *reg_timing_data |= TIMING_VAL2REG(reg, val);
  983. /* power related timings */
  984. val = dmc->timings->tFAW / clk_period_ps;
  985. val += dmc->timings->tFAW % clk_period_ps ? 1 : 0;
  986. val = max(val, dmc->min_tck->tFAW);
  987. reg = &timing_power_reg_fields[0];
  988. *reg_timing_power |= TIMING_VAL2REG(reg, val);
  989. val = dmc->timings->tXSR / clk_period_ps;
  990. val += dmc->timings->tXSR % clk_period_ps ? 1 : 0;
  991. val = max(val, dmc->min_tck->tXSR);
  992. reg = &timing_power_reg_fields[1];
  993. *reg_timing_power |= TIMING_VAL2REG(reg, val);
  994. val = dmc->timings->tXP / clk_period_ps;
  995. val += dmc->timings->tXP % clk_period_ps ? 1 : 0;
  996. val = max(val, dmc->min_tck->tXP);
  997. reg = &timing_power_reg_fields[2];
  998. *reg_timing_power |= TIMING_VAL2REG(reg, val);
  999. val = dmc->timings->tCKE / clk_period_ps;
  1000. val += dmc->timings->tCKE % clk_period_ps ? 1 : 0;
  1001. val = max(val, dmc->min_tck->tCKE);
  1002. reg = &timing_power_reg_fields[3];
  1003. *reg_timing_power |= TIMING_VAL2REG(reg, val);
  1004. val = dmc->timings->tMRD / clk_period_ps;
  1005. val += dmc->timings->tMRD % clk_period_ps ? 1 : 0;
  1006. val = max(val, dmc->min_tck->tMRD);
  1007. reg = &timing_power_reg_fields[4];
  1008. *reg_timing_power |= TIMING_VAL2REG(reg, val);
  1009. return 0;
  1010. }
  1011. /**
  1012. * of_get_dram_timings() - helper function for parsing DT settings for DRAM
  1013. * @dmc: device for which the frequency is going to be set
  1014. *
  1015. * The function parses DT entries with DRAM information.
  1016. */
  1017. static int of_get_dram_timings(struct exynos5_dmc *dmc)
  1018. {
  1019. int ret = 0;
  1020. struct device *dev = dmc->dev;
  1021. int idx;
  1022. u32 freq_mhz, clk_period_ps;
  1023. struct device_node *np_ddr __free(device_node) =
  1024. of_parse_phandle(dev->of_node, "device-handle", 0);
  1025. if (!np_ddr) {
  1026. dev_warn(dev, "could not find 'device-handle' in DT\n");
  1027. return -EINVAL;
  1028. }
  1029. dmc->timing_row = devm_kmalloc_array(dev, TIMING_COUNT,
  1030. sizeof(u32), GFP_KERNEL);
  1031. if (!dmc->timing_row)
  1032. return -ENOMEM;
  1033. dmc->timing_data = devm_kmalloc_array(dev, TIMING_COUNT,
  1034. sizeof(u32), GFP_KERNEL);
  1035. if (!dmc->timing_data)
  1036. return -ENOMEM;
  1037. dmc->timing_power = devm_kmalloc_array(dev, TIMING_COUNT,
  1038. sizeof(u32), GFP_KERNEL);
  1039. if (!dmc->timing_power)
  1040. return -ENOMEM;
  1041. dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dev,
  1042. DDR_TYPE_LPDDR3,
  1043. &dmc->timings_arr_size);
  1044. if (!dmc->timings) {
  1045. dev_warn(dev, "could not get timings from DT\n");
  1046. return -EINVAL;
  1047. }
  1048. dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dev);
  1049. if (!dmc->min_tck) {
  1050. dev_warn(dev, "could not get tck from DT\n");
  1051. return -EINVAL;
  1052. }
  1053. /* Sorted array of OPPs with frequency ascending */
  1054. for (idx = 0; idx < dmc->opp_count; idx++) {
  1055. freq_mhz = dmc->opp[idx].freq_hz / 1000000;
  1056. clk_period_ps = 1000000 / freq_mhz;
  1057. ret = create_timings_aligned(dmc, &dmc->timing_row[idx],
  1058. &dmc->timing_data[idx],
  1059. &dmc->timing_power[idx],
  1060. clk_period_ps);
  1061. }
  1062. /* Take the highest frequency's timings as 'bypass' */
  1063. dmc->bypass_timing_row = dmc->timing_row[idx - 1];
  1064. dmc->bypass_timing_data = dmc->timing_data[idx - 1];
  1065. dmc->bypass_timing_power = dmc->timing_power[idx - 1];
  1066. return ret;
  1067. }
  1068. /**
  1069. * exynos5_dmc_init_clks() - Initialize clocks needed for DMC operation.
  1070. * @dmc: DMC structure containing needed fields
  1071. *
  1072. * Get the needed clocks defined in DT device, enable and set the right parents.
  1073. * Read current frequency and initialize the initial rate for governor.
  1074. */
  1075. static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
  1076. {
  1077. int ret;
  1078. struct device *dev = dmc->dev;
  1079. unsigned long target_volt = 0;
  1080. unsigned long target_rate = 0;
  1081. unsigned int tmp;
  1082. dmc->fout_spll = devm_clk_get(dev, "fout_spll");
  1083. if (IS_ERR(dmc->fout_spll))
  1084. return PTR_ERR(dmc->fout_spll);
  1085. dmc->fout_bpll = devm_clk_get(dev, "fout_bpll");
  1086. if (IS_ERR(dmc->fout_bpll))
  1087. return PTR_ERR(dmc->fout_bpll);
  1088. dmc->mout_mclk_cdrex = devm_clk_get(dev, "mout_mclk_cdrex");
  1089. if (IS_ERR(dmc->mout_mclk_cdrex))
  1090. return PTR_ERR(dmc->mout_mclk_cdrex);
  1091. dmc->mout_bpll = devm_clk_get(dev, "mout_bpll");
  1092. if (IS_ERR(dmc->mout_bpll))
  1093. return PTR_ERR(dmc->mout_bpll);
  1094. dmc->mout_mx_mspll_ccore = devm_clk_get(dev, "mout_mx_mspll_ccore");
  1095. if (IS_ERR(dmc->mout_mx_mspll_ccore))
  1096. return PTR_ERR(dmc->mout_mx_mspll_ccore);
  1097. dmc->mout_spll = devm_clk_get(dev, "ff_dout_spll2");
  1098. if (IS_ERR(dmc->mout_spll)) {
  1099. dmc->mout_spll = devm_clk_get(dev, "mout_sclk_spll");
  1100. if (IS_ERR(dmc->mout_spll))
  1101. return PTR_ERR(dmc->mout_spll);
  1102. }
  1103. /*
  1104. * Convert frequency to KHz values and set it for the governor.
  1105. */
  1106. dmc->curr_rate = clk_get_rate(dmc->mout_mclk_cdrex);
  1107. dmc->curr_rate = exynos5_dmc_align_init_freq(dmc, dmc->curr_rate);
  1108. exynos5_dmc_df_profile.initial_freq = dmc->curr_rate;
  1109. ret = exynos5_dmc_get_volt_freq(dmc, &dmc->curr_rate, &target_rate,
  1110. &target_volt, 0);
  1111. if (ret)
  1112. return ret;
  1113. dmc->curr_volt = target_volt;
  1114. ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
  1115. if (ret)
  1116. return ret;
  1117. clk_prepare_enable(dmc->fout_bpll);
  1118. clk_prepare_enable(dmc->mout_bpll);
  1119. /*
  1120. * Some bootloaders do not set clock routes correctly.
  1121. * Stop one path in clocks to PHY.
  1122. */
  1123. regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, &tmp);
  1124. tmp &= ~(BIT(1) | BIT(0));
  1125. regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, tmp);
  1126. return 0;
  1127. }
/**
 * exynos5_performance_counters_init() - Initializes performance DMC's counters
 * @dmc: DMC for which it does the setup
 *
 * Initialization of performance counters in DMC for estimating usage.
 * The counter's values are used for calculation of a memory bandwidth and based
 * on that the governor changes the frequency.
 * The counters are not used when the governor is GOVERNOR_USERSPACE.
 */
static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
{
	struct device *dev = dmc->dev;
	int ret, i;

	dmc->num_counters = devfreq_event_get_edev_count(dev, "devfreq-events");
	if (dmc->num_counters < 0) {
		dev_err(dev, "could not get devfreq-event counters\n");
		return dmc->num_counters;
	}

	dmc->counter = devm_kcalloc(dev, dmc->num_counters,
				    sizeof(*dmc->counter), GFP_KERNEL);
	if (!dmc->counter)
		return -ENOMEM;

	/*
	 * Resolve each counter phandle. NOTE(review): every lookup failure
	 * is mapped to -EPROBE_DEFER, even errors that will never resolve —
	 * presumably intentional so probing retries once the event driver
	 * appears; confirm before relying on it.
	 */
	for (i = 0; i < dmc->num_counters; i++) {
		dmc->counter[i] =
			devfreq_event_get_edev_by_phandle(dev, "devfreq-events", i);
		if (IS_ERR_OR_NULL(dmc->counter[i]))
			return -EPROBE_DEFER;
	}

	ret = exynos5_counters_enable_edev(dmc);
	if (ret < 0) {
		dev_err(dev, "could not enable event counter\n");
		return ret;
	}

	ret = exynos5_counters_set_event(dmc);
	if (ret < 0) {
		/* Undo the enable above before bailing out. */
		exynos5_counters_disable_edev(dmc);
		dev_err(dev, "could not set event counter\n");
		return ret;
	}

	return 0;
}
  1169. /**
  1170. * exynos5_dmc_set_pause_on_switching() - Controls a pause feature in DMC
  1171. * @dmc: device which is used for changing this feature
  1172. *
  1173. * There is a need of pausing DREX DMC when divider or MUX in clock tree
  1174. * changes its configuration. In such situation access to the memory is blocked
  1175. * in DMC automatically. This feature is used when clock frequency change
  1176. * request appears and touches clock tree.
  1177. */
  1178. static inline int exynos5_dmc_set_pause_on_switching(struct exynos5_dmc *dmc)
  1179. {
  1180. unsigned int val;
  1181. int ret;
  1182. ret = regmap_read(dmc->clk_regmap, CDREX_PAUSE, &val);
  1183. if (ret)
  1184. return ret;
  1185. val |= 1UL;
  1186. regmap_write(dmc->clk_regmap, CDREX_PAUSE, val);
  1187. return 0;
  1188. }
  1189. static irqreturn_t dmc_irq_thread(int irq, void *priv)
  1190. {
  1191. int res;
  1192. struct exynos5_dmc *dmc = priv;
  1193. mutex_lock(&dmc->df->lock);
  1194. exynos5_dmc_perf_events_check(dmc);
  1195. res = update_devfreq(dmc->df);
  1196. mutex_unlock(&dmc->df->lock);
  1197. if (res)
  1198. dev_warn(dmc->dev, "devfreq failed with %d\n", res);
  1199. return IRQ_HANDLED;
  1200. }
/**
 * exynos5_dmc_probe() - Probe function for the DMC driver
 * @pdev: platform device for which the driver is going to be initialized
 *
 * Initialize basic components: clocks, regulators, performance counters, etc.
 * Read out product version and based on the information setup
 * internal structures for the controller (frequency and voltage) and for DRAM
 * memory parameters: timings for each operating frequency.
 * Register new devfreq device for controlling DVFS of the DMC.
 */
static int exynos5_dmc_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct exynos5_dmc *dmc;
	int irq[2];

	dmc = devm_kzalloc(dev, sizeof(*dmc), GFP_KERNEL);
	if (!dmc)
		return -ENOMEM;

	mutex_init(&dmc->lock);

	dmc->dev = dev;
	platform_set_drvdata(pdev, dmc);

	/* Map both DREX channel register blocks (resources 0 and 1). */
	dmc->base_drexi0 = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmc->base_drexi0))
		return PTR_ERR(dmc->base_drexi0);
	dmc->base_drexi1 = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(dmc->base_drexi1))
		return PTR_ERR(dmc->base_drexi1);

	dmc->clk_regmap = syscon_regmap_lookup_by_phandle(np,
							  "samsung,syscon-clk");
	if (IS_ERR(dmc->clk_regmap))
		return PTR_ERR(dmc->clk_regmap);

	ret = exynos5_init_freq_table(dmc, &exynos5_dmc_df_profile);
	if (ret) {
		dev_warn(dev, "couldn't initialize frequency settings\n");
		return ret;
	}

	dmc->vdd_mif = devm_regulator_get(dev, "vdd");
	if (IS_ERR(dmc->vdd_mif)) {
		ret = PTR_ERR(dmc->vdd_mif);
		return ret;
	}

	/* From this point on, clocks are enabled and must be unwound on error. */
	ret = exynos5_dmc_init_clks(dmc);
	if (ret)
		return ret;

	ret = of_get_dram_timings(dmc);
	if (ret) {
		dev_warn(dev, "couldn't initialize timings settings\n");
		goto remove_clocks;
	}

	ret = exynos5_dmc_set_pause_on_switching(dmc);
	if (ret) {
		dev_warn(dev, "couldn't get access to PAUSE register\n");
		goto remove_clocks;
	}

	/*
	 * There are two modes in which the driver works: polling or IRQ.
	 * IRQ mode requires both DREX interrupts and the 'irqmode' switch.
	 */
	irq[0] = platform_get_irq_byname(pdev, "drex_0");
	irq[1] = platform_get_irq_byname(pdev, "drex_1");
	if (irq[0] > 0 && irq[1] > 0 && irqmode) {
		ret = devm_request_threaded_irq(dev, irq[0], NULL,
						dmc_irq_thread, IRQF_ONESHOT,
						dev_name(dev), dmc);
		if (ret) {
			dev_err(dev, "couldn't grab IRQ\n");
			goto remove_clocks;
		}

		ret = devm_request_threaded_irq(dev, irq[1], NULL,
						dmc_irq_thread, IRQF_ONESHOT,
						dev_name(dev), dmc);
		if (ret) {
			dev_err(dev, "couldn't grab IRQ\n");
			goto remove_clocks;
		}

		/*
		 * Setup default thresholds for the devfreq governor.
		 * The values are chosen based on experiments.
		 */
		dmc->gov_data.upthreshold = 55;
		dmc->gov_data.downdifferential = 5;

		exynos5_dmc_enable_perf_events(dmc);

		dmc->in_irq_mode = 1;
	} else {
		ret = exynos5_performance_counters_init(dmc);
		if (ret) {
			dev_warn(dev, "couldn't probe performance counters\n");
			goto remove_clocks;
		}

		/*
		 * Setup default thresholds for the devfreq governor.
		 * The values are chosen based on experiments.
		 */
		dmc->gov_data.upthreshold = 10;
		dmc->gov_data.downdifferential = 5;

		exynos5_dmc_df_profile.polling_ms = 100;
	}

	dmc->df = devm_devfreq_add_device(dev, &exynos5_dmc_df_profile,
					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
					  &dmc->gov_data);
	if (IS_ERR(dmc->df)) {
		ret = PTR_ERR(dmc->df);
		goto err_devfreq_add;
	}

	/* Counters are armed last, once devfreq can consume their results. */
	if (dmc->in_irq_mode)
		exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);

	dev_info(dev, "DMC initialized, in irq mode: %d\n", dmc->in_irq_mode);

	return 0;

err_devfreq_add:
	if (dmc->in_irq_mode)
		exynos5_dmc_disable_perf_events(dmc);
	else
		exynos5_counters_disable_edev(dmc);
remove_clocks:
	clk_disable_unprepare(dmc->mout_bpll);
	clk_disable_unprepare(dmc->fout_bpll);

	return ret;
}
/**
 * exynos5_dmc_remove() - Remove function for the platform device
 * @pdev: platform device which is going to be removed
 *
 * The function relies on 'devm' framework function which automatically
 * clean the device's resources. It just calls explicitly disable function for
 * the performance counters and releases the clocks enabled at probe time.
 */
static void exynos5_dmc_remove(struct platform_device *pdev)
{
	struct exynos5_dmc *dmc = dev_get_drvdata(&pdev->dev);

	/* Symmetric with the probe-time setup: IRQ mode vs. polling mode. */
	if (dmc->in_irq_mode)
		exynos5_dmc_disable_perf_events(dmc);
	else
		exynos5_counters_disable_edev(dmc);

	clk_disable_unprepare(dmc->mout_bpll);
	clk_disable_unprepare(dmc->fout_bpll);
}
/* Device-tree match table: one supported SoC variant. */
static const struct of_device_id exynos5_dmc_of_match[] = {
	{ .compatible = "samsung,exynos5422-dmc", },
	{ },
};
MODULE_DEVICE_TABLE(of, exynos5_dmc_of_match);
/* Platform driver glue and module metadata. */
static struct platform_driver exynos5_dmc_platdrv = {
	.probe = exynos5_dmc_probe,
	.remove_new = exynos5_dmc_remove,
	.driver = {
		.name = "exynos5-dmc",
		.of_match_table = exynos5_dmc_of_match,
	},
};
module_platform_driver(exynos5_dmc_platdrv);
MODULE_DESCRIPTION("Driver for Exynos5422 Dynamic Memory Controller dynamic frequency and voltage change");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lukasz Luba");