/* tegra20-emc.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Tegra20 External Memory Controller driver
  4. *
  5. * Author: Dmitry Osipenko <digetx@gmail.com>
  6. */
  7. #include <linux/bitfield.h>
  8. #include <linux/clk.h>
  9. #include <linux/clk/tegra.h>
  10. #include <linux/debugfs.h>
  11. #include <linux/devfreq.h>
  12. #include <linux/err.h>
  13. #include <linux/interconnect-provider.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/io.h>
  16. #include <linux/iopoll.h>
  17. #include <linux/kernel.h>
  18. #include <linux/module.h>
  19. #include <linux/mutex.h>
  20. #include <linux/of.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/pm_opp.h>
  23. #include <linux/slab.h>
  24. #include <linux/sort.h>
  25. #include <linux/types.h>
  26. #include <soc/tegra/common.h>
  27. #include <soc/tegra/fuse.h>
  28. #include "../jedec_ddr.h"
  29. #include "../of_memory.h"
  30. #include "mc.h"
/* EMC register offsets */
#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
#define EMC_DBG 0x008
#define EMC_ADR_CFG_0 0x010
#define EMC_TIMING_CONTROL 0x028
#define EMC_RC 0x02c
#define EMC_RFC 0x030
#define EMC_RAS 0x034
#define EMC_RP 0x038
#define EMC_R2W 0x03c
#define EMC_W2R 0x040
#define EMC_R2P 0x044
#define EMC_W2P 0x048
#define EMC_RD_RCD 0x04c
#define EMC_WR_RCD 0x050
#define EMC_RRD 0x054
#define EMC_REXT 0x058
#define EMC_WDV 0x05c
#define EMC_QUSE 0x060
#define EMC_QRST 0x064
#define EMC_QSAFE 0x068
#define EMC_RDV 0x06c
#define EMC_REFRESH 0x070
#define EMC_BURST_REFRESH_NUM 0x074
#define EMC_PDEX2WR 0x078
#define EMC_PDEX2RD 0x07c
#define EMC_PCHG2PDEN 0x080
#define EMC_ACT2PDEN 0x084
#define EMC_AR2PDEN 0x088
#define EMC_RW2PDEN 0x08c
#define EMC_TXSR 0x090
#define EMC_TCKE 0x094
#define EMC_TFAW 0x098
#define EMC_TRPAB 0x09c
#define EMC_TCLKSTABLE 0x0a0
#define EMC_TCLKSTOP 0x0a4
#define EMC_TREFBW 0x0a8
#define EMC_QUSE_EXTRA 0x0ac
#define EMC_ODT_WRITE 0x0b0
#define EMC_ODT_READ 0x0b4
#define EMC_MRR 0x0ec
#define EMC_FBIO_CFG5 0x104
#define EMC_FBIO_CFG6 0x114
#define EMC_STAT_CONTROL 0x160
#define EMC_STAT_LLMC_CONTROL 0x178
#define EMC_STAT_PWR_CLOCK_LIMIT 0x198
#define EMC_STAT_PWR_CLOCKS 0x19c
#define EMC_STAT_PWR_COUNT 0x1a0
#define EMC_AUTO_CAL_INTERVAL 0x2a8
#define EMC_CFG_2 0x2b8
#define EMC_CFG_DIG_DLL 0x2bc
#define EMC_DLL_XFORM_DQS 0x2c0
#define EMC_DLL_XFORM_QUSE 0x2c4
#define EMC_ZCAL_REF_CNT 0x2e0
#define EMC_ZCAL_WAIT_CNT 0x2e4
#define EMC_CFG_CLKTRIM_0 0x2d0
#define EMC_CFG_CLKTRIM_1 0x2d4
#define EMC_CFG_CLKTRIM_2 0x2d8

/* EMC_CFG_2: DRAM state to enter on EMC clock change */
#define EMC_CLKCHANGE_REQ_ENABLE BIT(0)
#define EMC_CLKCHANGE_PD_ENABLE BIT(1)
#define EMC_CLKCHANGE_SR_ENABLE BIT(2)

/* EMC_TIMING_CONTROL: manually latch shadow timing registers */
#define EMC_TIMING_UPDATE BIT(0)

/* EMC_INTSTATUS / EMC_INTMASK bits */
#define EMC_REFRESH_OVERFLOW_INT BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
#define EMC_MRR_DIVLD_INT BIT(5)

/* EMC_DBG bits */
#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
#define EMC_DBG_FORCE_UPDATE BIT(2)
#define EMC_DBG_READ_DQM_CTRL BIT(9)
#define EMC_DBG_CFG_PRIORITY BIT(24)

/* EMC_FBIO_CFG5 fields */
#define EMC_FBIO_CFG5_DRAM_WIDTH_X16 BIT(4)
#define EMC_FBIO_CFG5_DRAM_TYPE GENMASK(1, 0)

/* EMC_MRR fields (LPDDR2 mode-register read) */
#define EMC_MRR_DEV_SELECTN GENMASK(31, 30)
#define EMC_MRR_MRR_MA GENMASK(23, 16)
#define EMC_MRR_MRR_DATA GENMASK(15, 0)

/* EMC_ADR_CFG_0: number of attached memory devices, minus one */
#define EMC_ADR_CFG_0_EMEM_NUMDEV GENMASK(25, 24)

/* EMC_STAT_CONTROL power-gather control values */
#define EMC_PWR_GATHER_CLEAR (1 << 8)
#define EMC_PWR_GATHER_DISABLE (2 << 8)
#define EMC_PWR_GATHER_ENABLE (3 << 8)
/* DRAM type as encoded in the EMC_FBIO_CFG5_DRAM_TYPE field */
enum emc_dram_type {
	DRAM_TYPE_RESERVED,
	DRAM_TYPE_DDR1,
	DRAM_TYPE_LPDDR2,
	DRAM_TYPE_DDR2,
};
/*
 * Shadowed timing registers, in the order the per-rate values appear in the
 * device-tree "nvidia,emc-registers" property (see load_one_timing_from_dt()
 * and emc_prepare_timing_change()).
 */
static const u16 emc_timing_registers[] = {
	EMC_RC,
	EMC_RFC,
	EMC_RAS,
	EMC_RP,
	EMC_R2W,
	EMC_W2R,
	EMC_R2P,
	EMC_W2P,
	EMC_RD_RCD,
	EMC_WR_RCD,
	EMC_RRD,
	EMC_REXT,
	EMC_WDV,
	EMC_QUSE,
	EMC_QRST,
	EMC_QSAFE,
	EMC_RDV,
	EMC_REFRESH,
	EMC_BURST_REFRESH_NUM,
	EMC_PDEX2WR,
	EMC_PDEX2RD,
	EMC_PCHG2PDEN,
	EMC_ACT2PDEN,
	EMC_AR2PDEN,
	EMC_RW2PDEN,
	EMC_TXSR,
	EMC_TCKE,
	EMC_TFAW,
	EMC_TRPAB,
	EMC_TCLKSTABLE,
	EMC_TCLKSTOP,
	EMC_TREFBW,
	EMC_QUSE_EXTRA,
	EMC_FBIO_CFG6,
	EMC_ODT_WRITE,
	EMC_ODT_READ,
	EMC_FBIO_CFG5,
	EMC_CFG_DIG_DLL,
	EMC_DLL_XFORM_DQS,
	EMC_DLL_XFORM_QUSE,
	EMC_ZCAL_REF_CNT,
	EMC_ZCAL_WAIT_CNT,
	EMC_AUTO_CAL_INTERVAL,
	EMC_CFG_CLKTRIM_0,
	EMC_CFG_CLKTRIM_1,
	EMC_CFG_CLKTRIM_2,
};
/* one memory timing: EMC clock rate plus a value per shadowed register */
struct emc_timing {
	unsigned long rate;
	u32 data[ARRAY_SIZE(emc_timing_registers)];
};
/* independent sources of min/max EMC clock-rate requests */
enum emc_rate_request_type {
	EMC_RATE_DEVFREQ,
	EMC_RATE_DEBUG,
	EMC_RATE_ICC,
	EMC_RATE_TYPE_MAX,
};
/* clock-rate range requested by one emc_rate_request_type source */
struct emc_rate_request {
	unsigned long min_rate;
	unsigned long max_rate;
};
/* driver state for the Tegra20 External Memory Controller */
struct tegra_emc {
	struct device *dev;
	struct tegra_mc *mc;			/* memory controller (ICC aggregation ops) */
	struct icc_provider provider;		/* interconnect provider */
	struct notifier_block clk_nb;		/* EMC clock rate-change notifier */
	struct clk *clk;
	void __iomem *regs;
	unsigned int dram_bus_width;		/* 16 or 32, read from EMC_FBIO_CFG5 */

	struct emc_timing *timings;		/* sorted by ascending rate */
	unsigned int num_timings;

	struct {
		struct dentry *root;
		unsigned long min_rate;		/* rate floor exposed via debugfs */
		unsigned long max_rate;		/* rate ceiling exposed via debugfs */
	} debugfs;

	/*
	 * There are multiple sources in the EMC driver which could request
	 * a min/max clock rate, these rates are contained in this array.
	 */
	struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];

	/* protect shared rate-change code path */
	struct mutex rate_lock;

	struct devfreq_simple_ondemand_data ondemand_data;

	/* memory chip identity information */
	union lpddr2_basic_config4 basic_conf4;
	unsigned int manufacturer_id;
	unsigned int revision_id1;
	unsigned int revision_id2;
	bool mrr_error;				/* set when a mode-register read timed out */
};
  208. static irqreturn_t tegra_emc_isr(int irq, void *data)
  209. {
  210. struct tegra_emc *emc = data;
  211. u32 intmask = EMC_REFRESH_OVERFLOW_INT;
  212. u32 status;
  213. status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
  214. if (!status)
  215. return IRQ_NONE;
  216. /* notify about HW problem */
  217. if (status & EMC_REFRESH_OVERFLOW_INT)
  218. dev_err_ratelimited(emc->dev,
  219. "refresh request overflow timeout\n");
  220. /* clear interrupts */
  221. writel_relaxed(status, emc->regs + EMC_INTSTATUS);
  222. return IRQ_HANDLED;
  223. }
  224. static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
  225. unsigned long rate)
  226. {
  227. struct emc_timing *timing = NULL;
  228. unsigned int i;
  229. for (i = 0; i < emc->num_timings; i++) {
  230. if (emc->timings[i].rate >= rate) {
  231. timing = &emc->timings[i];
  232. break;
  233. }
  234. }
  235. if (!timing) {
  236. dev_err(emc->dev, "no timing for rate %lu\n", rate);
  237. return NULL;
  238. }
  239. return timing;
  240. }
  241. static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
  242. {
  243. struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
  244. unsigned int i;
  245. if (!timing)
  246. return -EINVAL;
  247. dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
  248. __func__, timing->rate, rate);
  249. /* program shadow registers */
  250. for (i = 0; i < ARRAY_SIZE(timing->data); i++)
  251. writel_relaxed(timing->data[i],
  252. emc->regs + emc_timing_registers[i]);
  253. /* wait until programming has settled */
  254. readl_relaxed(emc->regs + emc_timing_registers[i - 1]);
  255. return 0;
  256. }
  257. static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
  258. {
  259. int err;
  260. u32 v;
  261. dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);
  262. if (flush) {
  263. /* manually initiate memory timing update */
  264. writel_relaxed(EMC_TIMING_UPDATE,
  265. emc->regs + EMC_TIMING_CONTROL);
  266. return 0;
  267. }
  268. err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
  269. v & EMC_CLKCHANGE_COMPLETE_INT,
  270. 1, 100);
  271. if (err) {
  272. dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
  273. return err;
  274. }
  275. return 0;
  276. }
  277. static int tegra_emc_clk_change_notify(struct notifier_block *nb,
  278. unsigned long msg, void *data)
  279. {
  280. struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
  281. struct clk_notifier_data *cnd = data;
  282. int err;
  283. switch (msg) {
  284. case PRE_RATE_CHANGE:
  285. err = emc_prepare_timing_change(emc, cnd->new_rate);
  286. break;
  287. case ABORT_RATE_CHANGE:
  288. err = emc_prepare_timing_change(emc, cnd->old_rate);
  289. if (err)
  290. break;
  291. err = emc_complete_timing_change(emc, true);
  292. break;
  293. case POST_RATE_CHANGE:
  294. err = emc_complete_timing_change(emc, false);
  295. break;
  296. default:
  297. return NOTIFY_DONE;
  298. }
  299. return notifier_from_errno(err);
  300. }
  301. static int load_one_timing_from_dt(struct tegra_emc *emc,
  302. struct emc_timing *timing,
  303. struct device_node *node)
  304. {
  305. u32 rate;
  306. int err;
  307. if (!of_device_is_compatible(node, "nvidia,tegra20-emc-table")) {
  308. dev_err(emc->dev, "incompatible DT node: %pOF\n", node);
  309. return -EINVAL;
  310. }
  311. err = of_property_read_u32(node, "clock-frequency", &rate);
  312. if (err) {
  313. dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
  314. node, err);
  315. return err;
  316. }
  317. err = of_property_read_u32_array(node, "nvidia,emc-registers",
  318. timing->data,
  319. ARRAY_SIZE(emc_timing_registers));
  320. if (err) {
  321. dev_err(emc->dev,
  322. "timing %pOF: failed to read emc timing data: %d\n",
  323. node, err);
  324. return err;
  325. }
  326. /*
  327. * The EMC clock rate is twice the bus rate, and the bus rate is
  328. * measured in kHz.
  329. */
  330. timing->rate = rate * 2 * 1000;
  331. dev_dbg(emc->dev, "%s: %pOF: EMC rate %lu\n",
  332. __func__, node, timing->rate);
  333. return 0;
  334. }
  335. static int cmp_timings(const void *_a, const void *_b)
  336. {
  337. const struct emc_timing *a = _a;
  338. const struct emc_timing *b = _b;
  339. if (a->rate < b->rate)
  340. return -1;
  341. if (a->rate > b->rate)
  342. return 1;
  343. return 0;
  344. }
  345. static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
  346. struct device_node *node)
  347. {
  348. struct emc_timing *timing;
  349. int child_count;
  350. int err;
  351. child_count = of_get_child_count(node);
  352. if (!child_count) {
  353. dev_err(emc->dev, "no memory timings in DT node: %pOF\n", node);
  354. return -EINVAL;
  355. }
  356. emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
  357. GFP_KERNEL);
  358. if (!emc->timings)
  359. return -ENOMEM;
  360. timing = emc->timings;
  361. for_each_child_of_node_scoped(node, child) {
  362. if (of_node_name_eq(child, "lpddr2"))
  363. continue;
  364. err = load_one_timing_from_dt(emc, timing++, child);
  365. if (err)
  366. return err;
  367. emc->num_timings++;
  368. }
  369. sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
  370. NULL);
  371. dev_info_once(emc->dev,
  372. "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
  373. emc->num_timings,
  374. tegra_read_ram_code(),
  375. emc->timings[0].rate / 1000000,
  376. emc->timings[emc->num_timings - 1].rate / 1000000);
  377. return 0;
  378. }
/*
 * Select the DT node holding the memory timings for this board.
 *
 * Without "nvidia,use-ram-code" the timings live directly under the EMC
 * node. Otherwise each "emc-tables" child is matched first by its
 * "nvidia,ram-code" property against the boot-strap RAM code, and — when
 * that doesn't match — by comparing its "lpddr2" chip description against
 * the identity read from the SDRAM itself (see emc_read_lpddr_sdram_info()).
 *
 * Returns the matching node (reference held) or NULL when no timings apply.
 */
static struct device_node *
tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
{
	struct device *dev = emc->dev;
	struct device_node *np;
	u32 value, ram_code;
	int err;

	/* chip identity is unknown, so no table can be matched safely */
	if (emc->mrr_error) {
		dev_warn(dev, "memory timings skipped due to MRR error\n");
		return NULL;
	}

	if (of_get_child_count(dev->of_node) == 0) {
		dev_info_once(dev, "device-tree doesn't have memory timings\n");
		return NULL;
	}

	/* timings are not keyed by RAM code: use the EMC node itself */
	if (!of_property_read_bool(dev->of_node, "nvidia,use-ram-code"))
		return of_node_get(dev->of_node);

	ram_code = tegra_read_ram_code();

	for_each_child_of_node(dev->of_node, np) {
		if (!of_node_name_eq(np, "emc-tables"))
			continue;
		err = of_property_read_u32(np, "nvidia,ram-code", &value);
		if (err || value != ram_code) {
			/*
			 * RAM code didn't match; fall back to matching the
			 * table by the chip identity described in the
			 * optional "lpddr2" sub-node. Negative IDs in the
			 * description mean "don't care".
			 */
			struct device_node *lpddr2_np;
			bool cfg_mismatches = false;

			lpddr2_np = of_get_child_by_name(np, "lpddr2");
			if (lpddr2_np) {
				const struct lpddr2_info *info;

				info = of_lpddr2_get_info(lpddr2_np, dev);
				if (info) {
					if (info->manufacturer_id >= 0 &&
					    info->manufacturer_id != emc->manufacturer_id)
						cfg_mismatches = true;

					if (info->revision_id1 >= 0 &&
					    info->revision_id1 != emc->revision_id1)
						cfg_mismatches = true;

					if (info->revision_id2 >= 0 &&
					    info->revision_id2 != emc->revision_id2)
						cfg_mismatches = true;

					if (info->density != emc->basic_conf4.density)
						cfg_mismatches = true;

					if (info->io_width != emc->basic_conf4.io_width)
						cfg_mismatches = true;

					if (info->arch_type != emc->basic_conf4.arch_type)
						cfg_mismatches = true;
				} else {
					dev_err(dev, "failed to parse %pOF\n", lpddr2_np);
					cfg_mismatches = true;
				}

				of_node_put(lpddr2_np);
			} else {
				cfg_mismatches = true;
			}

			if (cfg_mismatches) {
				continue;
			}
		}

		return np;
	}

	dev_err(dev, "no memory timings for RAM code %u found in device tree\n",
		ram_code);

	return NULL;
}
  442. static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
  443. unsigned int emem_dev,
  444. unsigned int register_addr,
  445. unsigned int *register_data)
  446. {
  447. u32 memory_dev = emem_dev ? 1 : 2;
  448. u32 val, mr_mask = 0xff;
  449. int err;
  450. /* clear data-valid interrupt status */
  451. writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);
  452. /* issue mode register read request */
  453. val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
  454. val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);
  455. writel_relaxed(val, emc->regs + EMC_MRR);
  456. /* wait for the LPDDR2 data-valid interrupt */
  457. err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
  458. val & EMC_MRR_DIVLD_INT,
  459. 1, 100);
  460. if (err) {
  461. dev_err(emc->dev, "mode register %u read failed: %d\n",
  462. register_addr, err);
  463. emc->mrr_error = true;
  464. return err;
  465. }
  466. /* read out mode register data */
  467. val = readl_relaxed(emc->regs + EMC_MRR);
  468. *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;
  469. return 0;
  470. }
/*
 * Read the identity of LPDDR2 device @emem_dev (manufacturer, revisions
 * and basic configuration) into the driver state, optionally logging it.
 * Read failures are recorded in emc->mrr_error by the helper.
 */
static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
				      unsigned int emem_dev,
				      bool print_out)
{
	/* these registers are standard for all LPDDR JEDEC memory chips */
	emc_read_lpddr_mode_register(emc, emem_dev, 5, &emc->manufacturer_id);
	emc_read_lpddr_mode_register(emc, emem_dev, 6, &emc->revision_id1);
	emc_read_lpddr_mode_register(emc, emem_dev, 7, &emc->revision_id2);
	emc_read_lpddr_mode_register(emc, emem_dev, 8, &emc->basic_conf4.value);

	if (!print_out)
		return;

	/* decode basic_conf4 fields per JEDEC LPDDR2 MR8 encoding */
	dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
		 emem_dev, emc->manufacturer_id,
		 lpddr2_jedec_manufacturer(emc->manufacturer_id),
		 emc->revision_id1, emc->revision_id2,
		 4 >> emc->basic_conf4.arch_type,
		 64 << emc->basic_conf4.density,
		 32 >> emc->basic_conf4.io_width);
}
/*
 * One-time hardware setup: validate the bootloader's DRAM auto-suspend
 * configuration, enable the EMC-CAR handshake, initialize interrupts,
 * sanitize debug settings and detect the attached memory configuration.
 */
static int emc_setup_hw(struct tegra_emc *emc)
{
	u32 emc_cfg, emc_dbg, emc_fbio, emc_adr_cfg;
	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
	static bool print_sdram_info_once;
	enum emc_dram_type dram_type;
	const char *dram_type_str;
	unsigned int emem_numdev;

	emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);

	/*
	 * Depending on a memory type, DRAM should enter either self-refresh
	 * or power-down state on EMC clock change.
	 */
	if (!(emc_cfg & EMC_CLKCHANGE_PD_ENABLE) &&
	    !(emc_cfg & EMC_CLKCHANGE_SR_ENABLE)) {
		dev_err(emc->dev,
			"bootloader didn't specify DRAM auto-suspend mode\n");
		return -EINVAL;
	}

	/* enable EMC and CAR to handshake on PLL divider/source changes */
	emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;
	writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);

	/* initialize interrupt */
	writel_relaxed(intmask, emc->regs + EMC_INTMASK);
	writel_relaxed(intmask, emc->regs + EMC_INTSTATUS);

	/* ensure that unwanted debug features are disabled */
	emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
	emc_dbg |= EMC_DBG_CFG_PRIORITY;
	emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
	emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
	emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
	writel_relaxed(emc_dbg, emc->regs + EMC_DBG);

	/* detect DRAM bus width from the FBIO configuration */
	emc_fbio = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
	if (emc_fbio & EMC_FBIO_CFG5_DRAM_WIDTH_X16)
		emc->dram_bus_width = 16;
	else
		emc->dram_bus_width = 32;

	dram_type = FIELD_GET(EMC_FBIO_CFG5_DRAM_TYPE, emc_fbio);

	switch (dram_type) {
	case DRAM_TYPE_RESERVED:
		dram_type_str = "INVALID";
		break;
	case DRAM_TYPE_DDR1:
		dram_type_str = "DDR1";
		break;
	case DRAM_TYPE_LPDDR2:
		dram_type_str = "LPDDR2";
		break;
	case DRAM_TYPE_DDR2:
		dram_type_str = "DDR2";
		break;
	}

	/* EMEM_NUMDEV holds the device count minus one */
	emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG_0);
	emem_numdev = FIELD_GET(EMC_ADR_CFG_0_EMEM_NUMDEV, emc_adr_cfg) + 1;

	dev_info_once(emc->dev, "%ubit DRAM bus, %u %s %s attached\n",
		      emc->dram_bus_width, emem_numdev, dram_type_str,
		      emem_numdev == 2 ? "devices" : "device");

	/* identify each LPDDR2 chip; log details only on the first probe */
	if (dram_type == DRAM_TYPE_LPDDR2) {
		while (emem_numdev--)
			emc_read_lpddr_sdram_info(emc, emem_numdev,
						  !print_sdram_info_once);
		print_sdram_info_once = true;
	}

	return 0;
}
/*
 * Round-rate callback handed to the Tegra20 clock driver: pick the best
 * supported timing rate for @rate within the [min_rate, max_rate] window.
 */
static long emc_round_rate(unsigned long rate,
			   unsigned long min_rate,
			   unsigned long max_rate,
			   void *arg)
{
	struct emc_timing *timing = NULL;
	struct tegra_emc *emc = arg;
	unsigned int i;

	/* without a timing table, the current rate is the only valid one */
	if (!emc->num_timings)
		return clk_get_rate(emc->clk);

	/* the floor can't exceed the fastest available timing */
	min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);

	for (i = 0; i < emc->num_timings; i++) {
		/* skip too-slow timings, but always consider the last one */
		if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
			continue;

		/* candidate is above the ceiling: step back to the previous */
		if (emc->timings[i].rate > max_rate) {
			i = max(i, 1u) - 1;

			if (emc->timings[i].rate < min_rate)
				break;
		}

		if (emc->timings[i].rate < min_rate)
			continue;

		timing = &emc->timings[i];
		break;
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
			rate, min_rate, max_rate);
		return -EINVAL;
	}

	return timing->rate;
}
  586. static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
  587. {
  588. unsigned int i;
  589. for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
  590. emc->requested_rate[i].min_rate = 0;
  591. emc->requested_rate[i].max_rate = ULONG_MAX;
  592. }
  593. }
  594. static int emc_request_rate(struct tegra_emc *emc,
  595. unsigned long new_min_rate,
  596. unsigned long new_max_rate,
  597. enum emc_rate_request_type type)
  598. {
  599. struct emc_rate_request *req = emc->requested_rate;
  600. unsigned long min_rate = 0, max_rate = ULONG_MAX;
  601. unsigned int i;
  602. int err;
  603. /* select minimum and maximum rates among the requested rates */
  604. for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
  605. if (i == type) {
  606. min_rate = max(new_min_rate, min_rate);
  607. max_rate = min(new_max_rate, max_rate);
  608. } else {
  609. min_rate = max(req->min_rate, min_rate);
  610. max_rate = min(req->max_rate, max_rate);
  611. }
  612. }
  613. if (min_rate > max_rate) {
  614. dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
  615. __func__, type, min_rate, max_rate);
  616. return -ERANGE;
  617. }
  618. /*
  619. * EMC rate-changes should go via OPP API because it manages voltage
  620. * changes.
  621. */
  622. err = dev_pm_opp_set_rate(emc->dev, min_rate);
  623. if (err)
  624. return err;
  625. emc->requested_rate[type].min_rate = new_min_rate;
  626. emc->requested_rate[type].max_rate = new_max_rate;
  627. return 0;
  628. }
  629. static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
  630. enum emc_rate_request_type type)
  631. {
  632. struct emc_rate_request *req = &emc->requested_rate[type];
  633. int ret;
  634. mutex_lock(&emc->rate_lock);
  635. ret = emc_request_rate(emc, rate, req->max_rate, type);
  636. mutex_unlock(&emc->rate_lock);
  637. return ret;
  638. }
  639. static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
  640. enum emc_rate_request_type type)
  641. {
  642. struct emc_rate_request *req = &emc->requested_rate[type];
  643. int ret;
  644. mutex_lock(&emc->rate_lock);
  645. ret = emc_request_rate(emc, req->min_rate, rate, type);
  646. mutex_unlock(&emc->rate_lock);
  647. return ret;
  648. }
/*
 * debugfs interface
 *
 * The memory controller driver exposes some files in debugfs that can be used
 * to control the EMC frequency. The top-level directory can be found here:
 *
 *   /sys/kernel/debug/emc
 *
 * It contains the following files:
 *
 * - available_rates: This file contains a list of valid, space-separated
 *   EMC frequencies.
 *
 * - min_rate: Writing a value to this file sets the given frequency as the
 *   floor of the permitted range. If this is higher than the currently
 *   configured EMC frequency, this will cause the frequency to be
 *   increased so that it stays within the valid range.
 *
 * - max_rate: Similarly to the min_rate file, writing a value to this file
 *   sets the given frequency as the ceiling of the permitted range. If
 *   the value is lower than the currently configured EMC frequency, this
 *   will cause the frequency to be decreased so that it stays within the
 *   valid range.
 */
  673. static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
  674. {
  675. unsigned int i;
  676. for (i = 0; i < emc->num_timings; i++)
  677. if (rate == emc->timings[i].rate)
  678. return true;
  679. return false;
  680. }
  681. static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
  682. {
  683. struct tegra_emc *emc = s->private;
  684. const char *prefix = "";
  685. unsigned int i;
  686. for (i = 0; i < emc->num_timings; i++) {
  687. seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
  688. prefix = " ";
  689. }
  690. seq_puts(s, "\n");
  691. return 0;
  692. }
  693. DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
/* debugfs "min_rate" read accessor */
static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
{
	struct tegra_emc *emc = data;

	*rate = emc->debugfs.min_rate;

	return 0;
}

/* debugfs "min_rate" write accessor: sets the floor of the rate range */
static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
{
	struct tegra_emc *emc = data;
	int err;

	/* only rates present in the timing table are acceptable */
	if (!tegra_emc_validate_rate(emc, rate))
		return -EINVAL;

	err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
	if (err < 0)
		return err;

	emc->debugfs.min_rate = rate;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
			tegra_emc_debug_min_rate_get,
			tegra_emc_debug_min_rate_set, "%llu\n");
/* debugfs "max_rate" read accessor */
static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
{
	struct tegra_emc *emc = data;

	*rate = emc->debugfs.max_rate;

	return 0;
}

/* debugfs "max_rate" write accessor: sets the ceiling of the rate range */
static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
{
	struct tegra_emc *emc = data;
	int err;

	/* only rates present in the timing table are acceptable */
	if (!tegra_emc_validate_rate(emc, rate))
		return -EINVAL;

	err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
	if (err < 0)
		return err;

	emc->debugfs.max_rate = rate;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
			tegra_emc_debug_max_rate_get,
			tegra_emc_debug_max_rate_set, "%llu\n");
  736. static void tegra_emc_debugfs_init(struct tegra_emc *emc)
  737. {
  738. struct device *dev = emc->dev;
  739. unsigned int i;
  740. int err;
  741. emc->debugfs.min_rate = ULONG_MAX;
  742. emc->debugfs.max_rate = 0;
  743. for (i = 0; i < emc->num_timings; i++) {
  744. if (emc->timings[i].rate < emc->debugfs.min_rate)
  745. emc->debugfs.min_rate = emc->timings[i].rate;
  746. if (emc->timings[i].rate > emc->debugfs.max_rate)
  747. emc->debugfs.max_rate = emc->timings[i].rate;
  748. }
  749. if (!emc->num_timings) {
  750. emc->debugfs.min_rate = clk_get_rate(emc->clk);
  751. emc->debugfs.max_rate = emc->debugfs.min_rate;
  752. }
  753. err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
  754. emc->debugfs.max_rate);
  755. if (err < 0) {
  756. dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
  757. emc->debugfs.min_rate, emc->debugfs.max_rate,
  758. emc->clk);
  759. }
  760. emc->debugfs.root = debugfs_create_dir("emc", NULL);
  761. debugfs_create_file("available_rates", 0444, emc->debugfs.root,
  762. emc, &tegra_emc_debug_available_rates_fops);
  763. debugfs_create_file("min_rate", 0644, emc->debugfs.root,
  764. emc, &tegra_emc_debug_min_rate_fops);
  765. debugfs_create_file("max_rate", 0644, emc->debugfs.root,
  766. emc, &tegra_emc_debug_max_rate_fops);
  767. }
/* map an ICC provider back to its enclosing tegra_emc instance */
static inline struct tegra_emc *
to_tegra_emc_provider(struct icc_provider *provider)
{
	return container_of(provider, struct tegra_emc, provider);
}
  773. static struct icc_node_data *
  774. emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
  775. {
  776. struct icc_provider *provider = data;
  777. struct icc_node_data *ndata;
  778. struct icc_node *node;
  779. /* External Memory is the only possible ICC route */
  780. list_for_each_entry(node, &provider->nodes, node_list) {
  781. if (node->id != TEGRA_ICC_EMEM)
  782. continue;
  783. ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
  784. if (!ndata)
  785. return ERR_PTR(-ENOMEM);
  786. /*
  787. * SRC and DST nodes should have matching TAG in order to have
  788. * it set by default for a requested path.
  789. */
  790. ndata->tag = TEGRA_MC_ICC_TAG_ISO;
  791. ndata->node = node;
  792. return ndata;
  793. }
  794. return ERR_PTR(-EPROBE_DEFER);
  795. }
  796. static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
  797. {
  798. struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
  799. unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
  800. unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
  801. unsigned long long rate = max(avg_bw, peak_bw);
  802. unsigned int dram_data_bus_width_bytes;
  803. int err;
  804. /*
  805. * Tegra20 EMC runs on x2 clock rate of SDRAM bus because DDR data
  806. * is sampled on both clock edges. This means that EMC clock rate
  807. * equals to the peak data-rate.
  808. */
  809. dram_data_bus_width_bytes = emc->dram_bus_width / 8;
  810. do_div(rate, dram_data_bus_width_bytes);
  811. rate = min_t(u64, rate, U32_MAX);
  812. err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
  813. if (err)
  814. return err;
  815. return 0;
  816. }
/*
 * Register this driver as an interconnect provider with a two-node
 * topology: EMC -> EMEM (DRAM).  Bandwidth requests that reach the EMEM
 * node are turned into EMC clock-rate votes by emc_icc_set().
 *
 * Returns 0 on success or a negative errno; failure is logged here so
 * the caller may treat interconnect support as optional.
 */
static int tegra_emc_interconnect_init(struct tegra_emc *emc)
{
	const struct tegra_mc_soc *soc;
	struct icc_node *node;
	int err;

	/* the MC driver provides the SoC-specific aggregate callback below */
	emc->mc = devm_tegra_memory_controller_get(emc->dev);
	if (IS_ERR(emc->mc))
		return PTR_ERR(emc->mc);

	soc = emc->mc->soc;

	emc->provider.dev = emc->dev;
	emc->provider.set = emc_icc_set;
	/* xlate_extended() receives this pointer as its 'data' argument */
	emc->provider.data = &emc->provider;
	emc->provider.aggregate = soc->icc_ops->aggregate;
	emc->provider.xlate_extended = emc_of_icc_xlate_extended;

	/* must precede node creation; registration comes after the nodes */
	icc_provider_init(&emc->provider);

	/* create External Memory Controller node */
	node = icc_node_create(TEGRA_ICC_EMC);
	if (IS_ERR(node)) {
		err = PTR_ERR(node);
		goto err_msg;
	}

	node->name = "External Memory Controller";
	icc_node_add(node, &emc->provider);

	/* link External Memory Controller to External Memory (DRAM) */
	err = icc_link_create(node, TEGRA_ICC_EMEM);
	if (err)
		goto remove_nodes;

	/* create External Memory node */
	node = icc_node_create(TEGRA_ICC_EMEM);
	if (IS_ERR(node)) {
		err = PTR_ERR(node);
		goto remove_nodes;
	}

	node->name = "External Memory (DRAM)";
	icc_node_add(node, &emc->provider);

	err = icc_provider_register(&emc->provider);
	if (err)
		goto remove_nodes;

	return 0;

remove_nodes:
	icc_nodes_remove(&emc->provider);
err_msg:
	dev_err(emc->dev, "failed to initialize ICC: %d\n", err);

	return err;
}
/*
 * devm-managed undo for tegra_emc_init_clk(): detach the EMC round-rate
 * callback from the Tegra20 clock driver when the device goes away.
 */
static void devm_tegra_emc_unset_callback(void *data)
{
	tegra20_clk_set_emc_round_callback(NULL, NULL);
}
/*
 * devm-managed undo for tegra_emc_init_clk(): drop the clock-rate-change
 * notifier registered on the EMC clock.
 */
static void devm_tegra_emc_unreg_clk_notifier(void *data)
{
	struct tegra_emc *emc = data;

	clk_notifier_unregister(emc->clk, &emc->clk_nb);
}
  871. static int tegra_emc_init_clk(struct tegra_emc *emc)
  872. {
  873. int err;
  874. tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
  875. err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
  876. NULL);
  877. if (err)
  878. return err;
  879. emc->clk = devm_clk_get(emc->dev, NULL);
  880. if (IS_ERR(emc->clk)) {
  881. dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
  882. return PTR_ERR(emc->clk);
  883. }
  884. err = clk_notifier_register(emc->clk, &emc->clk_nb);
  885. if (err) {
  886. dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
  887. return err;
  888. }
  889. err = devm_add_action_or_reset(emc->dev,
  890. devm_tegra_emc_unreg_clk_notifier, emc);
  891. if (err)
  892. return err;
  893. return 0;
  894. }
  895. static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
  896. u32 flags)
  897. {
  898. struct tegra_emc *emc = dev_get_drvdata(dev);
  899. struct dev_pm_opp *opp;
  900. unsigned long rate;
  901. opp = devfreq_recommended_opp(dev, freq, flags);
  902. if (IS_ERR(opp)) {
  903. dev_err(dev, "failed to find opp for %lu Hz\n", *freq);
  904. return PTR_ERR(opp);
  905. }
  906. rate = dev_pm_opp_get_freq(opp);
  907. dev_pm_opp_put(opp);
  908. return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ);
  909. }
  910. static int tegra_emc_devfreq_get_dev_status(struct device *dev,
  911. struct devfreq_dev_status *stat)
  912. {
  913. struct tegra_emc *emc = dev_get_drvdata(dev);
  914. /* freeze counters */
  915. writel_relaxed(EMC_PWR_GATHER_DISABLE, emc->regs + EMC_STAT_CONTROL);
  916. /*
  917. * busy_time: number of clocks EMC request was accepted
  918. * total_time: number of clocks PWR_GATHER control was set to ENABLE
  919. */
  920. stat->busy_time = readl_relaxed(emc->regs + EMC_STAT_PWR_COUNT);
  921. stat->total_time = readl_relaxed(emc->regs + EMC_STAT_PWR_CLOCKS);
  922. stat->current_frequency = clk_get_rate(emc->clk);
  923. /* clear counters and restart */
  924. writel_relaxed(EMC_PWR_GATHER_CLEAR, emc->regs + EMC_STAT_CONTROL);
  925. writel_relaxed(EMC_PWR_GATHER_ENABLE, emc->regs + EMC_STAT_CONTROL);
  926. return 0;
  927. }
  928. static struct devfreq_dev_profile tegra_emc_devfreq_profile = {
  929. .polling_ms = 30,
  930. .target = tegra_emc_devfreq_target,
  931. .get_dev_status = tegra_emc_devfreq_get_dev_status,
  932. };
  933. static int tegra_emc_devfreq_init(struct tegra_emc *emc)
  934. {
  935. struct devfreq *devfreq;
  936. /*
  937. * PWR_COUNT is 1/2 of PWR_CLOCKS at max, and thus, the up-threshold
  938. * should be less than 50. Secondly, multiple active memory clients
  939. * may cause over 20% of lost clock cycles due to stalls caused by
  940. * competing memory accesses. This means that threshold should be
  941. * set to a less than 30 in order to have a properly working governor.
  942. */
  943. emc->ondemand_data.upthreshold = 20;
  944. /*
  945. * Reset statistic gathers state, select global bandwidth for the
  946. * statistics collection mode and set clocks counter saturation
  947. * limit to maximum.
  948. */
  949. writel_relaxed(0x00000000, emc->regs + EMC_STAT_CONTROL);
  950. writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL);
  951. writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT);
  952. devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile,
  953. DEVFREQ_GOV_SIMPLE_ONDEMAND,
  954. &emc->ondemand_data);
  955. if (IS_ERR(devfreq)) {
  956. dev_err(emc->dev, "failed to initialize devfreq: %pe", devfreq);
  957. return PTR_ERR(devfreq);
  958. }
  959. return 0;
  960. }
/*
 * Probe: map registers, program the EMC hardware, load DT timings for the
 * detected RAM code, wire up the IRQ/clock/OPP infrastructure and then
 * bring up the optional interconnect and devfreq services.
 *
 * Note the strict ordering: the IRQ must exist before anything else,
 * emc_setup_hw() before timings are applied, and the clock + OPP table
 * before any rate requests are made.
 */
static int tegra_emc_probe(struct platform_device *pdev)
{
	struct tegra_core_opp_params opp_params = {};
	struct device_node *np;
	struct tegra_emc *emc;
	int irq, err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		/* old device trees lacked the EMC interrupt */
		dev_err(&pdev->dev, "please update your device tree\n");
		return irq;
	}

	emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
	if (!emc)
		return -ENOMEM;

	mutex_init(&emc->rate_lock);
	emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
	emc->dev = &pdev->dev;

	emc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(emc->regs))
		return PTR_ERR(emc->regs);

	err = emc_setup_hw(emc);
	if (err)
		return err;

	/* timings are optional: absent node means a fixed-rate configuration */
	np = tegra_emc_find_node_by_ram_code(emc);
	if (np) {
		err = tegra_emc_load_timings_from_dt(emc, np);
		of_node_put(np);
		if (err)
			return err;
	}

	err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
			       dev_name(&pdev->dev), emc);
	if (err) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		return err;
	}

	err = tegra_emc_init_clk(emc);
	if (err)
		return err;

	opp_params.init_state = true;

	err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
	if (err)
		return err;

	platform_set_drvdata(pdev, emc);
	tegra_emc_rate_requests_init(emc);
	tegra_emc_debugfs_init(emc);
	/* interconnect and devfreq are optional; their errors are ignored */
	tegra_emc_interconnect_init(emc);
	tegra_emc_devfreq_init(emc);

	/*
	 * Don't allow the kernel module to be unloaded. Unloading adds some
	 * extra complexity which doesn't really worth the effort in a case of
	 * this driver.
	 */
	try_module_get(THIS_MODULE);

	return 0;
}
/* Devices handled by this driver; exported for module autoloading. */
static const struct of_device_id tegra_emc_of_match[] = {
	{ .compatible = "nvidia,tegra20-emc", },
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
static struct platform_driver tegra_emc_driver = {
	.probe = tegra_emc_probe,
	.driver = {
		.name = "tegra20-emc",
		.of_match_table = tegra_emc_of_match,
		/* unbinding is unsupported (see try_module_get in probe) */
		.suppress_bind_attrs = true,
		.sync_state = icc_sync_state,
	},
};
/* Standard module registration and metadata. */
module_platform_driver(tegra_emc_driver);

MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
/* ensure the simple-ondemand governor is available before probing */
MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL v2");