// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/units.h>

#define LUT_MAX_ENTRIES			40U
#define LUT_SRC				GENMASK(31, 30)
#define LUT_L_VAL			GENMASK(7, 0)
#define LUT_CORE_COUNT			GENMASK(18, 16)
#define LUT_VOLT			GENMASK(11, 0)
#define CLK_HW_DIV			2
#define LUT_TURBO_IND			1

#define GT_IRQ_STATUS			BIT(2)

#define MAX_FREQ_DOMAINS		4

struct qcom_cpufreq_soc_data {
	u32 reg_enable;
	u32 reg_domain_state;
	u32 reg_dcvs_ctrl;
	u32 reg_freq_lut;
	u32 reg_volt_lut;
	u32 reg_intr_clr;
	u32 reg_current_vote;
	u32 reg_perf_state;
	u8 lut_row_size;
};

struct qcom_cpufreq_data {
	void __iomem *base;

	/*
	 * Mutex to synchronize between de-init sequence and re-starting LMh
	 * polling/interrupts
	 */
	struct mutex throttle_lock;
	int throttle_irq;
	char irq_name[15];
	bool cancel_throttle;
	struct delayed_work throttle_work;
	struct cpufreq_policy *policy;
	struct clk_hw cpu_clk;

	bool per_core_dcvs;
};

static struct {
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
} qcom_cpufreq;

static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;
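
/*
 * Program the OPP matching @freq_khz so that the interconnect bandwidth
 * votes associated with that OPP are applied.
 */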
static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
			       unsigned long freq_khz)
{
	unsigned long freq_hz = freq_khz * 1000;
	struct dev_pm_opp *opp;
	struct device *dev;
	int ret;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}
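
/*
 * Without interconnect scaling there is no DT OPP table, so create a
 * dynamic OPP entry straight from the LUT values. With it, the DT table
 * is authoritative: adjust the voltage of the matching OPP and re-enable it.
 */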
static int qcom_cpufreq_update_opp(struct device *cpu_dev,
				   unsigned long freq_khz,
				   unsigned long volt)
{
	unsigned long freq_hz = freq_khz * 1000;
	int ret;

	/* Skip voltage update if the opp table is not available */
	if (!icc_scaling_enabled)
		return dev_pm_opp_add(cpu_dev, freq_hz, volt);

	ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
	if (ret) {
		dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz);
		return ret;
	}

	return dev_pm_opp_enable(cpu_dev, freq_hz);
}
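
/* Request a LUT index; with per-core DCVS, mirror it for each related CPU */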
static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
					unsigned int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
	unsigned long freq = policy->freq_table[index].frequency;
	unsigned int i;

	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	if (icc_scaling_enabled)
		qcom_cpufreq_set_bw(policy, freq);

	return 0;
}
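
/*
 * Read the LMh-limited L_VAL (its register location varies by SoC
 * generation) and convert it to a frequency in Hz.
 */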
static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
	unsigned int lval;

	if (qcom_cpufreq.soc_data->reg_current_vote)
		lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_current_vote) & 0x3ff;
	else
		lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_domain_state) & 0xff;

	return lval * xo_rate;
}

/* Get the frequency requested by the cpufreq core for the CPU */
static unsigned int qcom_cpufreq_get_freq(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
	unsigned int index;

	if (!policy)
		return 0;

	data = policy->driver_data;
	soc_data = qcom_cpufreq.soc_data;

	index = readl_relaxed(data->base + soc_data->reg_perf_state);
	index = min(index, LUT_MAX_ENTRIES - 1);

	return policy->freq_table[index].frequency;
}

static unsigned int __qcom_cpufreq_hw_get(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data;

	if (!policy)
		return 0;

	data = policy->driver_data;

	if (data->throttle_irq >= 0)
		return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;

	return qcom_cpufreq_get_freq(policy);
}

static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
	return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu));
}
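
/* Lockless fast-switch path: write the index cpufreq has already resolved */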
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
						unsigned int target_freq)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
	unsigned int index;
	unsigned int i;

	index = policy->cached_resolved_idx;
	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	return policy->freq_table[index].frequency;
}
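
/*
 * Walk the hardware lookup table and build the cpufreq frequency table,
 * adding or validating an OPP for each usable entry. A repeated frequency
 * marks the end of the LUT; the entry before it may be a boost level.
 */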
static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
				    struct cpufreq_policy *policy)
{
	u32 data, src, lval, i, core_count, prev_freq = 0, freq;
	u32 volt;
	struct cpufreq_frequency_table *table;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;
	struct qcom_cpufreq_data *drv_data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;

	table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (!ret) {
		/* Disable all opps and cross-validate against LUT later */
		icc_scaling_enabled = true;
		for (rate = 0; ; rate++) {
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
			if (IS_ERR(opp))
				break;

			dev_pm_opp_put(opp);
			dev_pm_opp_disable(cpu_dev, rate);
		}
	} else if (ret != -ENODEV) {
		dev_err(cpu_dev, "Invalid opp table in device tree\n");
		kfree(table);
		return ret;
	} else {
		policy->fast_switch_possible = true;
		icc_scaling_enabled = false;
	}

	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
		data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
				     i * soc_data->lut_row_size);
		src = FIELD_GET(LUT_SRC, data);
		lval = FIELD_GET(LUT_L_VAL, data);
		core_count = FIELD_GET(LUT_CORE_COUNT, data);

		data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
				     i * soc_data->lut_row_size);
		volt = FIELD_GET(LUT_VOLT, data) * 1000;

		if (src)
			freq = xo_rate * lval / 1000;
		else
			freq = cpu_hw_rate / 1000;

		if (freq != prev_freq && core_count != LUT_TURBO_IND) {
			if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
				table[i].frequency = freq;
				dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
					freq, core_count);
			} else {
				dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
				table[i].frequency = CPUFREQ_ENTRY_INVALID;
			}
		} else if (core_count == LUT_TURBO_IND) {
			table[i].frequency = CPUFREQ_ENTRY_INVALID;
		}

		/*
		 * Two of the same frequencies with the same core counts mean
		 * the end of the table.
		 */
		if (i > 0 && prev_freq == freq) {
			struct cpufreq_frequency_table *prev = &table[i - 1];

			/*
			 * Only treat the last frequency that might be a boost
			 * as the boost frequency.
			 */
			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
				if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
					prev->frequency = prev_freq;
					prev->flags = CPUFREQ_BOOST_FREQ;
				} else {
					dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
						 freq);
				}
			}

			break;
		}

		prev_freq = freq;
	}

	table[i].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = table;
	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);

	return 0;
}
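
/* Gather all CPUs whose "qcom,freq-domain" phandle points at domain @index */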
static void qcom_get_related_cpus(int index, struct cpumask *m)
{
	struct device_node *cpu_np;
	struct of_phandle_args args;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np)
			continue;

		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
						 "#freq-domain-cells", 0,
						 &args);
		of_node_put(cpu_np);
		if (ret < 0)
			continue;

		if (index == args.args[0])
			cpumask_set_cpu(cpu, m);
	}
}
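
/*
 * Report the hardware-throttled frequency to the scheduler as HW pressure,
 * then either re-enable the LMh interrupt (the cap has lifted back above
 * the requested frequency) or keep polling while the hardware stays capped.
 */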
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
	struct cpufreq_policy *policy = data->policy;
	int cpu = cpumask_first(policy->related_cpus);
	struct device *dev = get_cpu_device(cpu);
	unsigned long freq_hz, throttled_freq;
	struct dev_pm_opp *opp;

	/*
	 * Get the h/w throttled frequency, normalize it using the
	 * registered opp table and use it to calculate thermal pressure.
	 */
	freq_hz = qcom_lmh_get_throttle_freq(data);

	opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
	if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
		opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);

	if (IS_ERR(opp))
		dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
	else
		dev_pm_opp_put(opp);

	throttled_freq = freq_hz / HZ_PER_KHZ;

	/* Update HW pressure (the boost frequencies are accepted) */
	arch_update_hw_pressure(policy->related_cpus, throttled_freq);

	/*
	 * In the unlikely case the policy is unregistered, do not enable
	 * polling or the h/w interrupt.
	 */
	mutex_lock(&data->throttle_lock);
	if (data->cancel_throttle)
		goto out;

	/*
	 * If the h/w throttled frequency is higher than what cpufreq has
	 * requested, stop polling and switch back to the interrupt mechanism.
	 */
	if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
		enable_irq(data->throttle_irq);
	else
		mod_delayed_work(system_highpri_wq, &data->throttle_work,
				 msecs_to_jiffies(10));

out:
	mutex_unlock(&data->throttle_lock);
}

static void qcom_lmh_dcvs_poll(struct work_struct *work)
{
	struct qcom_cpufreq_data *data;

	data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
	qcom_lmh_dcvs_notify(data);
}
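
/* LMh fired: mask the interrupt, fall back to polling, and ack the status bit */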
static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
	struct qcom_cpufreq_data *c_data = data;

	/* Disable interrupt and enable polling */
	disable_irq_nosync(c_data->throttle_irq);
	schedule_delayed_work(&c_data->throttle_work, 0);

	if (qcom_cpufreq.soc_data->reg_intr_clr)
		writel_relaxed(GT_IRQ_STATUS,
			       c_data->base + qcom_cpufreq.soc_data->reg_intr_clr);

	return IRQ_HANDLED;
}

static const struct qcom_cpufreq_soc_data qcom_soc_data = {
	.reg_enable = 0x0,
	.reg_dcvs_ctrl = 0xbc,
	.reg_freq_lut = 0x110,
	.reg_volt_lut = 0x114,
	.reg_current_vote = 0x704,
	.reg_perf_state = 0x920,
	.lut_row_size = 32,
};

static const struct qcom_cpufreq_soc_data epss_soc_data = {
	.reg_enable = 0x0,
	.reg_domain_state = 0x20,
	.reg_dcvs_ctrl = 0xb0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_intr_clr = 0x308,
	.reg_perf_state = 0x320,
	.lut_row_size = 4,
};

static const struct of_device_id qcom_cpufreq_hw_match[] = {
	{ .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
	{ .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);

static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	/*
	 * Look for the LMh interrupt. If no interrupt line is specified or
	 * there is an error, allow cpufreq to be enabled as usual.
	 */
	data->throttle_irq = platform_get_irq_optional(pdev, index);
	if (data->throttle_irq == -ENXIO)
		return 0;
	if (data->throttle_irq < 0)
		return data->throttle_irq;

	data->cancel_throttle = false;
	mutex_init(&data->throttle_lock);
	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

	snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
	ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
	if (ret) {
		dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
		return 0;
	}

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return 0;
}
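
/* Resume LMh handling (clear the cancel flag, restore IRQ affinity) on online */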
static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = false;
	mutex_unlock(&data->throttle_lock);

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return ret;
}
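
/*
 * Quiesce LMh before the policy goes down: stop polling, drop the IRQ
 * affinity hint and mask the interrupt.
 */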
static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = true;
	mutex_unlock(&data->throttle_lock);

	cancel_delayed_work_sync(&data->throttle_work);
	irq_set_affinity_and_hint(data->throttle_irq, NULL);
	disable_irq_nosync(data->throttle_irq);

	return 0;
}

static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
	if (data->throttle_irq <= 0)
		return;

	free_irq(data->throttle_irq, data);
}
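
/*
 * Per-policy init: resolve this CPU's frequency domain from the DT, verify
 * the hardware is enabled, build the frequency table from the LUT and wire
 * up LMh throttle handling.
 */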
static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
	struct platform_device *pdev = cpufreq_get_driver_data();
	struct device *dev = &pdev->dev;
	struct of_phandle_args args;
	struct device_node *cpu_np;
	struct device *cpu_dev;
	struct qcom_cpufreq_data *data;
	int ret, index;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	cpu_np = of_cpu_device_node_get(policy->cpu);
	if (!cpu_np)
		return -EINVAL;

	ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
					 "#freq-domain-cells", 0, &args);
	of_node_put(cpu_np);
	if (ret)
		return ret;

	index = args.args[0];
	data = &qcom_cpufreq.data[index];

	/* HW should be in enabled state to proceed */
	if (!(readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_enable) & 0x1)) {
		dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
		return -ENODEV;
	}

	if (readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_dcvs_ctrl) & 0x1)
		data->per_core_dcvs = true;

	qcom_get_related_cpus(index, policy->cpus);

	policy->driver_data = data;
	policy->dvfs_possible_from_any_cpu = true;
	data->policy = policy;

	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
	if (ret) {
		dev_err(dev, "Domain-%d failed to read LUT\n", index);
		return ret;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "Failed to add OPPs\n");
		return -ENODEV;
	}

	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret)
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
	}

	return qcom_cpufreq_hw_lmh_init(policy, index);
}

static void qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct qcom_cpufreq_data *data = policy->driver_data;

	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	qcom_cpufreq_hw_lmh_exit(data);
	kfree(policy->freq_table);
	kfree(data);
}
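
/* The LMh IRQ is requested with IRQF_NO_AUTOEN; enable it once the policy is ready */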
static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq >= 0)
		enable_irq(data->throttle_irq);
}

static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};

static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK |
			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
			  CPUFREQ_IS_COOLING_DEV,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= qcom_cpufreq_hw_target_index,
	.get		= qcom_cpufreq_hw_get,
	.init		= qcom_cpufreq_hw_cpu_init,
	.exit		= qcom_cpufreq_hw_cpu_exit,
	.online		= qcom_cpufreq_hw_cpu_online,
	.offline	= qcom_cpufreq_hw_cpu_offline,
	.register_em	= cpufreq_register_em_with_opp,
	.fast_switch	= qcom_cpufreq_hw_fast_switch,
	.name		= "qcom-cpufreq-hw",
	.attr		= qcom_cpufreq_hw_attr,
	.ready		= qcom_cpufreq_ready,
};
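
/*
 * Each frequency domain is also exposed as a clock, so the current
 * (possibly throttled) CPU frequency can be read through the clock
 * framework; recalc_rate reports it in Hz.
 */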
static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);

	return __qcom_cpufreq_hw_get(data->policy) * HZ_PER_KHZ;
}

/*
 * Since we cannot determine the closest rate to the target rate, just
 * return the actual rate at which the clock is running. This is needed to
 * make the clk_set_rate() API work properly.
 */
static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0);

	return 0;
}

static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
	.recalc_rate = qcom_cpufreq_hw_recalc_rate,
	.determine_rate = qcom_cpufreq_hw_determine_rate,
};
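
/*
 * Probe: cache the XO and alternate source rates used to decode LUT
 * entries, map each frequency domain's register block, register a
 * per-domain CPU clock provider, then register the cpufreq driver.
 */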
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &pdev->dev;
	struct device *cpu_dev;
	struct clk *clk;
	int ret, i, num_domains;

	clk = clk_get(dev, "xo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	xo_rate = clk_get_rate(clk);
	clk_put(clk);

	clk = clk_get(dev, "alternate");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
	clk_put(clk);

	cpufreq_qcom_hw_driver.driver_data = pdev;

	/* Check for optional interconnect paths on CPU0 */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to find icc paths\n");

	for (num_domains = 0; num_domains < MAX_FREQ_DOMAINS; num_domains++)
		if (!platform_get_resource(pdev, IORESOURCE_MEM, num_domains))
			break;

	qcom_cpufreq.data = devm_kzalloc(dev, sizeof(struct qcom_cpufreq_data) * num_domains,
					 GFP_KERNEL);
	if (!qcom_cpufreq.data)
		return -ENOMEM;

	qcom_cpufreq.soc_data = of_device_get_match_data(dev);
	if (!qcom_cpufreq.soc_data)
		return -ENODEV;

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = num_domains;

	for (i = 0; i < num_domains; i++) {
		struct qcom_cpufreq_data *data = &qcom_cpufreq.data[i];
		struct clk_init_data clk_init = {};
		void __iomem *base;

		base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(base)) {
			dev_err(dev, "Failed to map resource index %d\n", i);
			return PTR_ERR(base);
		}

		data->base = base;

		/* Register CPU clock for each frequency domain */
		clk_init.name = kasprintf(GFP_KERNEL, "qcom_cpufreq%d", i);
		if (!clk_init.name)
			return -ENOMEM;

		clk_init.flags = CLK_GET_RATE_NOCACHE;
		clk_init.ops = &qcom_cpufreq_hw_clk_ops;
		data->cpu_clk.init = &clk_init;

		ret = devm_clk_hw_register(dev, &data->cpu_clk);
		if (ret < 0) {
			dev_err(dev, "Failed to register clock %d: %d\n", i, ret);
			kfree(clk_init.name);
			return ret;
		}

		clk_data->hws[i] = &data->cpu_clk;
		kfree(clk_init.name);
	}

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
	if (ret < 0) {
		dev_err(dev, "Failed to add clock provider\n");
		return ret;
	}

	ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
	if (ret)
		dev_err(dev, "CPUFreq HW driver failed to register\n");
	else
		dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n");

	return ret;
}

static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}

static struct platform_driver qcom_cpufreq_hw_driver = {
	.probe = qcom_cpufreq_hw_driver_probe,
	.remove_new = qcom_cpufreq_hw_driver_remove,
	.driver = {
		.name = "qcom-cpufreq-hw",
		.of_match_table = qcom_cpufreq_hw_match,
	},
};

static int __init qcom_cpufreq_hw_init(void)
{
	return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);

static void __exit qcom_cpufreq_hw_exit(void)
{
	platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);

MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
MODULE_LICENSE("GPL v2");