cppc_acpi.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
  4. *
  5. * (C) Copyright 2014, 2015 Linaro Ltd.
  6. * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
  7. *
  8. * CPPC describes a few methods for controlling CPU performance using
  9. * information from a per CPU table called CPC. This table is described in
  10. * the ACPI v5.0+ specification. The table consists of a list of
  11. * registers which may be memory mapped or hardware registers and also may
  12. * include some static integer values.
  13. *
  14. * CPU performance is expressed on an abstract, continuous scale, as opposed
  15. * to a discretized P-state scale tied to CPU frequency only. In brief, the basic
  16. * operation involves:
  17. *
  18. * - OS makes a CPU performance request. (Can provide min and max bounds)
  19. *
  20. * - Platform (such as BMC) is free to optimize request within requested bounds
  21. * depending on power/thermal budgets etc.
  22. *
  23. * - Platform conveys its decision back to OS
  24. *
  25. * The communication between OS and platform occurs through another medium
  26. * called the Platform Communication Channel (PCC). This is a generic mailbox-like
  27. * mechanism which includes doorbell semantics to indicate register updates.
  28. * See drivers/mailbox/pcc.c for details on PCC.
  29. *
  30. * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
  31. * above specifications.
  32. */
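/*
 * As a rough illustration of the flow above, a CPUfreq driver built on this
 * file typically queries the capability registers once and then issues
 * desired-performance requests. A minimal sketch (error handling omitted;
 * cppc_set_perf() is defined further down in this file):
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	cppc_get_perf_caps(cpu, &caps);
 *	ctrls.desired_perf = caps.nominal_perf;
 *	cppc_set_perf(cpu, &ctrls);
 */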
  33. #define pr_fmt(fmt) "ACPI CPPC: " fmt
  34. #include <linux/delay.h>
  35. #include <linux/iopoll.h>
  36. #include <linux/ktime.h>
  37. #include <linux/rwsem.h>
  38. #include <linux/wait.h>
  39. #include <linux/topology.h>
  40. #include <linux/dmi.h>
  41. #include <linux/units.h>
  42. #include <linux/unaligned.h>
  43. #include <acpi/cppc_acpi.h>
  44. struct cppc_pcc_data {
  45. struct pcc_mbox_chan *pcc_channel;
  46. void __iomem *pcc_comm_addr;
  47. bool pcc_channel_acquired;
  48. unsigned int deadline_us;
  49. unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
  50. bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */
  51. bool platform_owns_pcc; /* Ownership of PCC subspace */
  52. unsigned int pcc_write_cnt; /* Running count of PCC write commands */
  53. /*
  54. * Lock to provide controlled access to the PCC channel.
  55. *
  56. * For performance-critical use cases (currently cppc_set_perf), we need to
  57. * take the read_lock and check whether the channel belongs to the OSPM
  58. * before reading from or writing to the PCC subspace.
  59. * We need to take the write_lock before transferring channel ownership
  60. * to the platform via a doorbell.
  61. * This allows us to batch a number of CPPC requests if they happen to
  62. * originate at about the same time.
  63. *
  64. * For non-performance-critical use cases (init), take the write_lock for
  65. * all purposes, which gives exclusive access.
  66. */
  67. struct rw_semaphore pcc_lock;
  68. /* Wait queue for CPUs whose requests were batched */
  69. wait_queue_head_t pcc_write_wait_q;
  70. ktime_t last_cmd_cmpl_time;
  71. ktime_t last_mpar_reset;
  72. int mpar_count;
  73. int refcount;
  74. };
  75. /* Array to represent the PCC channel per subspace ID */
  76. static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
  77. /* The cpu_pcc_subspace_idx contains per CPU subspace ID */
  78. static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
  79. /*
  80. * The cpc_desc structure contains the ACPI register details
  81. * as described in the per CPU _CPC tables. The details
  82. * include the type of register (e.g. PCC, System IO, FFH etc.)
  83. * and destination addresses which lets us READ/WRITE CPU performance
  84. * information using the appropriate I/O methods.
  85. */
  86. static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
  87. /* pcc mapped address + header size + offset within PCC subspace */
  88. #define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
  89. 0x8 + (offs))
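/*
 * The 0x8 above skips the generic PCC shared-memory header (4-byte signature
 * plus 2-byte command and 2-byte status fields of struct
 * acpi_pcct_shared_memory). For example, a PCC register descriptor with
 * address 0x120 in subspace 2 (as in the sample _CPC table further below)
 * resolves to pcc_data[2]->pcc_comm_addr + 0x8 + 0x120.
 */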
  90. /* Check if a CPC register is in PCC */
  91. #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
  92. (cpc)->cpc_entry.reg.space_id == \
  93. ACPI_ADR_SPACE_PLATFORM_COMM)
  94. /* Check if a CPC register is in FFH */
  95. #define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
  96. (cpc)->cpc_entry.reg.space_id == \
  97. ACPI_ADR_SPACE_FIXED_HARDWARE)
  98. /* Check if a CPC register is in SystemMemory */
  99. #define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
  100. (cpc)->cpc_entry.reg.space_id == \
  101. ACPI_ADR_SPACE_SYSTEM_MEMORY)
  102. /* Check if a CPC register is in SystemIo */
  103. #define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
  104. (cpc)->cpc_entry.reg.space_id == \
  105. ACPI_ADR_SPACE_SYSTEM_IO)
  106. /* Evaluates to True if reg is a NULL register descriptor */
  107. #define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
  108. (reg)->address == 0 && \
  109. (reg)->bit_width == 0 && \
  110. (reg)->bit_offset == 0 && \
  111. (reg)->access_width == 0)
  112. /* Evaluates to True if an optional cpc field is supported */
  113. #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
  114. !!(cpc)->cpc_entry.int_value : \
  115. !IS_NULL_REG(&(cpc)->cpc_entry.reg))
  116. /*
  117. * Arbitrary Retries in case the remote processor is slow to respond
  118. * to PCC commands. Keeping it high enough to cover emulators where
  119. * the processors run painfully slowly.
  120. */
  121. #define NUM_RETRIES 500ULL
  122. #define OVER_16BTS_MASK ~0xFFFFULL
  123. #define define_one_cppc_ro(_name) \
  124. static struct kobj_attribute _name = \
  125. __ATTR(_name, 0444, show_##_name, NULL)
  126. #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
  127. #define show_cppc_data(access_fn, struct_name, member_name) \
  128. static ssize_t show_##member_name(struct kobject *kobj, \
  129. struct kobj_attribute *attr, char *buf) \
  130. { \
  131. struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \
  132. struct struct_name st_name = {0}; \
  133. int ret; \
  134. \
  135. ret = access_fn(cpc_ptr->cpu_id, &st_name); \
  136. if (ret) \
  137. return ret; \
  138. \
  139. return sysfs_emit(buf, "%llu\n", \
  140. (u64)st_name.member_name); \
  141. } \
  142. define_one_cppc_ro(member_name)
  143. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
  144. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
  145. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
  146. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
  147. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf);
  148. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
  149. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
  150. show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
  151. show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
  152. /* Check for a valid access_width; otherwise, fall back to using bit_width */
  153. #define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
  154. /* Shift and apply the mask for CPC reads/writes */
  155. #define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \
  156. GENMASK(((reg)->bit_width) - 1, 0))
  157. #define MASK_VAL_WRITE(reg, prev_val, val) \
  158. ((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \
  159. ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset)))
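/*
 * A worked example of the helpers above, for a hypothetical register with
 * access_width = 2 (i.e. 16-bit accesses per GET_BIT_WIDTH), bit_width = 8
 * and bit_offset = 4:
 *
 *	MASK_VAL_READ(reg, 0x0AB5)        = (0x0AB5 >> 4) & GENMASK(7, 0) = 0xAB
 *	MASK_VAL_WRITE(reg, 0x0AB5, 0xCD) = (0xCD << 4) | (0x0AB5 & 0xF00F) = 0x0CD5
 */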
  160. static ssize_t show_feedback_ctrs(struct kobject *kobj,
  161. struct kobj_attribute *attr, char *buf)
  162. {
  163. struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
  164. struct cppc_perf_fb_ctrs fb_ctrs = {0};
  165. int ret;
  166. ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
  167. if (ret)
  168. return ret;
  169. return sysfs_emit(buf, "ref:%llu del:%llu\n",
  170. fb_ctrs.reference, fb_ctrs.delivered);
  171. }
  172. define_one_cppc_ro(feedback_ctrs);
  173. static struct attribute *cppc_attrs[] = {
  174. &feedback_ctrs.attr,
  175. &reference_perf.attr,
  176. &wraparound_time.attr,
  177. &highest_perf.attr,
  178. &lowest_perf.attr,
  179. &lowest_nonlinear_perf.attr,
  180. &guaranteed_perf.attr,
  181. &nominal_perf.attr,
  182. &nominal_freq.attr,
  183. &lowest_freq.attr,
  184. NULL
  185. };
  186. ATTRIBUTE_GROUPS(cppc);
  187. static const struct kobj_type cppc_ktype = {
  188. .sysfs_ops = &kobj_sysfs_ops,
  189. .default_groups = cppc_groups,
  190. };
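/*
 * Once acpi_cppc_processor_probe() below registers the "acpi_cppc" kobject
 * under each CPU device, the attributes above typically show up as, e.g.:
 *
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/highest_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/nominal_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/lowest_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/feedback_ctrs
 *
 * ("cpu0" is just an example; there is one directory per logical CPU.)
 */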
  191. static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
  192. {
  193. int ret, status;
  194. struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
  195. struct acpi_pcct_shared_memory __iomem *generic_comm_base =
  196. pcc_ss_data->pcc_comm_addr;
  197. if (!pcc_ss_data->platform_owns_pcc)
  198. return 0;
  199. /*
  200. * Poll the PCC status register every 3 us (delay_us) for a maximum of
  201. * deadline_us (timeout_us) until the PCC command complete bit is set (cond).
  202. */
  203. ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
  204. status & PCC_CMD_COMPLETE_MASK, 3,
  205. pcc_ss_data->deadline_us);
  206. if (likely(!ret)) {
  207. pcc_ss_data->platform_owns_pcc = false;
  208. if (chk_err_bit && (status & PCC_ERROR_MASK))
  209. ret = -EIO;
  210. }
  211. if (unlikely(ret))
  212. pr_err("PCC check channel failed for ss: %d. ret=%d\n",
  213. pcc_ss_id, ret);
  214. return ret;
  215. }
  216. /*
  217. * This function transfers the ownership of the PCC channel to the platform,
  218. * so it must be called while holding the write_lock (pcc_lock).
  219. */
  220. static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
  221. {
  222. int ret = -EIO, i;
  223. struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
  224. struct acpi_pcct_shared_memory __iomem *generic_comm_base =
  225. pcc_ss_data->pcc_comm_addr;
  226. unsigned int time_delta;
  227. /*
  228. * For CMD_WRITE we know for a fact the caller should have checked
  229. * the channel before writing to PCC space
  230. */
  231. if (cmd == CMD_READ) {
  232. /*
  233. * If there are pending cpc_writes, then we stole the channel
  234. * before write completion, so first send a WRITE command to
  235. * platform
  236. */
  237. if (pcc_ss_data->pending_pcc_write_cmd)
  238. send_pcc_cmd(pcc_ss_id, CMD_WRITE);
  239. ret = check_pcc_chan(pcc_ss_id, false);
  240. if (ret)
  241. goto end;
  242. } else /* CMD_WRITE */
  243. pcc_ss_data->pending_pcc_write_cmd = FALSE;
  244. /*
  245. * Handle the Minimum Request Turnaround Time(MRTT)
  246. * "The minimum amount of time that OSPM must wait after the completion
  247. * of a command before issuing the next command, in microseconds"
  248. */
  249. if (pcc_ss_data->pcc_mrtt) {
  250. time_delta = ktime_us_delta(ktime_get(),
  251. pcc_ss_data->last_cmd_cmpl_time);
  252. if (pcc_ss_data->pcc_mrtt > time_delta)
  253. udelay(pcc_ss_data->pcc_mrtt - time_delta);
  254. }
  255. /*
  256. * Handle the non-zero Maximum Periodic Access Rate(MPAR)
  257. * "The maximum number of periodic requests that the subspace channel can
  258. * support, reported in commands per minute. 0 indicates no limitation."
  259. *
  260. * This parameter should be ideally zero or large enough so that it can
  261. * handle maximum number of requests that all the cores in the system can
  262. * collectively generate. If it is not, we will follow the spec and just
  263. * not send the request to the platform after hitting the MPAR limit in
  264. * any 60s window
  265. */
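/*
 * As a concrete (hypothetical) example: with pcc_mpar = 600, mpar_count is
 * reloaded to 600, decremented on every command sent, and once it reaches
 * zero further commands are rejected with -EIO until 60 seconds have passed
 * since last_mpar_reset, at which point the budget is reloaded.
 */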
  266. if (pcc_ss_data->pcc_mpar) {
  267. if (pcc_ss_data->mpar_count == 0) {
  268. time_delta = ktime_ms_delta(ktime_get(),
  269. pcc_ss_data->last_mpar_reset);
  270. if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
  271. pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
  272. pcc_ss_id);
  273. ret = -EIO;
  274. goto end;
  275. }
  276. pcc_ss_data->last_mpar_reset = ktime_get();
  277. pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
  278. }
  279. pcc_ss_data->mpar_count--;
  280. }
  281. /* Write to the shared comm region. */
  282. writew_relaxed(cmd, &generic_comm_base->command);
  283. /* Flip CMD COMPLETE bit */
  284. writew_relaxed(0, &generic_comm_base->status);
  285. pcc_ss_data->platform_owns_pcc = true;
  286. /* Ring doorbell */
  287. ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
  288. if (ret < 0) {
  289. pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
  290. pcc_ss_id, cmd, ret);
  291. goto end;
  292. }
  293. /* wait for completion and check for PCC error bit */
  294. ret = check_pcc_chan(pcc_ss_id, true);
  295. if (pcc_ss_data->pcc_mrtt)
  296. pcc_ss_data->last_cmd_cmpl_time = ktime_get();
  297. if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
  298. mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
  299. else
  300. mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
  301. end:
  302. if (cmd == CMD_WRITE) {
  303. if (unlikely(ret)) {
  304. for_each_possible_cpu(i) {
  305. struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
  306. if (!desc)
  307. continue;
  308. if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
  309. desc->write_cmd_status = ret;
  310. }
  311. }
  312. pcc_ss_data->pcc_write_cnt++;
  313. wake_up_all(&pcc_ss_data->pcc_write_wait_q);
  314. }
  315. return ret;
  316. }
  317. static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
  318. {
  319. if (ret < 0)
  320. pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
  321. *(u16 *)msg, ret);
  322. else
  323. pr_debug("TX completed. CMD sent:%x, ret:%d\n",
  324. *(u16 *)msg, ret);
  325. }
  326. static struct mbox_client cppc_mbox_cl = {
  327. .tx_done = cppc_chan_tx_done,
  328. .knows_txdone = true,
  329. };
  330. static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
  331. {
  332. int result = -EFAULT;
  333. acpi_status status = AE_OK;
  334. struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
  335. struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
  336. struct acpi_buffer state = {0, NULL};
  337. union acpi_object *psd = NULL;
  338. struct acpi_psd_package *pdomain;
  339. status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
  340. &buffer, ACPI_TYPE_PACKAGE);
  341. if (status == AE_NOT_FOUND) /* _PSD is optional */
  342. return 0;
  343. if (ACPI_FAILURE(status))
  344. return -ENODEV;
  345. psd = buffer.pointer;
  346. if (!psd || psd->package.count != 1) {
  347. pr_debug("Invalid _PSD data\n");
  348. goto end;
  349. }
  350. pdomain = &(cpc_ptr->domain_info);
  351. state.length = sizeof(struct acpi_psd_package);
  352. state.pointer = pdomain;
  353. status = acpi_extract_package(&(psd->package.elements[0]),
  354. &format, &state);
  355. if (ACPI_FAILURE(status)) {
  356. pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
  357. goto end;
  358. }
  359. if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
  360. pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
  361. goto end;
  362. }
  363. if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
  364. pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
  365. goto end;
  366. }
  367. if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
  368. pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
  369. pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
  370. pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
  371. goto end;
  372. }
  373. result = 0;
  374. end:
  375. kfree(buffer.pointer);
  376. return result;
  377. }
  378. bool acpi_cpc_valid(void)
  379. {
  380. struct cpc_desc *cpc_ptr;
  381. int cpu;
  382. if (acpi_disabled)
  383. return false;
  384. for_each_present_cpu(cpu) {
  385. cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
  386. if (!cpc_ptr)
  387. return false;
  388. }
  389. return true;
  390. }
  391. EXPORT_SYMBOL_GPL(acpi_cpc_valid);
  392. bool cppc_allow_fast_switch(void)
  393. {
  394. struct cpc_register_resource *desired_reg;
  395. struct cpc_desc *cpc_ptr;
  396. int cpu;
  397. for_each_possible_cpu(cpu) {
  398. cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
  399. desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
  400. if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
  401. !CPC_IN_SYSTEM_IO(desired_reg))
  402. return false;
  403. }
  404. return true;
  405. }
  406. EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);
  407. /**
  408. * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
  409. * @cpu: Find all CPUs that share a domain with cpu.
  410. * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
  411. *
  412. * Return: 0 for success or negative value for err.
  413. */
  414. int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
  415. {
  416. struct cpc_desc *cpc_ptr, *match_cpc_ptr;
  417. struct acpi_psd_package *match_pdomain;
  418. struct acpi_psd_package *pdomain;
  419. int count_target, i;
  420. /*
  421. * Now that we have _PSD data from all CPUs, let's setup P-state
  422. * domain info.
  423. */
  424. cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
  425. if (!cpc_ptr)
  426. return -EFAULT;
  427. pdomain = &(cpc_ptr->domain_info);
  428. cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
  429. if (pdomain->num_processors <= 1)
  430. return 0;
  431. /* Validate the Domain info */
  432. count_target = pdomain->num_processors;
  433. if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
  434. cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
  435. else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
  436. cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
  437. else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
  438. cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
  439. for_each_possible_cpu(i) {
  440. if (i == cpu)
  441. continue;
  442. match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
  443. if (!match_cpc_ptr)
  444. goto err_fault;
  445. match_pdomain = &(match_cpc_ptr->domain_info);
  446. if (match_pdomain->domain != pdomain->domain)
  447. continue;
  448. /* Here i and cpu are in the same domain */
  449. if (match_pdomain->num_processors != count_target)
  450. goto err_fault;
  451. if (pdomain->coord_type != match_pdomain->coord_type)
  452. goto err_fault;
  453. cpumask_set_cpu(i, cpu_data->shared_cpu_map);
  454. }
  455. return 0;
  456. err_fault:
  457. /* Assume no coordination on any error parsing domain info */
  458. cpumask_clear(cpu_data->shared_cpu_map);
  459. cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
  460. cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
  461. return -EFAULT;
  462. }
  463. EXPORT_SYMBOL_GPL(acpi_get_psd_map);
  464. static int register_pcc_channel(int pcc_ss_idx)
  465. {
  466. struct pcc_mbox_chan *pcc_chan;
  467. u64 usecs_lat;
  468. if (pcc_ss_idx >= 0) {
  469. pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
  470. if (IS_ERR(pcc_chan)) {
  471. pr_err("Failed to find PCC channel for subspace %d\n",
  472. pcc_ss_idx);
  473. return -ENODEV;
  474. }
  475. pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
  476. /*
  477. * cppc_ss->latency is just a Nominal value. In reality
  478. * the remote processor could be much slower to reply.
  479. * So add an arbitrary amount of wait on top of Nominal.
  480. */
  481. usecs_lat = NUM_RETRIES * pcc_chan->latency;
  482. pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
  483. pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
  484. pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
  485. pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
  486. pcc_data[pcc_ss_idx]->pcc_comm_addr =
  487. acpi_os_ioremap(pcc_chan->shmem_base_addr,
  488. pcc_chan->shmem_size);
  489. if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
  490. pr_err("Failed to ioremap PCC comm region mem for %d\n",
  491. pcc_ss_idx);
  492. return -ENOMEM;
  493. }
  494. /* Set flag so that we don't come here for each CPU. */
  495. pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
  496. }
  497. return 0;
  498. }
  499. /**
  500. * cpc_ffh_supported() - check if FFH reading supported
  501. *
  502. * Check if the architecture has support for functional fixed hardware
  503. * read/write capability.
  504. *
  505. * Return: true for supported, false for not supported
  506. */
  507. bool __weak cpc_ffh_supported(void)
  508. {
  509. return false;
  510. }
  511. /**
  512. * cpc_supported_by_cpu() - check if CPPC is supported by CPU
  513. *
  514. * Check if the architectural support for CPPC is present even
  515. * if the _OSC hasn't prescribed it
  516. *
  517. * Return: true for supported, false for not supported
  518. */
  519. bool __weak cpc_supported_by_cpu(void)
  520. {
  521. return false;
  522. }
  523. /**
  524. * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
  525. * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
  526. *
  527. * Check and allocate the cppc_pcc_data memory.
  528. * In some processor configurations it is possible that the same subspace
  529. * is shared between multiple CPUs. This is seen especially in CPUs
  530. * with hardware multi-threading support.
  531. *
  532. * Return: 0 for success, errno for failure
  533. */
  534. static int pcc_data_alloc(int pcc_ss_id)
  535. {
  536. if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
  537. return -EINVAL;
  538. if (pcc_data[pcc_ss_id]) {
  539. pcc_data[pcc_ss_id]->refcount++;
  540. } else {
  541. pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
  542. GFP_KERNEL);
  543. if (!pcc_data[pcc_ss_id])
  544. return -ENOMEM;
  545. pcc_data[pcc_ss_id]->refcount++;
  546. }
  547. return 0;
  548. }
  549. /*
  550. * An example CPC table looks like the following.
  551. *
  552. * Name (_CPC, Package() {
  553. * 17, // NumEntries
  554. * 1, // Revision
  555. * ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)}, // Highest Performance
  556. * ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)}, // Nominal Performance
  557. * ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)}, // Lowest Nonlinear Performance
  558. * ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)}, // Lowest Performance
  559. * ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)}, // Guaranteed Performance Register
  560. * ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)}, // Desired Performance Register
  561. * ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
  562. * ...
  563. * ...
  564. * ...
  565. * }
  566. * Each Register() encodes how to access that specific register.
  567. * e.g. a sample PCC entry has the following encoding:
  568. *
  569. * Register (
  570. * PCC, // AddressSpaceKeyword
  571. * 8, // RegisterBitWidth
  572. * 8, // RegisterBitOffset
  573. * 0x30, // RegisterAddress
  574. * 9, // AccessSize (subspace ID)
  575. * )
  576. */
  577. /**
  578. * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
  579. * @pr: Ptr to acpi_processor containing this CPU's logical ID.
  580. *
  581. * Return: 0 for success or negative value for err.
  582. */
  583. int acpi_cppc_processor_probe(struct acpi_processor *pr)
  584. {
  585. struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
  586. union acpi_object *out_obj, *cpc_obj;
  587. struct cpc_desc *cpc_ptr;
  588. struct cpc_reg *gas_t;
  589. struct device *cpu_dev;
  590. acpi_handle handle = pr->handle;
  591. unsigned int num_ent, i, cpc_rev;
  592. int pcc_subspace_id = -1;
  593. acpi_status status;
  594. int ret = -ENODATA;
  595. if (!osc_sb_cppc2_support_acked) {
  596. pr_debug("CPPC v2 _OSC not acked\n");
  597. if (!cpc_supported_by_cpu()) {
  598. pr_debug("CPPC is not supported by the CPU\n");
  599. return -ENODEV;
  600. }
  601. }
  602. /* Parse the ACPI _CPC table for this CPU. */
  603. status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
  604. ACPI_TYPE_PACKAGE);
  605. if (ACPI_FAILURE(status)) {
  606. ret = -ENODEV;
  607. goto out_buf_free;
  608. }
  609. out_obj = (union acpi_object *) output.pointer;
  610. cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
  611. if (!cpc_ptr) {
  612. ret = -ENOMEM;
  613. goto out_buf_free;
  614. }
  615. /* First entry is NumEntries. */
  616. cpc_obj = &out_obj->package.elements[0];
  617. if (cpc_obj->type == ACPI_TYPE_INTEGER) {
  618. num_ent = cpc_obj->integer.value;
  619. if (num_ent <= 1) {
  620. pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
  621. num_ent, pr->id);
  622. goto out_free;
  623. }
  624. } else {
  625. pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
  626. cpc_obj->type, pr->id);
  627. goto out_free;
  628. }
  629. /* Second entry should be revision. */
  630. cpc_obj = &out_obj->package.elements[1];
  631. if (cpc_obj->type == ACPI_TYPE_INTEGER) {
  632. cpc_rev = cpc_obj->integer.value;
  633. } else {
  634. pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
  635. cpc_obj->type, pr->id);
  636. goto out_free;
  637. }
  638. if (cpc_rev < CPPC_V2_REV) {
  639. pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
  640. pr->id);
  641. goto out_free;
  642. }
  643. /*
  644. * Disregard _CPC if the number of entries in the returned package is not
  645. * as expected, but support future revisions being proper supersets of
  646. * v3 that only cause more entries to be returned by _CPC.
  647. */
  648. if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
  649. (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
  650. (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
  651. pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
  652. num_ent, pr->id);
  653. goto out_free;
  654. }
  655. if (cpc_rev > CPPC_V3_REV) {
  656. num_ent = CPPC_V3_NUM_ENT;
  657. cpc_rev = CPPC_V3_REV;
  658. }
  659. cpc_ptr->num_entries = num_ent;
  660. cpc_ptr->version = cpc_rev;
  661. /* Iterate through remaining entries in _CPC */
  662. for (i = 2; i < num_ent; i++) {
  663. cpc_obj = &out_obj->package.elements[i];
  664. if (cpc_obj->type == ACPI_TYPE_INTEGER) {
  665. cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
  666. cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
  667. } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
  668. gas_t = (struct cpc_reg *)
  669. cpc_obj->buffer.pointer;
  670. /*
  671. * The PCC Subspace index is encoded inside
  672. * the CPC table entries. The same PCC index
  673. * will be used for all the PCC entries,
  674. * so extract it only once.
  675. */
  676. if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
  677. if (pcc_subspace_id < 0) {
  678. pcc_subspace_id = gas_t->access_width;
  679. if (pcc_data_alloc(pcc_subspace_id))
  680. goto out_free;
  681. } else if (pcc_subspace_id != gas_t->access_width) {
  682. pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
  683. pr->id);
  684. goto out_free;
  685. }
  686. } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
  687. if (gas_t->address) {
  688. void __iomem *addr;
  689. size_t access_width;
  690. if (!osc_cpc_flexible_adr_space_confirmed) {
  691. pr_debug("Flexible address space capability not supported\n");
  692. if (!cpc_supported_by_cpu())
  693. goto out_free;
  694. }
  695. access_width = GET_BIT_WIDTH(gas_t) / 8;
  696. addr = ioremap(gas_t->address, access_width);
  697. if (!addr)
  698. goto out_free;
  699. cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
  700. }
  701. } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
  702. if (gas_t->access_width < 1 || gas_t->access_width > 3) {
  703. /*
  704. * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
  705. * SystemIO doesn't implement 64-bit
  706. * registers.
  707. */
  708. pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
  709. gas_t->access_width);
  710. goto out_free;
  711. }
  712. if (gas_t->address & OVER_16BTS_MASK) {
  713. /* SystemIO registers use 16-bit integer addresses */
  714. pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
  715. gas_t->address);
  716. goto out_free;
  717. }
  718. if (!osc_cpc_flexible_adr_space_confirmed) {
  719. pr_debug("Flexible address space capability not supported\n");
  720. if (!cpc_supported_by_cpu())
  721. goto out_free;
  722. }
  723. } else {
  724. if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
  725. /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
  726. pr_debug("Unsupported register type (%d) in _CPC\n",
  727. gas_t->space_id);
  728. goto out_free;
  729. }
  730. }
  731. cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
  732. memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
  733. } else {
  734. pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
  735. i, pr->id);
  736. goto out_free;
  737. }
  738. }
  739. per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
  740. /*
  741. * Initialize the remaining cpc_regs as unsupported.
  742. * Example: In case FW exposes CPPC v2, the below loop will initialize
  743. * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
  744. */
  745. for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
  746. cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
  747. cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
  748. }
  749. /* Store CPU Logical ID */
  750. cpc_ptr->cpu_id = pr->id;
  751. raw_spin_lock_init(&cpc_ptr->rmw_lock);
  752. /* Parse PSD data for this CPU */
  753. ret = acpi_get_psd(cpc_ptr, handle);
  754. if (ret)
  755. goto out_free;
  756. /* Register PCC channel once for all PCC subspace ID. */
  757. if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
  758. ret = register_pcc_channel(pcc_subspace_id);
  759. if (ret)
  760. goto out_free;
  761. init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
  762. init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
  763. }
  764. /* Everything looks okay */
  765. pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
  766. /* Add per logical CPU nodes for reading its feedback counters. */
  767. cpu_dev = get_cpu_device(pr->id);
  768. if (!cpu_dev) {
  769. ret = -EINVAL;
  770. goto out_free;
  771. }
  772. /* Plug PSD data into this CPU's CPC descriptor. */
  773. per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
  774. ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
  775. "acpi_cppc");
  776. if (ret) {
  777. per_cpu(cpc_desc_ptr, pr->id) = NULL;
  778. kobject_put(&cpc_ptr->kobj);
  779. goto out_free;
  780. }
  781. kfree(output.pointer);
  782. return 0;
  783. out_free:
  784. /* Free all the mapped sys mem areas for this CPU */
  785. for (i = 2; i < cpc_ptr->num_entries; i++) {
  786. void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
  787. if (addr)
  788. iounmap(addr);
  789. }
  790. kfree(cpc_ptr);
  791. out_buf_free:
  792. kfree(output.pointer);
  793. return ret;
  794. }
  795. EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
  796. /**
  797. * acpi_cppc_processor_exit - Cleanup CPC structs.
  798. * @pr: Ptr to acpi_processor containing this CPU's logical ID.
  799. *
  800. * Return: Void
  801. */
  802. void acpi_cppc_processor_exit(struct acpi_processor *pr)
  803. {
  804. struct cpc_desc *cpc_ptr;
  805. unsigned int i;
  806. void __iomem *addr;
  807. int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
  808. if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
  809. if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
  810. pcc_data[pcc_ss_id]->refcount--;
  811. if (!pcc_data[pcc_ss_id]->refcount) {
  812. pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
  813. kfree(pcc_data[pcc_ss_id]);
  814. pcc_data[pcc_ss_id] = NULL;
  815. }
  816. }
  817. }
  818. cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
  819. if (!cpc_ptr)
  820. return;
  821. /* Free all the mapped sys mem areas for this CPU */
  822. for (i = 2; i < cpc_ptr->num_entries; i++) {
  823. addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
  824. if (addr)
  825. iounmap(addr);
  826. }
  827. kobject_put(&cpc_ptr->kobj);
  828. kfree(cpc_ptr);
  829. }
  830. EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
  831. /**
  832. * cpc_read_ffh() - Read FFH register
  833. * @cpunum: CPU number to read
  834. * @reg: cppc register information
  835. * @val: placeholder for the return value
  836. *
  837. * Read bit_width bits from a specified address and bit_offset
  838. *
  839. * Return: 0 on success, or an error code on failure
  840. */
  841. int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
  842. {
  843. return -ENOTSUPP;
  844. }
  845. /**
  846. * cpc_write_ffh() - Write FFH register
  847. * @cpunum: CPU number to write
  848. * @reg: cppc register information
  849. * @val: value to write
  850. *
  851. * Write value of bit_width bits to a specified address and bit_offset
  852. *
  853. * Return: 0 on success, or an error code on failure
  854. */
  855. int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
  856. {
  857. return -ENOTSUPP;
  858. }
  859. /*
  860. * Since cpc_read and cpc_write are called while holding pcc_lock, they should be
  861. * as fast as possible. We have already mapped the PCC subspace during init, so
  862. * we can directly write to it.
  863. */
  864. static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
  865. {
  866. void __iomem *vaddr = NULL;
  867. int size;
  868. int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
  869. struct cpc_reg *reg = &reg_res->cpc_entry.reg;
  870. if (reg_res->type == ACPI_TYPE_INTEGER) {
  871. *val = reg_res->cpc_entry.int_value;
  872. return 0;
  873. }
  874. *val = 0;
  875. size = GET_BIT_WIDTH(reg);
  876. if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
  877. u32 val_u32;
  878. acpi_status status;
  879. status = acpi_os_read_port((acpi_io_address)reg->address,
  880. &val_u32, size);
  881. if (ACPI_FAILURE(status)) {
  882. pr_debug("Error: Failed to read SystemIO port %llx\n",
  883. reg->address);
  884. return -EFAULT;
  885. }
  886. *val = val_u32;
  887. return 0;
  888. } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
  889. /*
  890. * For registers in PCC space, the register size is determined
  891. * by the bit width field; the access size is used to indicate
  892. * the PCC subspace id.
  893. */
  894. size = reg->bit_width;
  895. vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
  896. }
  897. else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
  898. vaddr = reg_res->sys_mem_vaddr;
  899. else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
  900. return cpc_read_ffh(cpu, reg, val);
  901. else
  902. return acpi_os_read_memory((acpi_physical_address)reg->address,
  903. val, size);
  904. switch (size) {
  905. case 8:
  906. *val = readb_relaxed(vaddr);
  907. break;
  908. case 16:
  909. *val = readw_relaxed(vaddr);
  910. break;
  911. case 32:
  912. *val = readl_relaxed(vaddr);
  913. break;
  914. case 64:
  915. *val = readq_relaxed(vaddr);
  916. break;
  917. default:
  918. if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
  919. pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
  920. size, reg->address);
  921. } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
  922. pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
  923. size, pcc_ss_id);
  924. }
  925. return -EFAULT;
  926. }
  927. if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
  928. *val = MASK_VAL_READ(reg, *val);
  929. return 0;
  930. }
  931. static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
  932. {
  933. int ret_val = 0;
  934. int size;
  935. u64 prev_val;
  936. void __iomem *vaddr = NULL;
  937. int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
  938. struct cpc_reg *reg = &reg_res->cpc_entry.reg;
  939. struct cpc_desc *cpc_desc;
  940. unsigned long flags;
  941. size = GET_BIT_WIDTH(reg);
  942. if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
  943. acpi_status status;
  944. status = acpi_os_write_port((acpi_io_address)reg->address,
  945. (u32)val, size);
  946. if (ACPI_FAILURE(status)) {
  947. pr_debug("Error: Failed to write SystemIO port %llx\n",
  948. reg->address);
  949. return -EFAULT;
  950. }
  951. return 0;
  952. } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
  953. /*
  954. * For registers in PCC space, the register size is determined
  955. * by the bit width field; the access size is used to indicate
  956. * the PCC subspace id.
  957. */
  958. size = reg->bit_width;
  959. vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
  960. }
  961. else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
  962. vaddr = reg_res->sys_mem_vaddr;
  963. else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
  964. return cpc_write_ffh(cpu, reg, val);
  965. else
  966. return acpi_os_write_memory((acpi_physical_address)reg->address,
  967. val, size);
  968. if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
  969. cpc_desc = per_cpu(cpc_desc_ptr, cpu);
  970. if (!cpc_desc) {
  971. pr_debug("No CPC descriptor for CPU:%d\n", cpu);
  972. return -ENODEV;
  973. }
  974. raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
  975. switch (size) {
  976. case 8:
  977. prev_val = readb_relaxed(vaddr);
  978. break;
  979. case 16:
  980. prev_val = readw_relaxed(vaddr);
  981. break;
  982. case 32:
  983. prev_val = readl_relaxed(vaddr);
  984. break;
  985. case 64:
  986. prev_val = readq_relaxed(vaddr);
  987. break;
  988. default:
  989. raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
  990. return -EFAULT;
  991. }
  992. val = MASK_VAL_WRITE(reg, prev_val, val);
  993. }
  994. switch (size) {
  995. case 8:
  996. writeb_relaxed(val, vaddr);
  997. break;
  998. case 16:
  999. writew_relaxed(val, vaddr);
  1000. break;
  1001. case 32:
  1002. writel_relaxed(val, vaddr);
  1003. break;
  1004. case 64:
  1005. writeq_relaxed(val, vaddr);
  1006. break;
  1007. default:
  1008. if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
  1009. pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
  1010. size, reg->address);
  1011. } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
  1012. pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
  1013. size, pcc_ss_id);
  1014. }
  1015. ret_val = -EFAULT;
  1016. break;
  1017. }
  1018. if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
  1019. raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
  1020. return ret_val;
  1021. }
  1022. static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
  1023. {
  1024. struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
  1025. struct cpc_register_resource *reg;
  1026. if (!cpc_desc) {
  1027. pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
  1028. return -ENODEV;
  1029. }
  1030. reg = &cpc_desc->cpc_regs[reg_idx];
  1031. if (CPC_IN_PCC(reg)) {
  1032. int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
  1033. struct cppc_pcc_data *pcc_ss_data = NULL;
  1034. int ret = 0;
  1035. if (pcc_ss_id < 0)
  1036. return -EIO;
  1037. pcc_ss_data = pcc_data[pcc_ss_id];
  1038. down_write(&pcc_ss_data->pcc_lock);
  1039. if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
  1040. cpc_read(cpunum, reg, perf);
  1041. else
  1042. ret = -EIO;
  1043. up_write(&pcc_ss_data->pcc_lock);
  1044. return ret;
  1045. }
  1046. cpc_read(cpunum, reg, perf);
  1047. return 0;
  1048. }
  1049. /**
  1050. * cppc_get_desired_perf - Get the desired performance register value.
  1051. * @cpunum: CPU from which to get desired performance.
  1052. * @desired_perf: Return address.
  1053. *
  1054. * Return: 0 for success, -EIO otherwise.
  1055. */
  1056. int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
  1057. {
  1058. return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
  1059. }
  1060. EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
  1061. /**
  1062. * cppc_get_nominal_perf - Get the nominal performance register value.
  1063. * @cpunum: CPU from which to get nominal performance.
  1064. * @nominal_perf: Return address.
  1065. *
  1066. * Return: 0 for success, -EIO otherwise.
  1067. */
  1068. int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
  1069. {
  1070. return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
  1071. }
  1072. /**
  1073. * cppc_get_highest_perf - Get the highest performance register value.
  1074. * @cpunum: CPU from which to get highest performance.
  1075. * @highest_perf: Return address.
  1076. *
  1077. * Return: 0 for success, -EIO otherwise.
  1078. */
  1079. int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
  1080. {
  1081. return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
  1082. }
  1083. EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
  1084. /**
  1085. * cppc_get_epp_perf - Get the epp register value.
  1086. * @cpunum: CPU from which to get epp preference value.
  1087. * @epp_perf: Return address.
  1088. *
  1089. * Return: 0 for success, -EIO otherwise.
  1090. */
  1091. int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
  1092. {
  1093. return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
  1094. }
  1095. EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
  1096. /**
  1097. * cppc_get_perf_caps - Get a CPU's performance capabilities.
  1098. * @cpunum: CPU from which to get capabilities info.
  1099. * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
  1100. *
  1101. * Return: 0 for success with perf_caps populated else -ERRNO.
  1102. */
  1103. int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
  1104. {
  1105. struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
  1106. struct cpc_register_resource *highest_reg, *lowest_reg,
  1107. *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
  1108. *low_freq_reg = NULL, *nom_freq_reg = NULL;
  1109. u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
  1110. int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
  1111. struct cppc_pcc_data *pcc_ss_data = NULL;
  1112. int ret = 0, regs_in_pcc = 0;
  1113. if (!cpc_desc) {
  1114. pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
  1115. return -ENODEV;
  1116. }
  1117. highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
  1118. lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
  1119. lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
  1120. nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
  1121. low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
  1122. nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
  1123. guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
  1124. /* Are any of the regs in PCC? */
  1125. if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
  1126. CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
  1127. CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
  1128. if (pcc_ss_id < 0) {
  1129. pr_debug("Invalid pcc_ss_id\n");
  1130. return -ENODEV;
  1131. }
  1132. pcc_ss_data = pcc_data[pcc_ss_id];
  1133. regs_in_pcc = 1;
  1134. down_write(&pcc_ss_data->pcc_lock);
  1135. /* Ring doorbell once to update PCC subspace */
  1136. if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
  1137. ret = -EIO;
  1138. goto out_err;
  1139. }
  1140. }
  1141. cpc_read(cpunum, highest_reg, &high);
  1142. perf_caps->highest_perf = high;
  1143. cpc_read(cpunum, lowest_reg, &low);
  1144. perf_caps->lowest_perf = low;
  1145. cpc_read(cpunum, nominal_reg, &nom);
  1146. perf_caps->nominal_perf = nom;
  1147. if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
  1148. IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
  1149. perf_caps->guaranteed_perf = 0;
  1150. } else {
  1151. cpc_read(cpunum, guaranteed_reg, &guaranteed);
  1152. perf_caps->guaranteed_perf = guaranteed;
  1153. }
  1154. cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
  1155. perf_caps->lowest_nonlinear_perf = min_nonlinear;
  1156. if (!high || !low || !nom || !min_nonlinear)
  1157. ret = -EFAULT;
  1158. /* Read optional lowest and nominal frequencies if present */
  1159. if (CPC_SUPPORTED(low_freq_reg))
  1160. cpc_read(cpunum, low_freq_reg, &low_f);
  1161. if (CPC_SUPPORTED(nom_freq_reg))
  1162. cpc_read(cpunum, nom_freq_reg, &nom_f);
  1163. perf_caps->lowest_freq = low_f;
  1164. perf_caps->nominal_freq = nom_f;
  1165. out_err:
  1166. if (regs_in_pcc)
  1167. up_write(&pcc_ss_data->pcc_lock);
  1168. return ret;
  1169. }
  1170. EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
  1171. /**
  1172. * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
  1173. *
  1174. * CPPC has flexibility about how CPU performance counters are accessed.
  1175. * One of the choices is PCC regions, which can have a high access latency. This
  1176. * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
  1177. *
  1178. * Return: true if any of the counters are in PCC regions, false otherwise
  1179. */
  1180. bool cppc_perf_ctrs_in_pcc(void)
  1181. {
  1182. int cpu;
  1183. for_each_present_cpu(cpu) {
  1184. struct cpc_register_resource *ref_perf_reg;
  1185. struct cpc_desc *cpc_desc;
  1186. cpc_desc = per_cpu(cpc_desc_ptr, cpu);
  1187. if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
  1188. CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
  1189. CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
  1190. return true;
  1191. ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
  1192. /*
  1193. * If reference perf register is not supported then we should
  1194. * use the nominal perf value
  1195. */
  1196. if (!CPC_SUPPORTED(ref_perf_reg))
  1197. ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
  1198. if (CPC_IN_PCC(ref_perf_reg))
  1199. return true;
  1200. }
  1201. return false;
  1202. }
  1203. EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);
  1204. /**
  1205. * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
  1206. * @cpunum: CPU from which to read counters.
  1207. * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
  1208. *
  1209. * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
  1210. */
  1211. int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
  1212. {
  1213. struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
  1214. struct cpc_register_resource *delivered_reg, *reference_reg,
  1215. *ref_perf_reg, *ctr_wrap_reg;
  1216. int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
  1217. struct cppc_pcc_data *pcc_ss_data = NULL;
  1218. u64 delivered, reference, ref_perf, ctr_wrap_time;
  1219. int ret = 0, regs_in_pcc = 0;
  1220. if (!cpc_desc) {
  1221. pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
  1222. return -ENODEV;
  1223. }
  1224. delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
  1225. reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
  1226. ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
  1227. ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
  1228. /*
  1229. * If reference perf register is not supported then we should
  1230. * use the nominal perf value
  1231. */
  1232. if (!CPC_SUPPORTED(ref_perf_reg))
  1233. ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
  1234. /* Are any of the regs in PCC? */
  1235. if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
  1236. CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
  1237. if (pcc_ss_id < 0) {
  1238. pr_debug("Invalid pcc_ss_id\n");
  1239. return -ENODEV;
  1240. }
  1241. pcc_ss_data = pcc_data[pcc_ss_id];
  1242. down_write(&pcc_ss_data->pcc_lock);
  1243. regs_in_pcc = 1;
  1244. /* Ring doorbell once to update PCC subspace */
  1245. if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
  1246. ret = -EIO;
  1247. goto out_err;
  1248. }
  1249. }
  1250. cpc_read(cpunum, delivered_reg, &delivered);
  1251. cpc_read(cpunum, reference_reg, &reference);
  1252. cpc_read(cpunum, ref_perf_reg, &ref_perf);
  1253. /*
  1254. * Per spec, if ctr_wrap_time optional register is unsupported, then the
  1255. * performance counters are assumed to never wrap during the lifetime of
  1256. * the platform.
  1257. */
  1258. ctr_wrap_time = (u64)(~((u64)0));
  1259. if (CPC_SUPPORTED(ctr_wrap_reg))
  1260. cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
  1261. if (!delivered || !reference || !ref_perf) {
  1262. ret = -EFAULT;
  1263. goto out_err;
  1264. }
  1265. perf_fb_ctrs->delivered = delivered;
  1266. perf_fb_ctrs->reference = reference;
  1267. perf_fb_ctrs->reference_perf = ref_perf;
  1268. perf_fb_ctrs->wraparound_time = ctr_wrap_time;
  1269. out_err:
  1270. if (regs_in_pcc)
  1271. up_write(&pcc_ss_data->pcc_lock);
  1272. return ret;
  1273. }
  1274. EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
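/*
 * Callers of cppc_get_perf_ctrs() (e.g. the cppc_cpufreq driver) typically
 * sample the feedback counters twice and derive the average delivered
 * performance over the interval, roughly as:
 *
 *	delivered_perf = reference_perf *
 *			 (delivered_t1 - delivered_t0) / (reference_t1 - reference_t0);
 *
 * This is only an illustrative sketch; callers must also account for counter
 * wraparound using wraparound_time.
 */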

/*
 * Set Energy Performance Preference Register value through
 * Performance Controls Interface
 */
int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *epp_set_reg;
	struct cpc_register_resource *auto_sel_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
	epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];

	if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
			return -ENODEV;
		}

		if (CPC_SUPPORTED(auto_sel_reg)) {
			ret = cpc_write(cpu, auto_sel_reg, enable);
			if (ret)
				return ret;
		}

		if (CPC_SUPPORTED(epp_set_reg)) {
			ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
			if (ret)
				return ret;
		}

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
	} else if (osc_cpc_flexible_adr_space_confirmed &&
		   CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
		ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
	} else {
		ret = -ENOTSUPP;
		pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
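
/*
 * Example usage (illustrative sketch only): an EPP-aware cpufreq driver might
 * program an energy-performance preference by filling only the energy_perf
 * field of cppc_perf_ctrls and calling cppc_set_epp_perf() with autonomous
 * selection enabled. The value 0xff used below is simply the "maximum energy
 * savings" end of the 0x00-0xff EPP range and is shown for illustration.
 *
 *	struct cppc_perf_ctrls ctrls = { 0 };
 *
 *	ctrls.energy_perf = 0xff;
 *	ret = cppc_set_epp_perf(cpu, &ctrls, true);
 */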

/**
 * cppc_get_auto_sel_caps - Read autonomous selection register.
 * @cpunum : CPU from which to read register.
 * @perf_caps : struct where autonomous selection register value is updated.
 */
int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *auto_sel_reg;
	u64 auto_sel;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];

	if (!CPC_SUPPORTED(auto_sel_reg))
		pr_warn_once("Autonomous mode is not supported!\n");

	if (CPC_IN_PCC(auto_sel_reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -ENODEV;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
			cpc_read(cpunum, auto_sel_reg, &auto_sel);
			perf_caps->auto_sel = (bool)auto_sel;
		} else {
			ret = -EIO;
		}

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);

/**
 * cppc_set_auto_sel - Write autonomous selection register.
 * @cpu    : CPU to which to write register.
 * @enable : the desired value of the autonomous selection register to be updated.
 */
int cppc_set_auto_sel(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *auto_sel_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];

	if (CPC_IN_PCC(auto_sel_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}

		if (CPC_SUPPORTED(auto_sel_reg)) {
			ret = cpc_write(cpu, auto_sel_reg, enable);
			if (ret)
				return ret;
		}

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
	} else {
		ret = -ENOTSUPP;
		pr_debug("_CPC in PCC is not supported\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
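
/*
 * Example usage (illustrative sketch only): checking whether the platform is
 * currently in autonomous selection mode and, if not, requesting it. Error
 * handling is elided for brevity.
 *
 *	struct cppc_perf_caps caps = { 0 };
 *
 *	if (!cppc_get_auto_sel_caps(cpu, &caps) && !caps.auto_sel)
 *		cppc_set_auto_sel(cpu, true);
 */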

/**
 * cppc_set_enable - Set to enable CPPC on the processor by writing the
 * Continuous Performance Control package EnableRegister field.
 * @cpu: CPU for which to enable CPPC register.
 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
 *
 * Return: 0 for success, -ERRNO or -EIO otherwise.
 */
int cppc_set_enable(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *enable_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -EINVAL;
	}

	enable_reg = &cpc_desc->cpc_regs[ENABLE];

	if (CPC_IN_PCC(enable_reg)) {
		if (pcc_ss_id < 0)
			return -EIO;

		ret = cpc_write(cpu, enable_reg, enable);
		if (ret)
			return ret;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
		return ret;
	}

	return cpc_write(cpu, enable_reg, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
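
/*
 * Example usage (illustrative sketch only): a CPPC-based cpufreq driver would
 * normally enable CPPC on a CPU before programming any performance controls,
 * e.g. while initializing its policy for that CPU:
 *
 *	ret = cppc_set_enable(cpu, true);
 *	if (ret)
 *		pr_debug("Could not enable CPPC on CPU%d (%d)\n", cpu, ret);
 */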

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
	max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel.
	 *
	 * Since the read_lock can be acquired by multiple CPUs simultaneously,
	 * we achieve that goal here.
	 */
	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	/*
	 * Only write if min_perf and max_perf are not zero. Some drivers pass
	 * zero to min and max perf, but they don't mean to set a zero value;
	 * they just don't want to write to those registers.
	 */
	if (perf_ctrls->min_perf)
		cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
	if (perf_ctrls->max_perf)
		cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);

	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform.
	 *
	 * Short summary: consider a group of cppc_set_perf requests that happen
	 * within a short, overlapping interval. The last CPU to come out of
	 * Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 *	  currently executing in Phase-I.
	 *     2. Once we start Phase-II we want to prevent all other CPUs from
	 *	  entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 *	  to run Phase-II.
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE:
	 *     1. There is at least one CPU in Phase-I which will later execute
	 *	  write_trylock, so the CPUs in Phase-I will be responsible for
	 *	  executing Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute
	 *	  write_trylock and has already acquired the write_lock. We know
	 *	  for a fact it (the other CPU acquiring the write_lock) couldn't
	 *	  have happened before this CPU's Phase-I, as we held the read_lock.
	 *     3. Some other CPU executing a pcc CMD_READ has stolen the
	 *	  down_write, in which case send_pcc_cmd will check for pending
	 *	  CMD_WRITE commands by checking pending_pcc_write_cmd, so this
	 *	  CPU can be certain that its request will be delivered.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * the thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to the platform by triggering the
	 * doorbell and transferred the ownership of PCC to the platform. So this
	 * check avoids triggering an unnecessary doorbell and, more importantly,
	 * before triggering the doorbell it makes sure that the PCC channel
	 * ownership is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
	 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and, if there are pending writes, it delivers
	 * the write command before servicing the read command.
	 */
	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
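
/*
 * Example usage (illustrative sketch only): a cpufreq ->target()-style
 * callback could translate a requested frequency in kHz into an abstract
 * performance level with cppc_khz_to_perf() (defined later in this file) and
 * then request it here. Only desired_perf is set; min_perf/max_perf are left
 * at zero so those registers are not written (see the comment in
 * cppc_set_perf() above). "caps" is assumed to be a cppc_perf_caps previously
 * filled via cppc_get_perf_caps(), and "target_khz" is the requested
 * frequency; both names are hypothetical.
 *
 *	struct cppc_perf_ctrls ctrls = { 0 };
 *
 *	ctrls.desired_perf = cppc_khz_to_perf(&caps, target_khz);
 *	ret = cppc_set_perf(cpu, &ctrls);
 */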

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for per_cpu().
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 *
 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
 * then assume there is no latency.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal - Expected latency to process a command, in microseconds
	 * pcc_mpar    - The maximum number of periodic requests that the subspace
	 *               channel can support, reported in commands per minute. 0
	 *               indicates no limitation.
	 * pcc_mrtt    - The minimum amount of time that OSPM must wait after the
	 *               completion of a command before issuing the next command,
	 *               in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
		return 0;
	else if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
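
/*
 * Worked example (illustrative numbers only): with pcc_mpar = 100
 * commands/minute, pcc_nominal = 5000 us and pcc_mrtt = 1000 us, the MPAR
 * term gives 60 * (10^9 / 100) = 600,000,000 ns, which dominates the nominal
 * (5,000,000 ns) and MRTT (1,000,000 ns) terms, so the reported transition
 * latency would be 600 ms.
 */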

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = umax(val, *mhz);
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return KHZ_PER_MHZ * mhz;
}

/*
 * If the CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing through the 2 points:
 * - (Low perf, Low freq)
 * - (Nominal perf, Nominal freq)
 */
unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
{
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		/* Avoid special case when nominal_freq is equal to lowest_freq */
		if (caps->lowest_freq == caps->nominal_freq) {
			mul = caps->nominal_freq;
			div = caps->nominal_perf;
		} else {
			mul = caps->nominal_freq - caps->lowest_freq;
			div = caps->nominal_perf - caps->lowest_perf;
		}
		mul *= KHZ_PER_MHZ;
		offset = caps->nominal_freq * KHZ_PER_MHZ -
			 div64_u64(caps->nominal_perf * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}

	retval = offset + div64_u64(perf * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
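
/*
 * Worked example (illustrative numbers only): with lowest_perf = 10,
 * lowest_freq = 500 MHz, nominal_perf = 100 and nominal_freq = 2000 MHz,
 * the slope is mul/div = (2000 - 500) * 1000 / (100 - 10) kHz per perf unit
 * and offset = 2000 * 1000 - div64_u64(100 * 1500000, 90) = 333334 kHz.
 * A request of perf = 55 then maps to 333334 + div64_u64(55 * 1500000, 90)
 * = 1250000 kHz, i.e. 1250 MHz, halfway between the two anchor frequencies,
 * just as perf 55 lies halfway between perf 10 and perf 100.
 */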

unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
{
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		/* Avoid special case when nominal_freq is equal to lowest_freq */
		if (caps->lowest_freq == caps->nominal_freq) {
			mul = caps->nominal_perf;
			div = caps->nominal_freq;
		} else {
			mul = caps->nominal_perf - caps->lowest_perf;
			div = caps->nominal_freq - caps->lowest_freq;
		}
		/*
		 * We don't need to convert to kHz for computing offset and can
		 * directly use nominal_freq and lowest_freq as the div64_u64
		 * will remove the frequency unit.
		 */
		offset = caps->nominal_perf -
			 div64_u64(caps->nominal_freq * mul, div);
		/* But we need it for computing the perf level. */
		div *= KHZ_PER_MHZ;
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	retval = offset + div64_u64(freq * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
EXPORT_SYMBOL_GPL(cppc_khz_to_perf);