/* arch/x86/kernel/hpet.c — High Precision Event Timer (HPET) support */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. #include <linux/clockchips.h>
  3. #include <linux/interrupt.h>
  4. #include <linux/export.h>
  5. #include <linux/delay.h>
  6. #include <linux/hpet.h>
  7. #include <linux/cpu.h>
  8. #include <linux/irq.h>
  9. #include <asm/irq_remapping.h>
  10. #include <asm/hpet.h>
  11. #include <asm/time.h>
  12. #include <asm/mwait.h>
  13. #undef pr_fmt
  14. #define pr_fmt(fmt) "hpet: " fmt
  15. enum hpet_mode {
  16. HPET_MODE_UNUSED,
  17. HPET_MODE_LEGACY,
  18. HPET_MODE_CLOCKEVT,
  19. HPET_MODE_DEVICE,
  20. };
  21. struct hpet_channel {
  22. struct clock_event_device evt;
  23. unsigned int num;
  24. unsigned int cpu;
  25. unsigned int irq;
  26. unsigned int in_use;
  27. enum hpet_mode mode;
  28. unsigned int boot_cfg;
  29. char name[10];
  30. };
  31. struct hpet_base {
  32. unsigned int nr_channels;
  33. unsigned int nr_clockevents;
  34. unsigned int boot_cfg;
  35. struct hpet_channel *channels;
  36. };
  37. #define HPET_MASK CLOCKSOURCE_MASK(32)
  38. #define HPET_MIN_CYCLES 128
  39. #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
  40. /*
  41. * HPET address is set in acpi/boot.c, when an ACPI entry exists
  42. */
  43. unsigned long hpet_address;
  44. u8 hpet_blockid; /* OS timer block num */
  45. bool hpet_msi_disable;
  46. #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_GENERIC_MSI_IRQ)
  47. static DEFINE_PER_CPU(struct hpet_channel *, cpu_hpet_channel);
  48. static struct irq_domain *hpet_domain;
  49. #endif
  50. static void __iomem *hpet_virt_address;
  51. static struct hpet_base hpet_base;
  52. static bool hpet_legacy_int_enabled;
  53. static unsigned long hpet_freq;
  54. bool boot_hpet_disable;
  55. bool hpet_force_user;
  56. static bool hpet_verbose;
  57. static inline
  58. struct hpet_channel *clockevent_to_channel(struct clock_event_device *evt)
  59. {
  60. return container_of(evt, struct hpet_channel, evt);
  61. }
  62. inline unsigned int hpet_readl(unsigned int a)
  63. {
  64. return readl(hpet_virt_address + a);
  65. }
  66. static inline void hpet_writel(unsigned int d, unsigned int a)
  67. {
  68. writel(d, hpet_virt_address + a);
  69. }
  70. static inline void hpet_set_mapping(void)
  71. {
  72. hpet_virt_address = ioremap(hpet_address, HPET_MMAP_SIZE);
  73. }
  74. static inline void hpet_clear_mapping(void)
  75. {
  76. iounmap(hpet_virt_address);
  77. hpet_virt_address = NULL;
  78. }
  79. /*
  80. * HPET command line enable / disable
  81. */
  82. static int __init hpet_setup(char *str)
  83. {
  84. while (str) {
  85. char *next = strchr(str, ',');
  86. if (next)
  87. *next++ = 0;
  88. if (!strncmp("disable", str, 7))
  89. boot_hpet_disable = true;
  90. if (!strncmp("force", str, 5))
  91. hpet_force_user = true;
  92. if (!strncmp("verbose", str, 7))
  93. hpet_verbose = true;
  94. str = next;
  95. }
  96. return 1;
  97. }
  98. __setup("hpet=", hpet_setup);
  99. static int __init disable_hpet(char *str)
  100. {
  101. boot_hpet_disable = true;
  102. return 1;
  103. }
  104. __setup("nohpet", disable_hpet);
  105. static inline int is_hpet_capable(void)
  106. {
  107. return !boot_hpet_disable && hpet_address;
  108. }
  109. /**
  110. * is_hpet_enabled - Check whether the legacy HPET timer interrupt is enabled
  111. */
  112. int is_hpet_enabled(void)
  113. {
  114. return is_hpet_capable() && hpet_legacy_int_enabled;
  115. }
  116. EXPORT_SYMBOL_GPL(is_hpet_enabled);
  117. static void _hpet_print_config(const char *function, int line)
  118. {
  119. u32 i, id, period, cfg, status, channels, l, h;
  120. pr_info("%s(%d):\n", function, line);
  121. id = hpet_readl(HPET_ID);
  122. period = hpet_readl(HPET_PERIOD);
  123. pr_info("ID: 0x%x, PERIOD: 0x%x\n", id, period);
  124. cfg = hpet_readl(HPET_CFG);
  125. status = hpet_readl(HPET_STATUS);
  126. pr_info("CFG: 0x%x, STATUS: 0x%x\n", cfg, status);
  127. l = hpet_readl(HPET_COUNTER);
  128. h = hpet_readl(HPET_COUNTER+4);
  129. pr_info("COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);
  130. channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
  131. for (i = 0; i < channels; i++) {
  132. l = hpet_readl(HPET_Tn_CFG(i));
  133. h = hpet_readl(HPET_Tn_CFG(i)+4);
  134. pr_info("T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", i, l, h);
  135. l = hpet_readl(HPET_Tn_CMP(i));
  136. h = hpet_readl(HPET_Tn_CMP(i)+4);
  137. pr_info("T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", i, l, h);
  138. l = hpet_readl(HPET_Tn_ROUTE(i));
  139. h = hpet_readl(HPET_Tn_ROUTE(i)+4);
  140. pr_info("T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", i, l, h);
  141. }
  142. }
  143. #define hpet_print_config() \
  144. do { \
  145. if (hpet_verbose) \
  146. _hpet_print_config(__func__, __LINE__); \
  147. } while (0)
  148. /*
  149. * When the HPET driver (/dev/hpet) is enabled, we need to reserve
  150. * timer 0 and timer 1 in case of RTC emulation.
  151. */
  152. #ifdef CONFIG_HPET
  153. static void __init hpet_reserve_platform_timers(void)
  154. {
  155. struct hpet_data hd;
  156. unsigned int i;
  157. memset(&hd, 0, sizeof(hd));
  158. hd.hd_phys_address = hpet_address;
  159. hd.hd_address = hpet_virt_address;
  160. hd.hd_nirqs = hpet_base.nr_channels;
  161. /*
  162. * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
  163. * is wrong for i8259!) not the output IRQ. Many BIOS writers
  164. * don't bother configuring *any* comparator interrupts.
  165. */
  166. hd.hd_irq[0] = HPET_LEGACY_8254;
  167. hd.hd_irq[1] = HPET_LEGACY_RTC;
  168. for (i = 0; i < hpet_base.nr_channels; i++) {
  169. struct hpet_channel *hc = hpet_base.channels + i;
  170. if (i >= 2)
  171. hd.hd_irq[i] = hc->irq;
  172. switch (hc->mode) {
  173. case HPET_MODE_UNUSED:
  174. case HPET_MODE_DEVICE:
  175. hc->mode = HPET_MODE_DEVICE;
  176. break;
  177. case HPET_MODE_CLOCKEVT:
  178. case HPET_MODE_LEGACY:
  179. hpet_reserve_timer(&hd, hc->num);
  180. break;
  181. }
  182. }
  183. hpet_alloc(&hd);
  184. }
  185. static void __init hpet_select_device_channel(void)
  186. {
  187. int i;
  188. for (i = 0; i < hpet_base.nr_channels; i++) {
  189. struct hpet_channel *hc = hpet_base.channels + i;
  190. /* Associate the first unused channel to /dev/hpet */
  191. if (hc->mode == HPET_MODE_UNUSED) {
  192. hc->mode = HPET_MODE_DEVICE;
  193. return;
  194. }
  195. }
  196. }
  197. #else
  198. static inline void hpet_reserve_platform_timers(void) { }
  199. static inline void hpet_select_device_channel(void) {}
  200. #endif
  201. /* Common HPET functions */
  202. static void hpet_stop_counter(void)
  203. {
  204. u32 cfg = hpet_readl(HPET_CFG);
  205. cfg &= ~HPET_CFG_ENABLE;
  206. hpet_writel(cfg, HPET_CFG);
  207. }
  208. static void hpet_reset_counter(void)
  209. {
  210. hpet_writel(0, HPET_COUNTER);
  211. hpet_writel(0, HPET_COUNTER + 4);
  212. }
  213. static void hpet_start_counter(void)
  214. {
  215. unsigned int cfg = hpet_readl(HPET_CFG);
  216. cfg |= HPET_CFG_ENABLE;
  217. hpet_writel(cfg, HPET_CFG);
  218. }
  219. static void hpet_restart_counter(void)
  220. {
  221. hpet_stop_counter();
  222. hpet_reset_counter();
  223. hpet_start_counter();
  224. }
  225. static void hpet_resume_device(void)
  226. {
  227. force_hpet_resume();
  228. }
  229. static void hpet_resume_counter(struct clocksource *cs)
  230. {
  231. hpet_resume_device();
  232. hpet_restart_counter();
  233. }
  234. static void hpet_enable_legacy_int(void)
  235. {
  236. unsigned int cfg = hpet_readl(HPET_CFG);
  237. cfg |= HPET_CFG_LEGACY;
  238. hpet_writel(cfg, HPET_CFG);
  239. hpet_legacy_int_enabled = true;
  240. }
  241. static int hpet_clkevt_set_state_periodic(struct clock_event_device *evt)
  242. {
  243. unsigned int channel = clockevent_to_channel(evt)->num;
  244. unsigned int cfg, cmp, now;
  245. uint64_t delta;
  246. hpet_stop_counter();
  247. delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult;
  248. delta >>= evt->shift;
  249. now = hpet_readl(HPET_COUNTER);
  250. cmp = now + (unsigned int)delta;
  251. cfg = hpet_readl(HPET_Tn_CFG(channel));
  252. cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
  253. HPET_TN_32BIT;
  254. hpet_writel(cfg, HPET_Tn_CFG(channel));
  255. hpet_writel(cmp, HPET_Tn_CMP(channel));
  256. udelay(1);
  257. /*
  258. * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
  259. * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
  260. * bit is automatically cleared after the first write.
  261. * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
  262. * Publication # 24674)
  263. */
  264. hpet_writel((unsigned int)delta, HPET_Tn_CMP(channel));
  265. hpet_start_counter();
  266. hpet_print_config();
  267. return 0;
  268. }
  269. static int hpet_clkevt_set_state_oneshot(struct clock_event_device *evt)
  270. {
  271. unsigned int channel = clockevent_to_channel(evt)->num;
  272. unsigned int cfg;
  273. cfg = hpet_readl(HPET_Tn_CFG(channel));
  274. cfg &= ~HPET_TN_PERIODIC;
  275. cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
  276. hpet_writel(cfg, HPET_Tn_CFG(channel));
  277. return 0;
  278. }
  279. static int hpet_clkevt_set_state_shutdown(struct clock_event_device *evt)
  280. {
  281. unsigned int channel = clockevent_to_channel(evt)->num;
  282. unsigned int cfg;
  283. cfg = hpet_readl(HPET_Tn_CFG(channel));
  284. cfg &= ~HPET_TN_ENABLE;
  285. hpet_writel(cfg, HPET_Tn_CFG(channel));
  286. return 0;
  287. }
  288. static int hpet_clkevt_legacy_resume(struct clock_event_device *evt)
  289. {
  290. hpet_enable_legacy_int();
  291. hpet_print_config();
  292. return 0;
  293. }
  294. static int
  295. hpet_clkevt_set_next_event(unsigned long delta, struct clock_event_device *evt)
  296. {
  297. unsigned int channel = clockevent_to_channel(evt)->num;
  298. u32 cnt;
  299. s32 res;
  300. cnt = hpet_readl(HPET_COUNTER);
  301. cnt += (u32) delta;
  302. hpet_writel(cnt, HPET_Tn_CMP(channel));
  303. /*
  304. * HPETs are a complete disaster. The compare register is
  305. * based on a equal comparison and neither provides a less
  306. * than or equal functionality (which would require to take
  307. * the wraparound into account) nor a simple count down event
  308. * mode. Further the write to the comparator register is
  309. * delayed internally up to two HPET clock cycles in certain
  310. * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
  311. * longer delays. We worked around that by reading back the
  312. * compare register, but that required another workaround for
  313. * ICH9,10 chips where the first readout after write can
  314. * return the old stale value. We already had a minimum
  315. * programming delta of 5us enforced, but a NMI or SMI hitting
  316. * between the counter readout and the comparator write can
  317. * move us behind that point easily. Now instead of reading
  318. * the compare register back several times, we make the ETIME
  319. * decision based on the following: Return ETIME if the
  320. * counter value after the write is less than HPET_MIN_CYCLES
  321. * away from the event or if the counter is already ahead of
  322. * the event. The minimum programming delta for the generic
  323. * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
  324. */
  325. res = (s32)(cnt - hpet_readl(HPET_COUNTER));
  326. return res < HPET_MIN_CYCLES ? -ETIME : 0;
  327. }
  328. static void hpet_init_clockevent(struct hpet_channel *hc, unsigned int rating)
  329. {
  330. struct clock_event_device *evt = &hc->evt;
  331. evt->rating = rating;
  332. evt->irq = hc->irq;
  333. evt->name = hc->name;
  334. evt->cpumask = cpumask_of(hc->cpu);
  335. evt->set_state_oneshot = hpet_clkevt_set_state_oneshot;
  336. evt->set_next_event = hpet_clkevt_set_next_event;
  337. evt->set_state_shutdown = hpet_clkevt_set_state_shutdown;
  338. evt->features = CLOCK_EVT_FEAT_ONESHOT;
  339. if (hc->boot_cfg & HPET_TN_PERIODIC) {
  340. evt->features |= CLOCK_EVT_FEAT_PERIODIC;
  341. evt->set_state_periodic = hpet_clkevt_set_state_periodic;
  342. }
  343. }
  344. static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc)
  345. {
  346. /*
  347. * Start HPET with the boot CPU's cpumask and make it global after
  348. * the IO_APIC has been initialized.
  349. */
  350. hc->cpu = boot_cpu_data.cpu_index;
  351. strscpy(hc->name, "hpet", sizeof(hc->name));
  352. hpet_init_clockevent(hc, 50);
  353. hc->evt.tick_resume = hpet_clkevt_legacy_resume;
  354. /*
  355. * Legacy horrors and sins from the past. HPET used periodic mode
  356. * unconditionally forever on the legacy channel 0. Removing the
  357. * below hack and using the conditional in hpet_init_clockevent()
  358. * makes at least Qemu and one hardware machine fail to boot.
  359. * There are two issues which cause the boot failure:
  360. *
  361. * #1 After the timer delivery test in IOAPIC and the IOAPIC setup
  362. * the next interrupt is not delivered despite the HPET channel
  363. * being programmed correctly. Reprogramming the HPET after
  364. * switching to IOAPIC makes it work again. After fixing this,
  365. * the next issue surfaces:
  366. *
  367. * #2 Due to the unconditional periodic mode availability the Local
  368. * APIC timer calibration can hijack the global clockevents
  369. * event handler without causing damage. Using oneshot at this
  370. * stage makes if hang because the HPET does not get
  371. * reprogrammed due to the handler hijacking. Duh, stupid me!
  372. *
  373. * Both issues require major surgery and especially the kick HPET
  374. * again after enabling IOAPIC results in really nasty hackery.
  375. * This 'assume periodic works' magic has survived since HPET
  376. * support got added, so it's questionable whether this should be
  377. * fixed. Both Qemu and the failing hardware machine support
  378. * periodic mode despite the fact that both don't advertise it in
  379. * the configuration register and both need that extra kick after
  380. * switching to IOAPIC. Seems to be a feature...
  381. */
  382. hc->evt.features |= CLOCK_EVT_FEAT_PERIODIC;
  383. hc->evt.set_state_periodic = hpet_clkevt_set_state_periodic;
  384. /* Start HPET legacy interrupts */
  385. hpet_enable_legacy_int();
  386. clockevents_config_and_register(&hc->evt, hpet_freq,
  387. HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
  388. global_clock_event = &hc->evt;
  389. pr_debug("Clockevent registered\n");
  390. }
  391. /*
  392. * HPET MSI Support
  393. */
  394. #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_GENERIC_MSI_IRQ)
  395. static void hpet_msi_unmask(struct irq_data *data)
  396. {
  397. struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
  398. unsigned int cfg;
  399. cfg = hpet_readl(HPET_Tn_CFG(hc->num));
  400. cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
  401. hpet_writel(cfg, HPET_Tn_CFG(hc->num));
  402. }
  403. static void hpet_msi_mask(struct irq_data *data)
  404. {
  405. struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
  406. unsigned int cfg;
  407. cfg = hpet_readl(HPET_Tn_CFG(hc->num));
  408. cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
  409. hpet_writel(cfg, HPET_Tn_CFG(hc->num));
  410. }
  411. static void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg)
  412. {
  413. hpet_writel(msg->data, HPET_Tn_ROUTE(hc->num));
  414. hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hc->num) + 4);
  415. }
  416. static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
  417. {
  418. hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
  419. }
  420. static struct irq_chip hpet_msi_controller __ro_after_init = {
  421. .name = "HPET-MSI",
  422. .irq_unmask = hpet_msi_unmask,
  423. .irq_mask = hpet_msi_mask,
  424. .irq_ack = irq_chip_ack_parent,
  425. .irq_set_affinity = msi_domain_set_affinity,
  426. .irq_retrigger = irq_chip_retrigger_hierarchy,
  427. .irq_write_msi_msg = hpet_msi_write_msg,
  428. .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
  429. };
  430. static int hpet_msi_init(struct irq_domain *domain,
  431. struct msi_domain_info *info, unsigned int virq,
  432. irq_hw_number_t hwirq, msi_alloc_info_t *arg)
  433. {
  434. irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
  435. irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL,
  436. handle_edge_irq, arg->data, "edge");
  437. return 0;
  438. }
  439. static void hpet_msi_free(struct irq_domain *domain,
  440. struct msi_domain_info *info, unsigned int virq)
  441. {
  442. irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
  443. }
  444. static struct msi_domain_ops hpet_msi_domain_ops = {
  445. .msi_init = hpet_msi_init,
  446. .msi_free = hpet_msi_free,
  447. };
  448. static struct msi_domain_info hpet_msi_domain_info = {
  449. .ops = &hpet_msi_domain_ops,
  450. .chip = &hpet_msi_controller,
  451. .flags = MSI_FLAG_USE_DEF_DOM_OPS,
  452. };
  453. static struct irq_domain *hpet_create_irq_domain(int hpet_id)
  454. {
  455. struct msi_domain_info *domain_info;
  456. struct irq_domain *parent, *d;
  457. struct fwnode_handle *fn;
  458. struct irq_fwspec fwspec;
  459. if (x86_vector_domain == NULL)
  460. return NULL;
  461. domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL);
  462. if (!domain_info)
  463. return NULL;
  464. *domain_info = hpet_msi_domain_info;
  465. domain_info->data = (void *)(long)hpet_id;
  466. fn = irq_domain_alloc_named_id_fwnode(hpet_msi_controller.name,
  467. hpet_id);
  468. if (!fn) {
  469. kfree(domain_info);
  470. return NULL;
  471. }
  472. fwspec.fwnode = fn;
  473. fwspec.param_count = 1;
  474. fwspec.param[0] = hpet_id;
  475. parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_GENERIC_MSI);
  476. if (!parent) {
  477. irq_domain_free_fwnode(fn);
  478. kfree(domain_info);
  479. return NULL;
  480. }
  481. if (parent != x86_vector_domain)
  482. hpet_msi_controller.name = "IR-HPET-MSI";
  483. d = msi_create_irq_domain(fn, domain_info, parent);
  484. if (!d) {
  485. irq_domain_free_fwnode(fn);
  486. kfree(domain_info);
  487. }
  488. return d;
  489. }
  490. static inline int hpet_dev_id(struct irq_domain *domain)
  491. {
  492. struct msi_domain_info *info = msi_get_domain_info(domain);
  493. return (int)(long)info->data;
  494. }
  495. static int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc,
  496. int dev_num)
  497. {
  498. struct irq_alloc_info info;
  499. init_irq_alloc_info(&info, NULL);
  500. info.type = X86_IRQ_ALLOC_TYPE_HPET;
  501. info.data = hc;
  502. info.devid = hpet_dev_id(domain);
  503. info.hwirq = dev_num;
  504. return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
  505. }
  506. static int hpet_clkevt_msi_resume(struct clock_event_device *evt)
  507. {
  508. struct hpet_channel *hc = clockevent_to_channel(evt);
  509. struct irq_data *data = irq_get_irq_data(hc->irq);
  510. struct msi_msg msg;
  511. /* Restore the MSI msg and unmask the interrupt */
  512. irq_chip_compose_msi_msg(data, &msg);
  513. hpet_msi_write(hc, &msg);
  514. hpet_msi_unmask(data);
  515. return 0;
  516. }
  517. static irqreturn_t hpet_msi_interrupt_handler(int irq, void *data)
  518. {
  519. struct hpet_channel *hc = data;
  520. struct clock_event_device *evt = &hc->evt;
  521. if (!evt->event_handler) {
  522. pr_info("Spurious interrupt HPET channel %d\n", hc->num);
  523. return IRQ_HANDLED;
  524. }
  525. evt->event_handler(evt);
  526. return IRQ_HANDLED;
  527. }
  528. static int hpet_setup_msi_irq(struct hpet_channel *hc)
  529. {
  530. if (request_irq(hc->irq, hpet_msi_interrupt_handler,
  531. IRQF_TIMER | IRQF_NOBALANCING,
  532. hc->name, hc))
  533. return -1;
  534. disable_irq(hc->irq);
  535. irq_set_affinity(hc->irq, cpumask_of(hc->cpu));
  536. enable_irq(hc->irq);
  537. pr_debug("%s irq %u for MSI\n", hc->name, hc->irq);
  538. return 0;
  539. }
  540. /* Invoked from the hotplug callback on @cpu */
  541. static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu)
  542. {
  543. struct clock_event_device *evt = &hc->evt;
  544. hc->cpu = cpu;
  545. per_cpu(cpu_hpet_channel, cpu) = hc;
  546. hpet_setup_msi_irq(hc);
  547. hpet_init_clockevent(hc, 110);
  548. evt->tick_resume = hpet_clkevt_msi_resume;
  549. clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
  550. 0x7FFFFFFF);
  551. }
  552. static struct hpet_channel *hpet_get_unused_clockevent(void)
  553. {
  554. int i;
  555. for (i = 0; i < hpet_base.nr_channels; i++) {
  556. struct hpet_channel *hc = hpet_base.channels + i;
  557. if (hc->mode != HPET_MODE_CLOCKEVT || hc->in_use)
  558. continue;
  559. hc->in_use = 1;
  560. return hc;
  561. }
  562. return NULL;
  563. }
  564. static int hpet_cpuhp_online(unsigned int cpu)
  565. {
  566. struct hpet_channel *hc = hpet_get_unused_clockevent();
  567. if (hc)
  568. init_one_hpet_msi_clockevent(hc, cpu);
  569. return 0;
  570. }
  571. static int hpet_cpuhp_dead(unsigned int cpu)
  572. {
  573. struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu);
  574. if (!hc)
  575. return 0;
  576. free_irq(hc->irq, hc);
  577. hc->in_use = 0;
  578. per_cpu(cpu_hpet_channel, cpu) = NULL;
  579. return 0;
  580. }
  581. static void __init hpet_select_clockevents(void)
  582. {
  583. unsigned int i;
  584. hpet_base.nr_clockevents = 0;
  585. /* No point if MSI is disabled or CPU has an Always Running APIC Timer */
  586. if (hpet_msi_disable || boot_cpu_has(X86_FEATURE_ARAT))
  587. return;
  588. hpet_print_config();
  589. hpet_domain = hpet_create_irq_domain(hpet_blockid);
  590. if (!hpet_domain)
  591. return;
  592. for (i = 0; i < hpet_base.nr_channels; i++) {
  593. struct hpet_channel *hc = hpet_base.channels + i;
  594. int irq;
  595. if (hc->mode != HPET_MODE_UNUSED)
  596. continue;
  597. /* Only consider HPET channel with MSI support */
  598. if (!(hc->boot_cfg & HPET_TN_FSB_CAP))
  599. continue;
  600. sprintf(hc->name, "hpet%d", i);
  601. irq = hpet_assign_irq(hpet_domain, hc, hc->num);
  602. if (irq <= 0)
  603. continue;
  604. hc->irq = irq;
  605. hc->mode = HPET_MODE_CLOCKEVT;
  606. if (++hpet_base.nr_clockevents == num_possible_cpus())
  607. break;
  608. }
  609. pr_info("%d channels of %d reserved for per-cpu timers\n",
  610. hpet_base.nr_channels, hpet_base.nr_clockevents);
  611. }
  612. #else
  613. static inline void hpet_select_clockevents(void) { }
  614. #define hpet_cpuhp_online NULL
  615. #define hpet_cpuhp_dead NULL
  616. #endif
  617. /*
  618. * Clock source related code
  619. */
  620. #if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
  621. /*
  622. * Reading the HPET counter is a very slow operation. If a large number of
  623. * CPUs are trying to access the HPET counter simultaneously, it can cause
  624. * massive delays and slow down system performance dramatically. This may
  625. * happen when HPET is the default clock source instead of TSC. For a
  626. * really large system with hundreds of CPUs, the slowdown may be so
  627. * severe, that it can actually crash the system because of a NMI watchdog
  628. * soft lockup, for example.
  629. *
  630. * If multiple CPUs are trying to access the HPET counter at the same time,
  631. * we don't actually need to read the counter multiple times. Instead, the
  632. * other CPUs can use the counter value read by the first CPU in the group.
  633. *
  634. * This special feature is only enabled on x86-64 systems. It is unlikely
  635. * that 32-bit x86 systems will have enough CPUs to require this feature
  636. * with its associated locking overhead. We also need 64-bit atomic read.
  637. *
  638. * The lock and the HPET value are stored together and can be read in a
  639. * single atomic 64-bit read. It is explicitly assumed that arch_spinlock_t
  640. * is 32 bits in size.
  641. */
  642. union hpet_lock {
  643. struct {
  644. arch_spinlock_t lock;
  645. u32 value;
  646. };
  647. u64 lockval;
  648. };
  649. static union hpet_lock hpet __cacheline_aligned = {
  650. { .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
  651. };
  652. static u64 read_hpet(struct clocksource *cs)
  653. {
  654. unsigned long flags;
  655. union hpet_lock old, new;
  656. BUILD_BUG_ON(sizeof(union hpet_lock) != 8);
  657. /*
  658. * Read HPET directly if in NMI.
  659. */
  660. if (in_nmi())
  661. return (u64)hpet_readl(HPET_COUNTER);
  662. /*
  663. * Read the current state of the lock and HPET value atomically.
  664. */
  665. old.lockval = READ_ONCE(hpet.lockval);
  666. if (arch_spin_is_locked(&old.lock))
  667. goto contended;
  668. local_irq_save(flags);
  669. if (arch_spin_trylock(&hpet.lock)) {
  670. new.value = hpet_readl(HPET_COUNTER);
  671. /*
  672. * Use WRITE_ONCE() to prevent store tearing.
  673. */
  674. WRITE_ONCE(hpet.value, new.value);
  675. arch_spin_unlock(&hpet.lock);
  676. local_irq_restore(flags);
  677. return (u64)new.value;
  678. }
  679. local_irq_restore(flags);
  680. contended:
  681. /*
  682. * Contended case
  683. * --------------
  684. * Wait until the HPET value change or the lock is free to indicate
  685. * its value is up-to-date.
  686. *
  687. * It is possible that old.value has already contained the latest
  688. * HPET value while the lock holder was in the process of releasing
  689. * the lock. Checking for lock state change will enable us to return
  690. * the value immediately instead of waiting for the next HPET reader
  691. * to come along.
  692. */
  693. do {
  694. cpu_relax();
  695. new.lockval = READ_ONCE(hpet.lockval);
  696. } while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
  697. return (u64)new.value;
  698. }
  699. #else
  700. /*
  701. * For UP or 32-bit.
  702. */
  703. static u64 read_hpet(struct clocksource *cs)
  704. {
  705. return (u64)hpet_readl(HPET_COUNTER);
  706. }
  707. #endif
  708. static struct clocksource clocksource_hpet = {
  709. .name = "hpet",
  710. .rating = 250,
  711. .read = read_hpet,
  712. .mask = HPET_MASK,
  713. .flags = CLOCK_SOURCE_IS_CONTINUOUS,
  714. .resume = hpet_resume_counter,
  715. };
  716. /*
  717. * AMD SB700 based systems with spread spectrum enabled use a SMM based
  718. * HPET emulation to provide proper frequency setting.
  719. *
  720. * On such systems the SMM code is initialized with the first HPET register
  721. * access and takes some time to complete. During this time the config
  722. * register reads 0xffffffff. We check for max 1000 loops whether the
  723. * config register reads a non-0xffffffff value to make sure that the
  724. * HPET is up and running before we proceed any further.
  725. *
  726. * A counting loop is safe, as the HPET access takes thousands of CPU cycles.
  727. *
  728. * On non-SB700 based machines this check is only done once and has no
  729. * side effects.
  730. */
  731. static bool __init hpet_cfg_working(void)
  732. {
  733. int i;
  734. for (i = 0; i < 1000; i++) {
  735. if (hpet_readl(HPET_CFG) != 0xFFFFFFFF)
  736. return true;
  737. }
  738. pr_warn("Config register invalid. Disabling HPET\n");
  739. return false;
  740. }
  741. static bool __init hpet_counting(void)
  742. {
  743. u64 start, now, t1;
  744. hpet_restart_counter();
  745. t1 = hpet_readl(HPET_COUNTER);
  746. start = rdtsc();
  747. /*
  748. * We don't know the TSC frequency yet, but waiting for
  749. * 200000 TSC cycles is safe:
  750. * 4 GHz == 50us
  751. * 1 GHz == 200us
  752. */
  753. do {
  754. if (t1 != hpet_readl(HPET_COUNTER))
  755. return true;
  756. now = rdtsc();
  757. } while ((now - start) < 200000UL);
  758. pr_warn("Counter not counting. HPET disabled\n");
  759. return false;
  760. }
  761. static bool __init mwait_pc10_supported(void)
  762. {
  763. unsigned int eax, ebx, ecx, mwait_substates;
  764. if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
  765. return false;
  766. if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
  767. return false;
  768. if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
  769. return false;
  770. cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
  771. return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
  772. (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
  773. (mwait_substates & (0xF << 28));
  774. }
  775. /*
  776. * Check whether the system supports PC10. If so force disable HPET as that
  777. * stops counting in PC10. This check is overbroad as it does not take any
  778. * of the following into account:
  779. *
  780. * - ACPI tables
  781. * - Enablement of intel_idle
  782. * - Command line arguments which limit intel_idle C-state support
  783. *
  784. * That's perfectly fine. HPET is a piece of hardware designed by committee
  785. * and the only reasons why it is still in use on modern systems is the
  786. * fact that it is impossible to reliably query TSC and CPU frequency via
  787. * CPUID or firmware.
  788. *
  789. * If HPET is functional it is useful for calibrating TSC, but this can be
  790. * done via PMTIMER as well which seems to be the last remaining timer on
  791. * X86/INTEL platforms that has not been completely wreckaged by feature
  792. * creep.
  793. *
  794. * In theory HPET support should be removed altogether, but there are older
  795. * systems out there which depend on it because TSC and APIC timer are
  796. * dysfunctional in deeper C-states.
  797. *
  798. * It's only 20 years now that hardware people have been asked to provide
  799. * reliable and discoverable facilities which can be used for timekeeping
  800. * and per CPU timer interrupts.
  801. *
  802. * The probability that this problem is going to be solved in the
  803. * foreseeable future is close to zero, so the kernel has to be cluttered
  804. * with heuristics to keep up with the ever growing amount of hardware and
  805. * firmware trainwrecks. Hopefully some day hardware people will understand
  806. * that the approach of "This can be fixed in software" is not sustainable.
  807. * Hope dies last...
  808. */
/* Returns true (and force-disables the HPET) when PC10 would stop it. */
static bool __init hpet_is_pc10_damaged(void)
{
	unsigned long long pcfg;

	/* Check whether PC10 substates are supported */
	if (!mwait_pc10_supported())
		return false;

	/*
	 * Check whether PC10 is enabled in the PKG C-state limit field
	 * (low nibble of the config control MSR).
	 */
	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
	if ((pcfg & 0xF) < 8)
		return false;

	/* An explicit command line override wins over the heuristic. */
	if (hpet_force_user) {
		pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
		return false;
	}

	pr_info("HPET dysfunctional in PC10. Force disabled.\n");
	boot_hpet_disable = true;
	return true;
}
/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 *
 * Maps the HPET, validates config register and counter, converts the
 * period to a frequency, sanitizes global and per channel state and
 * finally registers the clocksource. Returns 1 only when the legacy
 * replacement capable clockevent was set up as well, 0 otherwise.
 */
int __init hpet_enable(void)
{
	u32 hpet_period, cfg, id, irq;
	unsigned int i, channels;
	struct hpet_channel *hc;
	u64 freq;

	if (!is_hpet_capable())
		return 0;

	if (hpet_is_pc10_damaged())
		return 0;

	hpet_set_mapping();
	if (!hpet_virt_address)
		return 0;

	/* Validate that the config register is working */
	if (!hpet_cfg_working())
		goto out_nohpet;

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/* The period is a femtoseconds value. Convert it to a frequency. */
	freq = FSEC_PER_SEC;
	do_div(freq, hpet_period);
	hpet_freq = freq;

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

	/*
	 * HPET_ID_NUMBER holds the index of the last channel (zero
	 * based), so the channel count is that value plus one.
	 */
	channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC) && channels < 2)
		goto out_nohpet;

	hc = kcalloc(channels, sizeof(*hc), GFP_KERNEL);
	if (!hc) {
		pr_warn("Disabling HPET.\n");
		goto out_nohpet;
	}
	hpet_base.channels = hc;
	hpet_base.nr_channels = channels;

	/* Read, store and sanitize the global configuration */
	cfg = hpet_readl(HPET_CFG);
	hpet_base.boot_cfg = cfg;
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	/* Any bits still set after masking the known ones are unexpected */
	if (cfg)
		pr_warn("Global config: Unknown bits %#x\n", cfg);

	/* Read, store and sanitize the per channel configuration */
	for (i = 0; i < channels; i++, hc++) {
		hc->num = i;

		cfg = hpet_readl(HPET_Tn_CFG(i));
		hc->boot_cfg = cfg;
		irq = (cfg & Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
		hc->irq = irq;

		/* Disable the channel and clear level/FSB mode */
		cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
		hpet_writel(cfg, HPET_Tn_CFG(i));

		/* Mask the known capability/config bits; report the rest */
		cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
			 | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
			 | HPET_TN_FSB | HPET_TN_FSB_CAP);
		if (cfg)
			pr_warn("Channel #%u config: Unknown bits %#x\n", i, cfg);
	}
	hpet_print_config();

	/*
	 * Validate that the counter is counting. This needs to be done
	 * after sanitizing the config registers to properly deal with
	 * force enabled HPETs.
	 */
	if (!hpet_counting())
		goto out_nohpet;

	if (tsc_clocksource_watchdog_disabled())
		clocksource_hpet.flags |= CLOCK_SOURCE_MUST_VERIFY;
	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);

	/*
	 * With legacy replacement support, channel 0 becomes the tick
	 * clockevent and channel 1 is reserved for RTC emulation.
	 */
	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register(&hpet_base.channels[0]);
		hpet_base.channels[0].mode = HPET_MODE_LEGACY;
		if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC))
			hpet_base.channels[1].mode = HPET_MODE_LEGACY;
		return 1;
	}
	return 0;

out_nohpet:
	kfree(hpet_base.channels);
	hpet_base.channels = NULL;
	hpet_base.nr_channels = 0;
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}
/*
 * The late initialization runs after the PCI quirks have been invoked
 * which might have detected a system on which the HPET can be enforced.
 *
 * Also, the MSI machinery is not working yet when the HPET is initialized
 * early.
 *
 * If the HPET is enabled, then:
 *
 *	1) Reserve one channel for /dev/hpet if CONFIG_HPET=y
 *	2) Reserve up to num_possible_cpus() channels as per CPU clockevents
 *	3) Setup /dev/hpet if CONFIG_HPET=y
 *	4) Register hotplug callbacks when clockevents are available
 */
static __init int hpet_late_init(void)
{
	int ret;

	/* Not discovered early? Try the PCI-quirk enforced address. */
	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	/* hpet_enable() above failed or tore the mapping down again */
	if (!hpet_virt_address)
		return -ENODEV;

	hpet_select_device_channel();
	hpet_select_clockevents();
	hpet_reserve_platform_timers();
	hpet_print_config();

	/* No per CPU clockevent channels reserved: no hotplug callbacks */
	if (!hpet_base.nr_clockevents)
		return 0;

	ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
				hpet_cpuhp_online, NULL);
	if (ret)
		return ret;
	ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
				hpet_cpuhp_dead);
	if (ret)
		goto err_cpuhp;
	return 0;

err_cpuhp:
	/* Unwind the online state when the dead state failed to install */
	cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
	return ret;
}
fs_initcall(hpet_late_init);
/*
 * Restore the HPET to its boot time configuration.
 *
 * The channels are rewritten while the HPET is stopped (global enable
 * cleared first) and only afterwards is the counter re-enabled when it
 * was running at boot.
 */
void hpet_disable(void)
{
	unsigned int i;
	u32 cfg;

	if (!is_hpet_capable() || !hpet_virt_address)
		return;

	/* Restore boot configuration with the enable bit cleared */
	cfg = hpet_base.boot_cfg;
	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);

	/* Restore the channel boot configuration */
	for (i = 0; i < hpet_base.nr_channels; i++)
		hpet_writel(hpet_base.channels[i].boot_cfg, HPET_Tn_CFG(i));

	/* If the HPET was enabled at boot time, reenable it */
	if (hpet_base.boot_cfg & HPET_CFG_ENABLE)
		hpet_writel(hpet_base.boot_cfg, HPET_CFG);
}
  988. #ifdef CONFIG_HPET_EMULATE_RTC
  989. /*
  990. * HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET
  991. * is enabled, we support RTC interrupt functionality in software.
  992. *
  993. * RTC has 3 kinds of interrupts:
  994. *
  995. * 1) Update Interrupt - generate an interrupt, every second, when the
  996. * RTC clock is updated
  997. * 2) Alarm Interrupt - generate an interrupt at a specific time of day
  998. * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
  999. * 2Hz-8192Hz (2Hz-64Hz for non-root user) (all frequencies in powers of 2)
  1000. *
  1001. * (1) and (2) above are implemented using polling at a frequency of 64 Hz:
  1002. * DEFAULT_RTC_INT_FREQ.
  1003. *
  1004. * The exact frequency is a tradeoff between accuracy and interrupt overhead.
  1005. *
  1006. * For (3), we use interrupts at 64 Hz, or the user specified periodic frequency,
  1007. * if it's higher.
  1008. */
  1009. #include <linux/mc146818rtc.h>
  1010. #include <linux/rtc.h>
#define DEFAULT_RTC_INT_FREQ	64
#define DEFAULT_RTC_SHIFT	6
#define RTC_NUM_INTS		1

static unsigned long hpet_rtc_flags;	/* Enabled RTC_UIE/RTC_AIE/RTC_PIE bits */
static int hpet_prev_update_sec;	/* Last RTC second seen; -1 = invalid */
static struct rtc_time hpet_alarm_time;	/* h:m:s the alarm emulation matches */
static unsigned long hpet_pie_count;	/* Ticks since the last RTC_PF event */
static u32 hpet_t1_cmp;			/* Last value written to HPET_T1_CMP */
static u32 hpet_default_delta;		/* Comparator delta for 64 Hz polling */
static u32 hpet_pie_delta;		/* Comparator delta for user PIE freq */
static unsigned long hpet_pie_limit;	/* SW divider; 0 = use hpet_pie_delta */
static rtc_irq_handler irq_handler;	/* Registered RTC interrupt callback */
/*
 * Check that the HPET counter c1 is ahead of c2
 *
 * The difference is computed in 32-bit modular arithmetic and then
 * interpreted as signed, so the comparison stays correct across counter
 * wraparound as long as the two values are less than 2^31 apart.
 */
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{
	return (s32)(c2 - c1) < 0;
}
  1030. /*
  1031. * Registers a IRQ handler.
  1032. */
  1033. int hpet_register_irq_handler(rtc_irq_handler handler)
  1034. {
  1035. if (!is_hpet_enabled())
  1036. return -ENODEV;
  1037. if (irq_handler)
  1038. return -EBUSY;
  1039. irq_handler = handler;
  1040. return 0;
  1041. }
  1042. EXPORT_SYMBOL_GPL(hpet_register_irq_handler);
/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 *
 * NOTE(review): the @handler argument is not compared against the
 * registered handler — any caller unconditionally clears the handler
 * and all RTC emulation flags.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
/*
 * Channel 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for channel 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 *
 * Returns 0 if the HPET is not in use, 1 on success.
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt, delta;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;

	/*
	 * Lazily compute the default 64 Hz comparator delta from the
	 * channel 0 clockevent mult/shift conversion factors.
	 */
	if (!hpet_default_delta) {
		struct clock_event_device *evt = &hpet_base.channels[0].evt;
		uint64_t clc;

		clc = (uint64_t) evt->mult * NSEC_PER_SEC;
		clc >>= evt->shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = clc;
	}

	/* Use the user supplied PIE delta only when PIE runs undivided */
	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Program the comparator and enable the channel with interrupts
	 * disabled, so the counter cannot run away from the setup.
	 */
	local_irq_save(flags);
	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);
	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
  1090. static void hpet_disable_rtc_channel(void)
  1091. {
  1092. u32 cfg = hpet_readl(HPET_T1_CFG);
  1093. cfg &= ~HPET_TN_ENABLE;
  1094. hpet_writel(cfg, HPET_T1_CFG);
  1095. }
  1096. /*
  1097. * The functions below are called from rtc driver.
  1098. * Return 0 if HPET is not being used.
  1099. * Otherwise do the necessary changes and return 1.
  1100. */
  1101. int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
  1102. {
  1103. if (!is_hpet_enabled())
  1104. return 0;
  1105. hpet_rtc_flags &= ~bit_mask;
  1106. if (unlikely(!hpet_rtc_flags))
  1107. hpet_disable_rtc_channel();
  1108. return 1;
  1109. }
  1110. EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
  1111. int hpet_set_rtc_irq_bit(unsigned long bit_mask)
  1112. {
  1113. unsigned long oldbits = hpet_rtc_flags;
  1114. if (!is_hpet_enabled())
  1115. return 0;
  1116. hpet_rtc_flags |= bit_mask;
  1117. if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
  1118. hpet_prev_update_sec = -1;
  1119. if (!oldbits)
  1120. hpet_rtc_timer_init();
  1121. return 1;
  1122. }
  1123. EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);
  1124. int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
  1125. {
  1126. if (!is_hpet_enabled())
  1127. return 0;
  1128. hpet_alarm_time.tm_hour = hrs;
  1129. hpet_alarm_time.tm_min = min;
  1130. hpet_alarm_time.tm_sec = sec;
  1131. return 1;
  1132. }
  1133. EXPORT_SYMBOL_GPL(hpet_set_alarm_time);
/*
 * Set the frequency for the periodic (PIE) RTC emulation.
 *
 * Frequencies at or below DEFAULT_RTC_INT_FREQ are produced by software
 * division: the channel keeps ticking at the default rate and only every
 * hpet_pie_limit-th tick raises RTC_PF. Higher frequencies reprogram the
 * comparator delta directly and clear the software divider.
 *
 * NOTE(review): freq == 0 would divide by zero here — presumably the
 * rtc driver validates the frequency beforehand; confirm at the caller.
 *
 * Returns 0 if the HPET is not in use, 1 otherwise.
 */
int hpet_set_periodic_freq(unsigned long freq)
{
	uint64_t clc;

	if (!is_hpet_enabled())
		return 0;

	if (freq <= DEFAULT_RTC_INT_FREQ) {
		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
	} else {
		struct clock_event_device *evt = &hpet_base.channels[0].evt;

		/* Convert the frequency to a comparator delta via the
		 * channel 0 clockevent mult/shift factors. do_div()
		 * divides clc in place. */
		clc = (uint64_t) evt->mult * NSEC_PER_SEC;
		do_div(clc, freq);
		clc >>= evt->shift;
		hpet_pie_delta = clc;
		hpet_pie_limit = 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
/*
 * Report to the rtc driver whether the HPET based RTC emulation is
 * active (non-zero) or not (zero).
 */
int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
/*
 * Re-arm the RTC emulation comparator after an interrupt fired.
 *
 * lost_ints starts at -1 so that the single pass through the catch-up
 * loop in the normal case counts as zero lost interrupts.
 */
static void hpet_rtc_timer_reinit(void)
{
	unsigned int delta;
	int lost_ints = -1;

	/* No consumers left: stop the channel instead of re-arming it */
	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	/* More than one pass means interrupts were missed; account them */
	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			pr_warn("Lost %d RTC interrupts\n", lost_ints);
	}
}
/*
 * Interrupt handler for the RTC emulation channel. Re-arms the
 * comparator, evaluates the UIE/PIE/AIE conditions and forwards the
 * accumulated event flags to the registered rtc handler.
 */
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	/* Re-arm first so the next tick is not delayed by the work below */
	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	/* Only the update and alarm emulation need the current RTC time */
	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) {
		/* NOTE(review): the second argument looks like a retry or
		 * timeout bound for mc146818_get_time() — confirm against
		 * its prototype. */
		if (unlikely(mc146818_get_time(&curr_time, 10) < 0)) {
			pr_err_ratelimited("unable to read current time from RTC\n");
			return IRQ_HANDLED;
		}
	}

	/* Update interrupt: raise UF once per RTC second change */
	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	/* Periodic interrupt: raise PF every hpet_pie_limit ticks */
	if (hpet_rtc_flags & RTC_PIE && ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	/* Alarm interrupt: raise AF on an exact h:m:s match */
	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
		rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
  1218. #endif