ctlreg.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 1999, 2023
 */

#include <linux/irqflags.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cache.h>
#include <asm/abs_lowcore.h>
#include <asm/ctlreg.h>

/*
 * system_ctl_lock guards access to global control register contents which
 * are kept in the control register save area within absolute lowcore
 * at physical address zero.
 */
static DEFINE_SPINLOCK(system_ctl_lock);

void system_ctlreg_lock(void)
	__acquires(&system_ctl_lock)
{
	spin_lock(&system_ctl_lock);
}

void system_ctlreg_unlock(void)
	__releases(&system_ctl_lock)
{
	spin_unlock(&system_ctl_lock);
}
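
/*
 * system_ctlreg_area_init is set once system_ctlreg_init_save_area() has
 * stored the boot CPU's control registers in the lowcore save areas.
 */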
static bool system_ctlreg_area_init __ro_after_init;

void __init system_ctlreg_init_save_area(struct lowcore *lc)
{
	struct lowcore *abs_lc;

	abs_lc = get_abs_lowcore();
	__local_ctl_store(0, 15, lc->cregs_save_area);
	__local_ctl_store(0, 15, abs_lc->cregs_save_area);
	put_abs_lowcore(abs_lc);
	system_ctlreg_area_init = true;
}

struct ctlreg_parms {
	unsigned long andval;
	unsigned long orval;
	unsigned long val;
	int request;
	int cr;
};
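
/* Apply the requested control register change on the calling CPU. */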
static void ctlreg_callback(void *info)
{
	struct ctlreg_parms *pp = info;
	struct ctlreg regs[16];

	__local_ctl_store(0, 15, regs);
	if (pp->request == CTLREG_LOAD) {
		regs[pp->cr].val = pp->val;
	} else {
		regs[pp->cr].val &= pp->andval;
		regs[pp->cr].val |= pp->orval;
	}
	__local_ctl_load(0, 15, regs);
}
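
/*
 * Apply a control register change on all CPUs. During early boot only the
 * local CPU is updated, with interrupts disabled, since the other CPUs are
 * not necessarily up yet.
 */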
static void system_ctlreg_update(void *info)
{
	unsigned long flags;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * For very early calls do not call on_each_cpu()
		 * since not everything might be set up.
		 */
		local_irq_save(flags);
		ctlreg_callback(info);
		local_irq_restore(flags);
	} else {
		on_each_cpu(ctlreg_callback, info, 1);
	}
}
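
/*
 * Set a bit, clear a bit, or load a complete value in a control register,
 * both in the control register save area within absolute lowcore (once it
 * has been initialized) and in the control registers of all CPUs.
 */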
void system_ctlreg_modify(unsigned int cr, unsigned long data, int request)
{
	struct ctlreg_parms pp = { .cr = cr, .request = request, };
	struct lowcore *abs_lc;

	switch (request) {
	case CTLREG_SET_BIT:
		pp.orval = 1UL << data;
		pp.andval = -1UL;
		break;
	case CTLREG_CLEAR_BIT:
		pp.orval = 0;
		pp.andval = ~(1UL << data);
		break;
	case CTLREG_LOAD:
		pp.val = data;
		break;
	}
	if (system_ctlreg_area_init) {
		system_ctlreg_lock();
		abs_lc = get_abs_lowcore();
		if (request == CTLREG_LOAD) {
			abs_lc->cregs_save_area[cr].val = pp.val;
		} else {
			abs_lc->cregs_save_area[cr].val &= pp.andval;
			abs_lc->cregs_save_area[cr].val |= pp.orval;
		}
		put_abs_lowcore(abs_lc);
		system_ctlreg_update(&pp);
		system_ctlreg_unlock();
	} else {
		system_ctlreg_update(&pp);
	}
}
EXPORT_SYMBOL(system_ctlreg_modify);
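
For illustration, here is a minimal sketch of how a caller could use system_ctlreg_modify() as defined above. The function example_toggle_bit() and the chosen register and bit numbers are made up for this example; the CTLREG_SET_BIT and CTLREG_CLEAR_BIT request codes are the same ones handled by the switch statement in system_ctlreg_modify().

/* Illustrative sketch only, not part of ctlreg.c. */
#include <asm/ctlreg.h>

static void example_toggle_bit(void)
{
	/* Set bit 1UL << 4 in control register 0, both in the save area and on all CPUs. */
	system_ctlreg_modify(0, 4, CTLREG_SET_BIT);

	/* ... */

	/* Clear the same bit again. */
	system_ctlreg_modify(0, 4, CTLREG_CLEAR_BIT);
}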