/* arch/powerpc/mm/mmu_context.c */
/*
 * Common implementation of switch_mm_irqs_off
 *
 * Copyright IBM Corp. 2017
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>
#include <asm/mmu_context.h>
  16. #if defined(CONFIG_PPC32)
  17. static inline void switch_mm_pgdir(struct task_struct *tsk,
  18. struct mm_struct *mm)
  19. {
  20. /* 32-bit keeps track of the current PGDIR in the thread struct */
  21. tsk->thread.pgdir = mm->pgd;
  22. }
  23. #elif defined(CONFIG_PPC_BOOK3E_64)
  24. static inline void switch_mm_pgdir(struct task_struct *tsk,
  25. struct mm_struct *mm)
  26. {
  27. /* 64-bit Book3E keeps track of current PGD in the PACA */
  28. get_paca()->pgd = mm->pgd;
  29. }
  30. #else
  31. static inline void switch_mm_pgdir(struct task_struct *tsk,
  32. struct mm_struct *mm) { }
  33. #endif
/*
 * switch_mm_irqs_off - switch this CPU's MMU context from @prev to @next.
 * @prev: mm being switched away from
 * @next: mm being switched to
 * @tsk:  task that will run with @next (forwarded to switch_mm_pgdir()
 *        and switch_mmu_context(); NOTE(review): presumably may be NULL
 *        for lazy/kernel-thread activation — confirm with callers)
 *
 * Per the _irqs_off suffix, callers are expected to invoke this with
 * interrupts disabled. Marks @next as active on this CPU, updates any
 * subarch PGD cache, and hands off to switch_mmu_context() for the
 * actual hardware switch.
 */
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	/* True only the first time @next is used on this CPU. */
	bool new_on_cpu = false;

	/* Mark this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent operation which allows this CPU to begin loading
		 * translations for next.
		 *
		 * When using the radix MMU that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
		 * copy_mm_to_paca().
		 *
		 * On the other side, the barrier is in mm/tlb-radix.c for
		 * radix which orders earlier stores to clear the PTEs vs
		 * the load of mm_cpumask. And pte_xchg which does the same
		 * thing for hash.
		 *
		 * This full barrier is needed by membarrier when switching
		 * between processes after store to rq->curr, before user-space
		 * memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");

	/*
	 * First use on this CPU: the smp_mb() above already provided the
	 * full barrier membarrier requires (see comment above), so only
	 * the KVM prefetch workaround is needed; otherwise let membarrier
	 * do its arch-specific work for the mm switch.
	 */
	if (new_on_cpu)
		radix_kvm_prefetch_workaround(next);
	else
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}