paravirt-spinlocks.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

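/*
 * Wrapping the native unlock lets PV_CALLEE_SAVE_REGS_THUNK() generate a
 * register-preserving thunk (__raw_callee_save___native_queued_spin_unlock)
 * around it, so callers only need to honor the lighter callee-save
 * calling convention.
 */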
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
        native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

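/*
 * Used by the paravirt patching code: if the unlock op still points at the
 * native thunk, the indirect call can be patched into the short native
 * unlock sequence inline.
 */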
bool pv_is_native_spin_unlock(void)
{
        return pv_lock_ops.queued_spin_unlock.func ==
                __raw_callee_save___native_queued_spin_unlock;
}

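/* On bare metal there is no hypervisor, so a CPU is never "preempted". */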
__visible bool __native_vcpu_is_preempted(long cpu)
{
        return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

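/*
 * As above: tells the patcher whether vcpu_is_preempted() is still the
 * native version and can therefore be patched to simply return false.
 */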
bool pv_is_native_vcpu_is_preempted(void)
{
        return pv_lock_ops.vcpu_is_preempted.func ==
                __raw_callee_save___native_vcpu_is_preempted;
}

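/*
 * Default to the native implementations; a paravirtualized guest (Xen,
 * KVM) overrides these entries during early boot.
 */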
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
        .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
        .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
        .wait = paravirt_nop,
        .kick = paravirt_nop,
        .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
};
  39. EXPORT_SYMBOL(pv_lock_ops);
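
For context, the sketch below shows how a guest backend replaces these native defaults. It is modeled on the KVM guest code of the same kernel era (arch/x86/kernel/kvm.c) and is illustrative rather than a verbatim copy: kvm_wait() and kvm_kick_cpu() stand in for the hypervisor-specific halt/kick primitives defined in that file, and most feature checks are trimmed.

/* Sketch: a guest backend overriding pv_lock_ops at boot (modeled on KVM). */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return; /* host cannot kick halted vCPUs; keep native ops */

        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;      /* halt until kicked by the unlocker */
        pv_lock_ops.kick = kvm_kick_cpu;  /* hypercall to wake a waiting vCPU */
}

Once these pointers change, pv_is_native_spin_unlock() and pv_is_native_vcpu_is_preempted() return false, and the patcher keeps the indirect calls instead of inlining the native sequences.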