/* SPDX-License-Identifier: GPL-2.0 */
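/*
 * 64-bit unsigned division (__udivdi3) for SH-5 SHmedia.  Assuming the
 * usual SH-5 calling convention, the dividend arrives in r2 and the
 * divisor in r3; the quotient is returned in r2, and r18 holds the
 * return address (hence the ptabs/blink pairs below).  Rather than a
 * bit-per-iteration loop, the routine multiplies by a fixed-point
 * reciprocal of the normalized divisor, peeling the quotient off in
 * large chunks.  As an illustrative sketch only (not part of the
 * original file), a plain C model of the value being computed:
 *
 *	static uint64_t udiv64_ref(uint64_t n, uint64_t d)
 *	{
 *		uint64_t q = 0, r = 0;
 *		int i;
 *
 *		for (i = 63; i >= 0; i--) {
 *			r = (r << 1) | ((n >> i) & 1);	// next dividend bit
 *			if (r >= d) {		// divisor fits: quotient bit is 1
 *				r -= d;
 *				q |= 1ULL << i;
 *			}
 *		}
 *		return q;	// quotient; the remainder is left in r
 *	}
 *
 * The code below reaches the same quotient in a handful of multiply
 * steps instead of 64 loop iterations.
 */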
	.section	.text..SHmedia32,"ax"
	.align	2
	.global	__udivdi3
__udivdi3:
	shlri r3,1,r4
	nsb r4,r22
	shlld r3,r22,r6
	shlri r6,49,r5
	movi 0xffffffffffffbaf1,r21 /* .l shift count 17. */
	sub r21,r5,r1
	mmulfx.w r1,r1,r4
	mshflo.w r1,r63,r1
	sub r63,r22,r20 // r63 == 64 % 64
	mmulfx.w r5,r4,r4
	pta large_divisor,tr0
	addi r20,32,r9
	msub.w r1,r4,r1
	madd.w r1,r1,r1
	mmulfx.w r1,r1,r4
	shlri r6,32,r7
	bgt/u r9,r63,tr0 // large_divisor
	mmulfx.w r5,r4,r4
	shlri r2,32+14,r19
	addi r22,-31,r0
	msub.w r1,r4,r1
	mulu.l r1,r7,r4
	addi r1,-3,r5
	mulu.l r5,r19,r5
	sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
	shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
			 the case may be, %0000000000000000 000.11111111111, still */
	muls.l r1,r4,r4 /* leaving at least one sign bit. */
	mulu.l r5,r3,r8
	mshalds.l r1,r21,r1
	shari r4,26,r4
	shlld r8,r0,r8
	add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
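	/*
	 * The mmulfx.w/msub.w/madd.w sequence above looks like the classic
	 * Newton-Raphson reciprocal refinement carried out in 16-bit
	 * fixed-point halves: starting from the linear seed produced by
	 * the movi/sub pair, each round computes
	 *
	 *	x' = x * (2 - d * x)
	 *
	 * which roughly squares the relative error, turning a low-precision
	 * seed into the ~31-bit reciprocal noted above.
	 */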
	sub r2,r8,r2
	/* Can do second step of 64 : 32 div now, using r1 and the rest in r2. */
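	/*
	 * One divide step in sketch form, assuming r1 holds the Q31-style
	 * reciprocal noted above: take the top bits of the rest, multiply
	 * by the reciprocal to obtain a quotient chunk, then subtract
	 * chunk * divisor from the rest:
	 *
	 *	chunk = ((rest >> 22) * recip) >> shift;
	 *	rest -= chunk * divisor;
	 *
	 * where shift is derived from the normalization count in r22
	 * (via r20/r0).
	 */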
	shlri r2,22,r21
	mulu.l r21,r1,r21
	shlld r5,r0,r8
	addi r20,30-22,r0
	shlrd r21,r0,r21
	mulu.l r21,r3,r5
	add r8,r21,r8
	mcmpgt.l r21,r63,r21 // See Note 1
	addi r20,30,r0
	mshfhi.l r63,r21,r21
	sub r2,r5,r2
	andc r2,r21,r2
	/* small divisor: need a third divide step */
	mulu.l r2,r1,r7
	ptabs r18,tr0
	addi r2,1,r2
	shlrd r7,r0,r7
	mulu.l r7,r3,r5
	add r8,r7,r8
	sub r2,r3,r2
	cmpgt r2,r5,r5
	add r8,r5,r2
	/* could test r3 here to check for divide by zero. */
	blink tr0,r63
large_divisor:
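	/*
	 * Taken when the divisor has more than roughly 32 significant
	 * bits, so the 32x32 products above would not cover it.  The
	 * dividend is pre-shifted right by r9 into r25, the same
	 * reciprocal machinery runs on the shortened value, and mextr4
	 * below recombines the partial quotient words.
	 */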
	mmulfx.w r5,r4,r4
	shlrd r2,r9,r25
	shlri r25,32,r8
	msub.w r1,r4,r1
	mulu.l r1,r7,r4
	addi r1,-3,r5
	mulu.l r5,r8,r5
	sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
	shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
			 the case may be, %0000000000000000 000.11111111111, still */
	muls.l r1,r4,r4 /* leaving at least one sign bit. */
	shlri r5,14-1,r8
	mulu.l r8,r7,r5
	mshalds.l r1,r21,r1
	shari r4,26,r4
	add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
	sub r25,r5,r25
	/* Can do second step of 64 : 32 div now, using r1 and the rest in r25. */
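	/* Same chunking as the second step of the small-divisor path,
	   here applied to the pre-shifted rest in r25. */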
	shlri r25,22,r21
	mulu.l r21,r1,r21
	pta no_lo_adj,tr0
	addi r22,32,r0
	shlri r21,40,r21
	mulu.l r21,r7,r5
	add r8,r21,r8
	shlld r2,r0,r2
	sub r25,r5,r25
	bgtu/u r7,r25,tr0 // no_lo_adj
	addi r8,1,r8
	sub r25,r7,r25
no_lo_adj:
	mextr4 r2,r25,r2
	/* large_divisor: only needs a few adjustments. */
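	/*
	 * Final fixup: r5 gets a product of the candidate quotient and
	 * the normalized divisor; cmpgtu leaves 1 in r5 when that
	 * overshoots the shifted dividend, and the sub below lowers the
	 * returned quotient by that borrow.
	 */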
	mulu.l r8,r6,r5
	ptabs r18,tr0
	/* bubble */
	cmpgtu r5,r2,r5
	sub r8,r5,r2
	blink tr0,r63
/* Note 1: Shifting the result of the second divide stage so that it always
   fits into 32 bits, while still reducing the rest sufficiently, would
   require a lot of instructions to get the shifts just right.  Using the
   full 64 bit shift result to multiply with the divisor would require four
   extra instructions for the upper 32 bits (shift / mulu / shift / sub).
   Fortunately, if the upper 32 bits of the shift result are nonzero, we
   know that the rest after taking this partial result into account will
   fit into 32 bits.  So we just clear the upper 32 bits of the rest if the
   upper 32 bits of the partial result are nonzero. */
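/* In C, the adjustment Note 1 describes amounts to (illustrative only):
 *
 *	if (partial >> 32)		// upper half of partial result nonzero?
 *		rest &= 0xffffffffULL;	// then the true rest fits in 32 bits
 *
 * which the mcmpgt.l / mshfhi.l / andc sequence above computes
 * branch-free. */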