checksum_32.h

#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H

/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
 *
 * Copyright(C) 1995 Linus Torvalds
 * Copyright(C) 1995 Miguel de Icaza
 * Copyright(C) 1996 David S. Miller
 * Copyright(C) 1996 Eddie C. Dost
 * Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *      Alpha checksum c-code
 *      ix86 inline assembly
 *      RFC1071 Computing the Internet Checksum
 */

#include <linux/in6.h>
#include <asm/uaccess.h>

/* computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
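
/* Usage sketch (illustrative, not part of the original header): the
 * return value can be fed back in as "sum" to checksum a buffer in
 * fragments (only the last fragment's length may be odd), then folded
 * once at the end; "frag0", "frag1" and their lengths are hypothetical.
 *
 *      __wsum s = csum_partial(frag0, len0, 0);
 *      s = csum_partial(frag1, len1, s);
 *      __sum16 result = csum_fold(s);
 */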

/* the same as csum_partial, but copies from fs:src while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);

static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
        register unsigned int ret asm("o0") = (unsigned int)src;
        register char *d asm("o1") = dst;
        register int l asm("g1") = len;

        __asm__ __volatile__ (
                "call __csum_partial_copy_sparc_generic\n\t"
                " mov %6, %%g7\n"
        : "=&r" (ret), "=&r" (d), "=&r" (l)
        : "0" (ret), "1" (d), "2" (l), "r" (sum)
        : "o2", "o3", "o4", "o5", "o7",
          "g2", "g3", "g4", "g5", "g7",
          "memory", "cc");
        return (__force __wsum)ret;
}
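
/* Usage sketch (illustrative): a single pass that copies and checksums,
 * replacing a memcpy() plus a separate csum_partial(); "src", "dst" and
 * "len" are hypothetical. Note that the mov sits in the call's delay
 * slot, so it seeds %g7 with the initial sum before the callee runs.
 *
 *      __wsum s = csum_partial_copy_nocheck(src, dst, len, 0);
 */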

static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
                            __wsum sum, int *err)
{
        register unsigned long ret asm("o0") = (unsigned long)src;
        register char *d asm("o1") = dst;
        register int l asm("g1") = len;
        register __wsum s asm("g7") = sum;

        __asm__ __volatile__ (
                ".section __ex_table,#alloc\n\t"
                ".align 4\n\t"
                ".word 1f,2\n\t"
                ".previous\n"
                "1:\n\t"
                "call __csum_partial_copy_sparc_generic\n\t"
                " st %8, [%%sp + 64]\n"
        : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
        : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
        : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
          "cc", "memory");
        return (__force __wsum)ret;
}

static inline __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, int len,
                          __wsum sum, int *err)
{
        if (!access_ok(VERIFY_WRITE, dst, len)) {
                *err = -EFAULT;
                return sum;
        } else {
                register unsigned long ret asm("o0") = (unsigned long)src;
                register char __user *d asm("o1") = dst;
                register int l asm("g1") = len;
                register __wsum s asm("g7") = sum;

                __asm__ __volatile__ (
                        ".section __ex_table,#alloc\n\t"
                        ".align 4\n\t"
                        ".word 1f,1\n\t"
                        ".previous\n"
                        "1:\n\t"
                        "call __csum_partial_copy_sparc_generic\n\t"
                        " st %8, [%%sp + 64]\n"
                : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
                : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
                : "o2", "o3", "o4", "o5", "o7",
                  "g2", "g3", "g4", "g5",
                  "cc", "memory");
                return (__force __wsum)ret;
        }
}

#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user
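
/* Usage sketch (illustrative): the user-copy variants report faults
 * through the "err" out-parameter (via the __ex_table fixup entries
 * emitted above) rather than through the return value, so callers must
 * check it after the call; "uptr", "kbuf" and "len" are hypothetical.
 *
 *      int err = 0;
 *      __wsum s = csum_partial_copy_from_user(uptr, kbuf, len, 0, &err);
 *      if (err)
 *              return -EFAULT;
 */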

/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
 * the majority of the time.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        __sum16 sum;

        /* Note: We must read %2 before we touch %0 for the first time,
         * because GCC can legitimately use the same register for
         * both operands.
         */
        __asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
                             "ld\t[%1 + 0x00], %0\n\t"
                             "ld\t[%1 + 0x04], %%g2\n\t"
                             "ld\t[%1 + 0x08], %%g3\n\t"
                             "addcc\t%%g2, %0, %0\n\t"
                             "addxcc\t%%g3, %0, %0\n\t"
                             "ld\t[%1 + 0x0c], %%g2\n\t"
                             "ld\t[%1 + 0x10], %%g3\n\t"
                             "addxcc\t%%g2, %0, %0\n\t"
                             "addx\t%0, %%g0, %0\n"
                             "1:\taddcc\t%%g3, %0, %0\n\t"
                             "add\t%1, 4, %1\n\t"
                             "addxcc\t%0, %%g0, %0\n\t"
                             "subcc\t%%g4, 1, %%g4\n\t"
                             "be,a\t2f\n\t"
                             "sll\t%0, 16, %%g2\n\t"
                             "b\t1b\n\t"
                             "ld\t[%1 + 0x10], %%g3\n"
                             "2:\taddcc\t%0, %%g2, %%g2\n\t"
                             "srl\t%%g2, 16, %0\n\t"
                             "addx\t%0, %%g0, %0\n\t"
                             "xnor\t%%g0, %0, %0"
                             : "=r" (sum), "=&r" (iph)
                             : "r" (ihl), "1" (iph)
                             : "g2", "g3", "g4", "cc", "memory");
        return sum;
}
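
/* Reference behavior (a portable sketch, not the code used on sparc):
 * the asm above computes the RFC 1071 one's-complement sum over the
 * ihl * 4 header bytes and returns its complement. Roughly:
 *
 *      const __be16 *p = iph;
 *      u32 acc = 0;
 *      unsigned int i;
 *
 *      for (i = 0; i < ihl * 2; i++)
 *              acc += (__force u32)p[i];
 *      acc = (acc & 0xffff) + (acc >> 16);
 *      acc = (acc & 0xffff) + (acc >> 16);
 *      return (__force __sum16)~acc;
 */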

/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
        unsigned int tmp;

        __asm__ __volatile__("addcc\t%0, %1, %1\n\t"
                             "srl\t%1, 16, %1\n\t"
                             "addx\t%1, %%g0, %1\n\t"
                             "xnor\t%%g0, %1, %0"
                             : "=&r" (sum), "=r" (tmp)
                             : "0" (sum), "1" ((__force u32)sum<<16)
                             : "cc");
        return (__force __sum16)sum;
}
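
/* Equivalent portable sketch: fold the 32-bit partial sum to 16 bits
 * with end-around carry, then complement.
 *
 *      u32 s = (__force u32)sum;
 *      s = (s & 0xffff) + (s >> 16);
 *      s = (s & 0xffff) + (s >> 16);
 *      return (__force __sum16)~s;
 */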

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                                        __u32 len, __u8 proto,
                                        __wsum sum)
{
        __asm__ __volatile__("addcc\t%1, %0, %0\n\t"
                             "addxcc\t%2, %0, %0\n\t"
                             "addxcc\t%3, %0, %0\n\t"
                             "addx\t%0, %%g0, %0\n\t"
                             : "=r" (sum), "=r" (saddr)
                             : "r" (daddr), "r" (proto + len), "0" (sum),
                               "1" (saddr)
                             : "cc");
        return sum;
}
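
/* What the asm computes, as a portable sketch (sparc is big-endian,
 * which is why "proto + len" can be added as a single plain integer
 * operand):
 *
 *      u64 s = (__force u32)sum;
 *      s += (__force u32)saddr;
 *      s += (__force u32)daddr;
 *      s += proto + len;
 *      s = (s & 0xffffffff) + (s >> 32);
 *      s = (s & 0xffffffff) + (s >> 32);
 *      return (__force __wsum)s;
 */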

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
                                        __u32 len, __u8 proto,
                                        __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
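
/* Usage sketch (illustrative), in the style of the generic UDP code:
 * "uh", "saddr", "daddr" and "len" are hypothetical, with len covering
 * the UDP header plus payload.
 *
 *      __wsum csum = csum_partial(uh, len, 0);
 *      uh->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
 *      if (uh->check == 0)
 *              uh->check = CSUM_MANGLED_0;
 */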

#define _HAVE_ARCH_IPV6_CSUM

static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                                      const struct in6_addr *daddr,
                                      __u32 len, __u8 proto, __wsum sum)
{
        __asm__ __volatile__ (
                "addcc  %3, %4, %%g4\n\t"
                "addxcc %5, %%g4, %%g4\n\t"
                "ld     [%2 + 0x0c], %%g2\n\t"
                "ld     [%2 + 0x08], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%2 + 0x04], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%2 + 0x00], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%1 + 0x0c], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%1 + 0x08], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%1 + 0x04], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%1 + 0x00], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "addxcc %%g3, %%g4, %0\n\t"
                "addx   0, %0, %0\n"
                : "=&r" (sum)
                : "r" (saddr), "r" (daddr),
                  "r" (htonl(len)), "r" (htonl(proto)), "r" (sum)
                : "g2", "g3", "g4", "cc");
        return csum_fold(sum);
}
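
/* What the asm computes, as a portable sketch: the RFC 2460 IPv6
 * pseudo-header sum -- all eight 32-bit words of the two addresses,
 * plus the length and next-header values as 32-bit big-endian
 * quantities, accumulated with carry and folded by csum_fold().
 *
 *      u64 s = (__force u32)sum;
 *      int i;
 *
 *      for (i = 0; i < 4; i++)
 *              s += (__force u32)saddr->s6_addr32[i];
 *      for (i = 0; i < 4; i++)
 *              s += (__force u32)daddr->s6_addr32[i];
 *      s += (__force u32)htonl(len) + (__force u32)htonl(proto);
 *      s = (s & 0xffffffff) + (s >> 32);
 *      s = (s & 0xffffffff) + (s >> 32);
 *      return csum_fold((__force __wsum)s);
 */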

/* this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}

#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
        __asm__ __volatile__(
                "addcc   %0, %1, %0\n"
                "addx    %0, %%g0, %0"
                : "=r" (csum)
                : "r" (addend), "0" (csum));

        return csum;
}
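
/* Equivalent portable sketch (this matches the generic csum_add() in
 * include/net/checksum.h): a 32-bit add whose carry is wrapped back
 * around, i.e. one's-complement addition.
 *
 *      u32 res = (__force u32)csum;
 *      res += (__force u32)addend;
 *      return (__force __wsum)(res + (res < (__force u32)addend));
 */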

#endif /* !(__SPARC_CHECKSUM_H) */