/* tx.c */
/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <net/6lowpan.h>
#include <net/ndisc.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>

#include "6lowpan_i.h"

/* RFC 4944 fragmentation header sizes: FRAG1 is 4 bytes
 * (dispatch + 11-bit datagram size, 16-bit tag); FRAGN adds a
 * 1-byte datagram offset (see frag_hdr[4] in the xmit path).
 */
#define LOWPAN_FRAG1_HEAD_SIZE 0x4
#define LOWPAN_FRAGN_HEAD_SIZE 0x5

/* 802.15.4 link-layer addresses chosen in lowpan_header_create() and
 * stashed in the skb headroom (see lowpan_skb_priv()) until
 * lowpan_header() copies them out for header compression.
 */
struct lowpan_addr_info {
	struct ieee802154_addr daddr;
	struct ieee802154_addr saddr;
};
  21. static inline struct
  22. lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
  23. {
  24. WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
  25. return (struct lowpan_addr_info *)(skb->data -
  26. sizeof(struct lowpan_addr_info));
  27. }
/* This callback will be called from AF_PACKET and IPv6 stack, the AF_PACKET
 * sockets gives an 8 byte array for addresses only!
 *
 * TODO I think AF_PACKET DGRAM (sending/receiving) RAW (sending) makes no
 * sense here. We should disable it, the right use-case would be AF_INET6
 * RAW/DGRAM sockets.
 *
 * Resolves the 802.15.4 source/destination for an outgoing IPv6 packet
 * and stores them in the skb headroom (lowpan_skb_priv()) for
 * lowpan_header() to consume later.  Returns 0 on success (including the
 * silently-ignored non-IPv6 case) or -EINVAL when no daddr was given.
 */
int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
	struct lowpan_addr_info *info = lowpan_skb_priv(skb);
	struct lowpan_802154_neigh *llneigh = NULL;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct neighbour *n;

	if (!daddr)
		return -EINVAL;

	/* TODO:
	 * if this package isn't ipv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	/* intra-pan communication */
	info->saddr.pan_id = wpan_dev->pan_id;
	info->daddr.pan_id = info->saddr.pan_id;

	if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
		/* link-layer broadcast maps to the 802.15.4 short
		 * broadcast address
		 */
		info->daddr.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
		info->daddr.mode = IEEE802154_ADDR_SHORT;
	} else {
		__le16 short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);

		/* prefer a short address learnt via the IPv6 neighbour
		 * cache; n->lock guards llneigh->short_addr
		 */
		n = neigh_lookup(&nd_tbl, &hdr->daddr, ldev);
		if (n) {
			llneigh = lowpan_802154_neigh(neighbour_priv(n));
			read_lock_bh(&n->lock);
			short_addr = llneigh->short_addr;
			read_unlock_bh(&n->lock);
		}

		if (llneigh &&
		    lowpan_802154_is_valid_src_short_addr(short_addr)) {
			info->daddr.short_addr = short_addr;
			info->daddr.mode = IEEE802154_ADDR_SHORT;
		} else {
			/* no usable short address: fall back to the
			 * extended (EUI-64) destination passed in
			 */
			info->daddr.mode = IEEE802154_ADDR_LONG;
			ieee802154_be64_to_le64(&info->daddr.extended_addr,
						daddr);
		}

		/* drop the reference taken by neigh_lookup() */
		if (n)
			neigh_release(n);
	}

	if (!saddr) {
		/* no source given: use our own short address when valid,
		 * otherwise our extended address
		 */
		if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr)) {
			info->saddr.mode = IEEE802154_ADDR_SHORT;
			info->saddr.short_addr = wpan_dev->short_addr;
		} else {
			info->saddr.mode = IEEE802154_ADDR_LONG;
			info->saddr.extended_addr = wpan_dev->extended_addr;
		}
	} else {
		info->saddr.mode = IEEE802154_ADDR_LONG;
		ieee802154_be64_to_le64(&info->saddr.extended_addr, saddr);
	}

	return 0;
}
  92. static struct sk_buff*
  93. lowpan_alloc_frag(struct sk_buff *skb, int size,
  94. const struct ieee802154_hdr *master_hdr, bool frag1)
  95. {
  96. struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev;
  97. struct sk_buff *frag;
  98. int rc;
  99. frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
  100. GFP_ATOMIC);
  101. if (likely(frag)) {
  102. frag->dev = wdev;
  103. frag->priority = skb->priority;
  104. skb_reserve(frag, wdev->needed_headroom);
  105. skb_reset_network_header(frag);
  106. *mac_cb(frag) = *mac_cb(skb);
  107. if (frag1) {
  108. memcpy(skb_put(frag, skb->mac_len),
  109. skb_mac_header(skb), skb->mac_len);
  110. } else {
  111. rc = wpan_dev_hard_header(frag, wdev,
  112. &master_hdr->dest,
  113. &master_hdr->source, size);
  114. if (rc < 0) {
  115. kfree_skb(frag);
  116. return ERR_PTR(rc);
  117. }
  118. }
  119. } else {
  120. frag = ERR_PTR(-ENOMEM);
  121. }
  122. return frag;
  123. }
  124. static int
  125. lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
  126. u8 *frag_hdr, int frag_hdrlen,
  127. int offset, int len, bool frag1)
  128. {
  129. struct sk_buff *frag;
  130. raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
  131. frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
  132. if (IS_ERR(frag))
  133. return PTR_ERR(frag);
  134. memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
  135. memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
  136. raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
  137. return dev_queue_xmit(frag);
  138. }
  139. static int
  140. lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
  141. const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
  142. u16 dgram_offset)
  143. {
  144. __be16 frag_tag;
  145. u8 frag_hdr[5];
  146. int frag_cap, frag_len, payload_cap, rc;
  147. int skb_unprocessed, skb_offset;
  148. frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag);
  149. lowpan_802154_dev(ldev)->fragment_tag++;
  150. frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
  151. frag_hdr[1] = dgram_size & 0xff;
  152. memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
  153. payload_cap = ieee802154_max_payload(wpan_hdr);
  154. frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
  155. skb_network_header_len(skb), 8);
  156. skb_offset = skb_network_header_len(skb);
  157. skb_unprocessed = skb->len - skb->mac_len - skb_offset;
  158. rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
  159. LOWPAN_FRAG1_HEAD_SIZE, 0,
  160. frag_len + skb_network_header_len(skb),
  161. true);
  162. if (rc) {
  163. pr_debug("%s unable to send FRAG1 packet (tag: %d)",
  164. __func__, ntohs(frag_tag));
  165. goto err;
  166. }
  167. frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
  168. frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
  169. frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
  170. do {
  171. dgram_offset += frag_len;
  172. skb_offset += frag_len;
  173. skb_unprocessed -= frag_len;
  174. frag_len = min(frag_cap, skb_unprocessed);
  175. frag_hdr[4] = dgram_offset >> 3;
  176. rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
  177. LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
  178. frag_len, false);
  179. if (rc) {
  180. pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
  181. __func__, ntohs(frag_tag), skb_offset);
  182. goto err;
  183. }
  184. } while (skb_unprocessed > frag_cap);
  185. ldev->stats.tx_packets++;
  186. ldev->stats.tx_bytes += dgram_size;
  187. consume_skb(skb);
  188. return NET_XMIT_SUCCESS;
  189. err:
  190. kfree_skb(skb);
  191. return rc;
  192. }
  193. static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
  194. u16 *dgram_size, u16 *dgram_offset)
  195. {
  196. struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
  197. struct ieee802154_mac_cb *cb = mac_cb_init(skb);
  198. struct lowpan_addr_info info;
  199. memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
  200. *dgram_size = skb->len;
  201. lowpan_header_compress(skb, ldev, &info.daddr, &info.saddr);
  202. /* dgram_offset = (saved bytes after compression) + lowpan header len */
  203. *dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
  204. cb->type = IEEE802154_FC_TYPE_DATA;
  205. if (info.daddr.mode == IEEE802154_ADDR_SHORT &&
  206. ieee802154_is_broadcast_short_addr(info.daddr.short_addr))
  207. cb->ackreq = false;
  208. else
  209. cb->ackreq = wpan_dev->ackreq;
  210. return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev,
  211. &info.daddr, &info.saddr, 0);
  212. }
/* Transmit entry point for the 6LoWPAN virtual device.  Takes ownership
 * of @skb: on every path it is either queued, consumed, or freed.
 * Sends the frame directly when it fits a single 802.15.4 payload,
 * otherwise hands it to lowpan_xmit_fragmented().
 */
netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
{
	struct ieee802154_hdr wpan_hdr;
	int max_single, ret;
	u16 dgram_size, dgram_offset;

	pr_debug("package xmit\n");

	WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
		     skb_tailroom(skb) < ldev->needed_tailroom)) {
		struct sk_buff *nskb;

		/* not enough room: take a grown private copy and release
		 * the original
		 */
		nskb = skb_copy_expand(skb, ldev->needed_headroom,
				       ldev->needed_tailroom, GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			kfree_skb(skb);
			return NET_XMIT_DROP;
		}
	} else {
		/* room is fine, but still ensure we hold a private copy
		 * before compressing the header in place
		 */
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;
	}

	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
	if (ret < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* largest payload a single frame with this MAC header can carry */
	max_single = ieee802154_max_payload(&wpan_hdr);

	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
		/* fits in one frame: queue on the underlying wpan device */
		skb->dev = lowpan_802154_dev(ldev)->wdev;
		ldev->stats.tx_packets++;
		ldev->stats.tx_bytes += dgram_size;
		return dev_queue_xmit(skb);
	} else {
		netdev_tx_t rc;

		pr_debug("frame is too big, fragmentation is needed\n");
		rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
					    dgram_offset);
		return rc < 0 ? NET_XMIT_DROP : rc;
	}
}