xfrm_interface.c

// SPDX-License-Identifier: GPL-2.0
/*
 * XFRM virtual interface
 *
 * Copyright (C) 2018 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <[email protected]>
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/etherdevice.h>

static int xfrmi_dev_init(struct net_device *dev);
static void xfrmi_dev_setup(struct net_device *dev);
static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
static unsigned int xfrmi_net_id __read_mostly;

struct xfrmi_net {
	/* lists for storing interfaces in use */
	struct xfrm_if __rcu *xfrmi[1];
};

#define for_each_xfrmi_rcu(start, xi) \
	for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))

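/* Find the interface whose if_id matches the given xfrm state.
 * Called under RCU; only interfaces that are administratively up match.
 */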
static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
{
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	struct xfrm_if *xi;

	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
		if (x->if_id == xi->p.if_id &&
		    (xi->dev->flags & IFF_UP))
			return xi;
	}

	return NULL;
}

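/* Map an skb to the xfrm interface that owns skb->dev, keyed by ifindex.
 * Used as the decode_session callback registered in xfrm_if_cb below.
 */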
static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
{
	struct xfrmi_net *xfrmn;
	int ifindex;
	struct xfrm_if *xi;

	if (!skb->dev)
		return NULL;

	xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id);
	ifindex = skb->dev->ifindex;

	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
		if (ifindex == xi->dev->ifindex &&
		    (xi->dev->flags & IFF_UP))
			return xi;
	}

	return NULL;
}

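/* Insert into / remove from the per-netns interface list. Writers hold
 * the RTNL lock; readers walk the list under RCU via for_each_xfrmi_rcu().
 */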
static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0];

	rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
	rcu_assign_pointer(*xip, xi);
}

static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *iter;

	for (xip = &xfrmn->xfrmi[0];
	     (iter = rtnl_dereference(*xip)) != NULL;
	     xip = &iter->next) {
		if (xi == iter) {
			rcu_assign_pointer(*xip, xi->next);
			break;
		}
	}
}

static void xfrmi_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static int xfrmi_create2(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	int err;

	dev->rtnl_link_ops = &xfrmi_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(xi->p.name, dev->name);

	dev_hold(dev);
	xfrmi_link(xfrmn, xi);

	return 0;

out:
	return err;
}

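/* Allocate and register a new xfrm interface from the given parameters.
 * The underlying device referenced by p->link is looked up and held for
 * the lifetime of the interface.
 */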
static struct xfrm_if *xfrmi_create(struct net *net, struct xfrm_if_parms *p)
{
	struct net_device *dev;
	struct xfrm_if *xi;
	char name[IFNAMSIZ];
	int err;

	if (p->name[0]) {
		strlcpy(name, p->name, IFNAMSIZ);
	} else {
		err = -EINVAL;
		goto failed;
	}

	dev = alloc_netdev(sizeof(*xi), name, NET_NAME_UNKNOWN, xfrmi_dev_setup);
	if (!dev) {
		err = -EAGAIN;
		goto failed;
	}

	dev_net_set(dev, net);

	xi = netdev_priv(dev);
	xi->p = *p;
	xi->net = net;
	xi->dev = dev;
	xi->phydev = dev_get_by_index(net, p->link);
	if (!xi->phydev) {
		err = -ENODEV;
		goto failed_free;
	}

	err = xfrmi_create2(dev);
	if (err < 0)
		goto failed_dev_put;

	return xi;

failed_dev_put:
	dev_put(xi->phydev);
failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p,
				    int create)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *xi;
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	for (xip = &xfrmn->xfrmi[0];
	     (xi = rtnl_dereference(*xip)) != NULL;
	     xip = &xi->next) {
		if (xi->p.if_id == p->if_id) {
			if (create)
				return ERR_PTR(-EEXIST);

			return xi;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);

	return xfrmi_create(net, p);
}

static void xfrmi_dev_uninit(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);

	xfrmi_unlink(xfrmn, xi);
	dev_put(xi->phydev);
	dev_put(dev);
}

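/* Drop skb state that must not leak across the tunnel boundary; when the
 * packet crosses a network namespace (xnet), also clear ownership, the
 * secpath and the mark.
 */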
static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	nf_reset(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	ipvs_reset(skb);
	secpath_reset(skb);
	skb_orphan(skb);
	skb->mark = 0;
}

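/* Receive callback invoked after xfrm input processing. Attributes the
 * packet to the matching interface, runs an inbound policy check when the
 * namespaces differ, and updates the per-CPU rx counters.
 */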
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
	struct pcpu_sw_netstats *tstats;
	struct xfrm_mode *inner_mode;
	struct net_device *dev;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	bool xnet;

	if (err && !skb->sp)
		return 0;

	x = xfrm_input_state(skb);

	xi = xfrmi_lookup(xs_net(x), x);
	if (!xi)
		return 1;

	dev = xi->dev;
	skb->dev = dev;

	if (err) {
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;

		return 0;
	}

	xnet = !net_eq(xi->net, dev_net(skb->dev));

	if (xnet) {
		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(dev_net(skb->dev),
					       LINUX_MIB_XFRMINSTATEMODEERROR);
				return -EINVAL;
			}
		}

		if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
				       inner_mode->afinfo->family))
			return -EPERM;
	}

	xfrmi_scrub_packet(skb, xnet);

	tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	return 0;
}

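/* Common transmit path: tag the flow with this interface's if_id, run the
 * xfrm policy/state lookup, handle PMTU, and send the transformed packet
 * out through the resulting dst.
 */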
static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device_stats *stats = &xi->dev->stats;
	struct dst_entry *dst = skb_dst(skb);
	unsigned int length = skb->len;
	struct net_device *tdev;
	struct xfrm_state *x;
	int err = -1;
	int mtu;

	if (!dst)
		goto tx_err_link_failure;

	fl->flowi_xfrm.if_id = xi->p.if_id;

	dst_hold(dst);
	dst = xfrm_lookup(xi->net, dst, fl, NULL, 0);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto tx_err_link_failure;
	}

	x = dst->xfrm;
	if (!x)
		goto tx_err_link_failure;

	if (x->if_id != xi->p.if_id)
		goto tx_err_link_failure;

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     xi->p.name);
		goto tx_err_dst_release;
	}

	mtu = dst_mtu(dst);
	if (!skb->ignore_df && skb->len > mtu) {
		if (dst && dst->ops->update_pmtu)
			dst->ops->update_pmtu(dst, NULL, skb, mtu);

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		} else {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		}

		dst_release(dst);
		return -EMSGSIZE;
	}

	xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = tdev;

	err = dst_output(xi->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += length;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}

	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}

static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device_stats *stats = &xi->dev->stats;
	struct flowi fl;
	int ret;

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IPV6):
		xfrm_decode_session(skb, &fl, AF_INET6);
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		break;
	case htons(ETH_P_IP):
		xfrm_decode_session(skb, &fl, AF_INET);
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		break;
	default:
		goto tx_err;
	}

	fl.flowi_oif = xi->phydev->ifindex;

	ret = xfrmi_xmit2(skb, dev, &fl);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

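/* ICMP/ICMPv6 error handlers: find the xfrm state the error refers to
 * and, if it belongs to an xfrm interface, propagate PMTU updates and
 * redirects. The IPv4 handler is below, the IPv6 variant follows it.
 */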
static int xfrmi4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->protocol;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	__be32 spi;

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + (iph->ihl << 2));
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		/* fall through */
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET);
	if (!x)
		return 0;

	xi = xfrmi_lookup(net, x);
	if (!xi) {
		xfrm_state_put(x);
		return -1;
	}

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0);
	else
		ipv4_redirect(skb, net, 0, 0, protocol, 0);
	xfrm_state_put(x);

	return 0;
}

static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->nexthdr;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	__be32 spi;

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + offset);
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + offset);
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + offset);
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET6);
	if (!x)
		return 0;

	xi = xfrmi_lookup(net, x);
	if (!xi) {
		xfrm_state_put(x);
		return -1;
	}

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

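/* Update interface parameters under RTNL: unlink, wait for RCU readers,
 * apply the change, then relink and signal the state change. The lower
 * link may not be changed after creation.
 */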
static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
{
	if (xi->p.link != p->link)
		return -EINVAL;

	xi->p.if_id = p->if_id;

	return 0;
}

static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
{
	struct net *net = dev_net(xi->dev);
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	int err;

	xfrmi_unlink(xfrmn, xi);
	synchronize_net();
	err = xfrmi_change(xi, p);
	xfrmi_link(xfrmn, xi);
	netdev_state_change(xi->dev);
	return err;
}

static struct rtnl_link_stats64 *xfrmi_get_stats64(struct net_device *dev,
						   struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return s;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes   = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes   += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes   += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;

	return s;
}

static int xfrmi_get_iflink(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	return xi->phydev->ifindex;
}

static const struct net_device_ops xfrmi_netdev_ops = {
	.ndo_init	= xfrmi_dev_init,
	.ndo_uninit	= xfrmi_dev_uninit,
	.ndo_start_xmit	= xfrmi_xmit,
	.ndo_get_stats64 = xfrmi_get_stats64,
	.ndo_get_iflink	= xfrmi_get_iflink,
};

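/* Default netdev properties for a freshly allocated xfrm interface. */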
static void xfrmi_dev_setup(struct net_device *dev)
{
	dev->netdev_ops		= &xfrmi_netdev_ops;
	dev->type		= ARPHRD_NONE;
	dev->hard_header_len	= ETH_HLEN;
	dev->min_header_len	= ETH_HLEN;
	dev->mtu		= ETH_DATA_LEN;
	dev->addr_len		= ETH_ALEN;
	dev->flags		= IFF_NOARP;
	dev->destructor		= xfrmi_dev_free;
	netif_keep_dst(dev);
}

static int xfrmi_dev_init(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device *phydev = xi->phydev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&xi->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features |= NETIF_F_LLTX;

	dev->needed_headroom = phydev->needed_headroom;
	dev->needed_tailroom = phydev->needed_tailroom;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, phydev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, phydev->broadcast, dev->addr_len);

	return 0;
}

static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return 0;
}

static void xfrmi_netlink_parms(struct nlattr *data[],
				struct xfrm_if_parms *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_XFRM_LINK])
		parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);

	if (data[IFLA_XFRM_IF_ID])
		parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
}

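/* rtnl_link_ops handlers: create, delete and reconfigure interfaces from
 * the IFLA_XFRM_LINK / IFLA_XFRM_IF_ID netlink attributes.
 */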
static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct net *net = dev_net(dev);
	struct xfrm_if_parms *p;
	struct xfrm_if *xi;

	xi = netdev_priv(dev);
	p = &xi->p;

	xfrmi_netlink_parms(data, p);

	if (!tb[IFLA_IFNAME])
		return -EINVAL;

	nla_strlcpy(p->name, tb[IFLA_IFNAME], IFNAMSIZ);

	xi = xfrmi_locate(net, p, 1);
	return PTR_ERR_OR_ZERO(xi);
}

static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = dev_net(dev);

	xfrmi_netlink_parms(data, &xi->p);

	xi = xfrmi_locate(net, &xi->p, 0);

	if (IS_ERR_OR_NULL(xi)) {
		xi = netdev_priv(dev);
	} else {
		if (xi->dev != dev)
			return -EEXIST;
	}

	return xfrmi_update(xi, &xi->p);
}

static size_t xfrmi_get_size(const struct net_device *dev)
{
	return
		/* IFLA_XFRM_LINK */
		nla_total_size(4) +
		/* IFLA_XFRM_IF_ID */
		nla_total_size(4) +
		0;
}

static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrm_if_parms *parm = &xi->p;

	if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
	    nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct net *xfrmi_get_link_net(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	return dev_net(xi->phydev);
}

static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
	[IFLA_XFRM_LINK]	= { .type = NLA_U32 },
	[IFLA_XFRM_IF_ID]	= { .type = NLA_U32 },
};

static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
	.kind		= "xfrm",
	.maxtype	= IFLA_XFRM_MAX,
	.policy		= xfrmi_policy,
	.priv_size	= sizeof(struct xfrm_if),
	.setup		= xfrmi_dev_setup,
	.validate	= xfrmi_validate,
	.newlink	= xfrmi_newlink,
	.dellink	= xfrmi_dellink,
	.changelink	= xfrmi_changelink,
	.get_size	= xfrmi_get_size,
	.fill_info	= xfrmi_fill_info,
	.get_link_net	= xfrmi_get_link_net,
};

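/* Example (not part of this file): with a recent enough iproute2, an
 * interface backed by this driver would typically be created with
 * something like
 *
 *	ip link add xfrm0 type xfrm dev eth0 if_id 42
 *
 * where the device names and the if_id value are illustrative.
 */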
static void __net_exit xfrmi_destroy_interfaces(struct xfrmi_net *xfrmn)
{
	struct xfrm_if *xi;
	LIST_HEAD(list);

	xi = rtnl_dereference(xfrmn->xfrmi[0]);
	if (!xi)
		return;

	unregister_netdevice_queue(xi->dev, &list);
	unregister_netdevice_many(&list);
}

static int __net_init xfrmi_init_net(struct net *net)
{
	return 0;
}

static void __net_exit xfrmi_exit_net(struct net *net)
{
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	rtnl_lock();
	xfrmi_destroy_interfaces(xfrmn);
	rtnl_unlock();
}

static struct pernet_operations xfrmi_net_ops = {
	.init = xfrmi_init_net,
	.exit = xfrmi_exit_net,
	.id   = &xfrmi_net_id,
	.size = sizeof(struct xfrmi_net),
};

static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
	.handler	= xfrm6_rcv,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi6_err,
	.priority	= 10,
};

static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
	.handler	= xfrm6_rcv,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi6_err,
	.priority	= 10,
};

static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
	.handler	= xfrm6_rcv,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi6_err,
	.priority	= 10,
};

static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi4_err,
	.priority	= 10,
};

static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi4_err,
	.priority	= 10,
};

static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi4_err,
	.priority	= 10,
};

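/* Register for ESP, AH and IPCOMP on both address families so that
 * xfrmi_rcv_cb() and the error handlers above are called for traffic
 * belonging to these protocols.
 */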
static int __init xfrmi4_init(void)
{
	int err;

	err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	return 0;

xfrm_proto_comp_failed:
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	return err;
}

static void xfrmi4_fini(void)
{
	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
}

static int __init xfrmi6_init(void)
{
	int err;

	err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	return 0;

xfrm_proto_comp_failed:
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	return err;
}

static void xfrmi6_fini(void)
{
	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
}

static const struct xfrm_if_cb xfrm_if_cb = {
	.decode_session = xfrmi_decode_session,
};

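/* Module init: register the pernet device, the IPv4/IPv6 protocol
 * handlers and the rtnl link ops, then install the decode_session
 * callback. Failure paths unwind the registrations in reverse order.
 */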
static int __init xfrmi_init(void)
{
	const char *msg;
	int err;

	pr_info("IPsec XFRM device driver\n");

	msg = "tunnel device";
	err = register_pernet_device(&xfrmi_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "xfrm4 protocols";
	err = xfrmi4_init();
	if (err < 0)
		goto xfrmi4_failed;

	msg = "xfrm6 protocols";
	err = xfrmi6_init();
	if (err < 0)
		goto xfrmi6_failed;

	msg = "netlink interface";
	err = rtnl_link_register(&xfrmi_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	xfrm_if_register_cb(&xfrm_if_cb);

	return err;

rtnl_link_failed:
	xfrmi6_fini();
xfrmi6_failed:
	xfrmi4_fini();
xfrmi4_failed:
	unregister_pernet_device(&xfrmi_net_ops);
pernet_dev_failed:
	pr_err("xfrmi init: failed to register %s\n", msg);
	return err;
}

static void __exit xfrmi_fini(void)
{
	xfrm_if_unregister_cb();
	rtnl_link_unregister(&xfrmi_link_ops);
	xfrmi4_fini();
	xfrmi6_fini();
	unregister_pernet_device(&xfrmi_net_ops);
}

module_init(xfrmi_init);
module_exit(xfrmi_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("xfrm");
MODULE_ALIAS_NETDEV("xfrm0");
MODULE_AUTHOR("Steffen Klassert");
MODULE_DESCRIPTION("XFRM virtual interface");