Diffstat (limited to 'net/ipv6')
-rw-r--r--  net/ipv6/Kconfig                         |  14
-rw-r--r--  net/ipv6/Makefile                        |   1
-rw-r--r--  net/ipv6/addrconf.c                      | 116
-rw-r--r--  net/ipv6/af_inet6.c                      |   3
-rw-r--r--  net/ipv6/ah6.c                           |   3
-rw-r--r--  net/ipv6/esp6.c                          | 318
-rw-r--r--  net/ipv6/esp6_offload.c                  | 108
-rw-r--r--  net/ipv6/icmp.c                          |  68
-rw-r--r--  net/ipv6/ila/ila_lwt.c                   |   2
-rw-r--r--  net/ipv6/inet6_connection_sock.c         |  40
-rw-r--r--  net/ipv6/inet6_hashtables.c              |  46
-rw-r--r--  net/ipv6/ip6_fib.c                       |  22
-rw-r--r--  net/ipv6/ip6_gre.c                       |   8
-rw-r--r--  net/ipv6/ip6_offload.c                   |   2
-rw-r--r--  net/ipv6/ip6_output.c                    |   9
-rw-r--r--  net/ipv6/ip6_vti.c                       |   4
-rw-r--r--  net/ipv6/ip6mr.c                         |  13
-rw-r--r--  net/ipv6/ipv6_sockglue.c                 |  16
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c          |  21
-rw-r--r--  net/ipv6/netfilter/ip6t_NPT.c            |   2
-rw-r--r--  net/ipv6/netfilter/ip6t_SYNPROXY.c       |  11
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c |  20
-rw-r--r--  net/ipv6/netfilter/nf_defrag_ipv6_hooks.c |   4
-rw-r--r--  net/ipv6/netfilter/nf_dup_ipv6.c         |   7
-rw-r--r--  net/ipv6/netfilter/nf_log_ipv6.c         |   2
-rw-r--r--  net/ipv6/ping.c                          |   7
-rw-r--r--  net/ipv6/raw.c                           |   6
-rw-r--r--  net/ipv6/route.c                         | 337
-rw-r--r--  net/ipv6/seg6_hmac.c                     |  46
-rw-r--r--  net/ipv6/seg6_iptunnel.c                 |  10
-rw-r--r--  net/ipv6/syncookies.c                    |  41
-rw-r--r--  net/ipv6/tcp_ipv6.c                      |  23
-rw-r--r--  net/ipv6/udp.c                           |  32
-rw-r--r--  net/ipv6/xfrm6_input.c                   |  22
-rw-r--r--  net/ipv6/xfrm6_mode_transport.c          |   4
-rw-r--r--  net/ipv6/xfrm6_policy.c                  |   9
-rw-r--r--  net/ipv6/xfrm6_protocol.c                |   3
37 files changed, 956 insertions, 444 deletions
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index ec1267e2bd1f..e2afe677a9d9 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -75,6 +75,19 @@ config INET6_ESP
If unsure, say Y.
+config INET6_ESP_OFFLOAD
+ tristate "IPv6: ESP transformation offload"
+ depends on INET6_ESP
+ select XFRM_OFFLOAD
+ default n
+ ---help---
+ Support for ESP transformation offload. This makes sense
+ only if this system really does IPsec and wants to do it
+ with high throughput. A typical desktop system does not
+ need it, even if it does IPsec.
+
+ If unsure, say N.
+
config INET6_IPCOMP
tristate "IPv6: IPComp transformation"
select INET6_XFRM_TUNNEL
@@ -208,6 +221,7 @@ config IPV6_TUNNEL
tristate "IPv6: IP-in-IPv6 tunnel (RFC2473)"
select INET6_TUNNEL
select DST_CACHE
+ select GRO_CELLS
---help---
Support for IPv6-in-IPv6 and IPv4-in-IPv6 tunnels described in
RFC 2473.
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index a9e9fec387ce..217e9ff0e24b 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -30,6 +30,7 @@ ipv6-objs += $(ipv6-y)
obj-$(CONFIG_INET6_AH) += ah6.o
obj-$(CONFIG_INET6_ESP) += esp6.o
+obj-$(CONFIG_INET6_ESP_OFFLOAD) += esp6_offload.o
obj-$(CONFIG_INET6_IPCOMP) += ipcomp6.o
obj-$(CONFIG_INET6_XFRM_TUNNEL) += xfrm6_tunnel.o
obj-$(CONFIG_INET6_TUNNEL) += tunnel6.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a7bcc0ab5e99..3a2025f5bf2c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -243,6 +243,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.seg6_require_hmac = 0,
#endif
.enhanced_dad = 1,
+ .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -294,6 +295,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.seg6_require_hmac = 0,
#endif
.enhanced_dad = 1,
+ .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
};
/* Check if a valid qdisc is available */
@@ -386,9 +388,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
if (ndev->cnf.stable_secret.initialized)
- ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
else
- ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
+ ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
ndev->cnf.mtu6 = dev->mtu;
ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
@@ -2144,12 +2146,14 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
case ARPHRD_SIT:
return addrconf_ifid_sit(eui, dev);
case ARPHRD_IPGRE:
+ case ARPHRD_TUNNEL:
return addrconf_ifid_gre(eui, dev);
case ARPHRD_6LOWPAN:
return addrconf_ifid_eui64(eui, dev);
case ARPHRD_IEEE1394:
return addrconf_ifid_ieee1394(eui, dev);
case ARPHRD_TUNNEL6:
+ case ARPHRD_IP6GRE:
return addrconf_ifid_ip6tnl(eui, dev);
}
return -1;
@@ -2387,8 +2391,8 @@ static void manage_tempaddrs(struct inet6_dev *idev,
static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
{
- return idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
- idev->addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
+ return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
+ idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
}
int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
@@ -3152,7 +3156,7 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
- switch (idev->addr_gen_mode) {
+ switch (idev->cnf.addr_gen_mode) {
case IN6_ADDR_GEN_MODE_RANDOM:
ipv6_gen_mode_random_init(idev);
/* fallthrough */
@@ -3193,6 +3197,9 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_IEEE1394) &&
(dev->type != ARPHRD_TUNNEL6) &&
(dev->type != ARPHRD_6LOWPAN) &&
+ (dev->type != ARPHRD_IP6GRE) &&
+ (dev->type != ARPHRD_IPGRE) &&
+ (dev->type != ARPHRD_TUNNEL) &&
(dev->type != ARPHRD_NONE)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
@@ -3204,8 +3211,8 @@ static void addrconf_dev_config(struct net_device *dev)
/* this device type has no EUI support */
if (dev->type == ARPHRD_NONE &&
- idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
- idev->addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
+ idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
+ idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
addrconf_addr_gen(idev, false);
}
@@ -4900,6 +4907,13 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
struct net *net = dev_net(ifa->idev->dev);
int err = -ENOBUFS;
+ /* Don't send DELADDR notification for TENTATIVE address,
+ * since NEWADDR notification is sent only after removing
+ * TENTATIVE flag.
+ */
+ if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR)
+ return;
+
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
if (!skb)
goto errout;
@@ -4987,6 +5001,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
#endif
array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
+ array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
}
static inline size_t inet6_ifla6_size(void)
@@ -5098,7 +5113,7 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
if (!nla)
goto nla_put_failure;
- if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->addr_gen_mode))
+ if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
goto nla_put_failure;
read_lock_bh(&idev->lock);
@@ -5216,6 +5231,26 @@ static int inet6_validate_link_af(const struct net_device *dev,
return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
}
+static int check_addr_gen_mode(int mode)
+{
+ if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
+ mode != IN6_ADDR_GEN_MODE_NONE &&
+ mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
+ mode != IN6_ADDR_GEN_MODE_RANDOM)
+ return -EINVAL;
+ return 1;
+}
+
+static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
+ int mode)
+{
+ if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
+ !idev->cnf.stable_secret.initialized &&
+ !net->ipv6.devconf_dflt->stable_secret.initialized)
+ return -EINVAL;
+ return 1;
+}
+
static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
{
int err = -EINVAL;
@@ -5237,18 +5272,11 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
- if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
- mode != IN6_ADDR_GEN_MODE_NONE &&
- mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
- mode != IN6_ADDR_GEN_MODE_RANDOM)
- return -EINVAL;
-
- if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
- !idev->cnf.stable_secret.initialized &&
- !dev_net(dev)->ipv6.devconf_dflt->stable_secret.initialized)
+ if (check_addr_gen_mode(mode) < 0 ||
+ check_stable_privacy(idev, dev_net(dev), mode) < 0)
return -EINVAL;
- idev->addr_gen_mode = mode;
+ idev->cnf.addr_gen_mode = mode;
err = 0;
}
@@ -5655,6 +5683,47 @@ int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
return ret;
}
+static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int new_val;
+ struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
+ struct net *net = (struct net *)ctl->extra2;
+
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+
+ if (write) {
+ new_val = *((int *)ctl->data);
+
+ if (check_addr_gen_mode(new_val) < 0)
+ return -EINVAL;
+
+ /* request for default */
+ if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
+ ipv6_devconf_dflt.addr_gen_mode = new_val;
+
+ /* request for individual net device */
+ } else {
+ if (!idev)
+ return ret;
+
+ if (check_stable_privacy(idev, net, new_val) < 0)
+ return -EINVAL;
+
+ if (idev->cnf.addr_gen_mode != new_val) {
+ idev->cnf.addr_gen_mode = new_val;
+ rtnl_lock();
+ addrconf_dev_config(idev->dev);
+ rtnl_unlock();
+ }
+ }
+ }
+
+ return ret;
+}
+
static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
@@ -5705,14 +5774,14 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
struct inet6_dev *idev = __in6_dev_get(dev);
if (idev) {
- idev->addr_gen_mode =
+ idev->cnf.addr_gen_mode =
IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
}
}
} else {
struct inet6_dev *idev = ctl->extra1;
- idev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
}
out:
@@ -6100,6 +6169,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "addr_gen_mode",
+ .data = &ipv6_devconf.addr_gen_mode,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = addrconf_sysctl_addr_gen_mode,
+ },
+ {
/* sentinel */
}
};
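
The addrconf.c changes above expose the address generation mode as a per-device sysctl in addition to the existing IFLA_INET6_ADDR_GEN_MODE netlink attribute. A minimal userspace sketch of driving the new knob, assuming a kernel carrying this patch; the interface name "eth0" and the chosen mode are illustrative, not part of the patch:

#include <stdio.h>

int main(void)
{
        /* 0 = EUI64, 1 = none, 2 = stable-privacy, 3 = random */
        const char *path = "/proc/sys/net/ipv6/conf/eth0/addr_gen_mode";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fprintf(f, "3\n");      /* request random link-local address generation */
        fclose(f);
        return 0;
}

As in the handler above, an out-of-range value is rejected with -EINVAL, and stable-privacy can only be selected once a stable secret has been configured.
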
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index aa42123bc301..04db40620ea6 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -302,7 +302,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return -EINVAL;
snum = ntohs(addr->sin6_port);
- if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
+ if (snum && snum < inet_prot_sock(net) &&
+ !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
lock_sock(sk);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 189eb10b742d..dda6035e3b84 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -474,6 +474,9 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
int hdr_len = skb_network_header_len(skb);
int ah_hlen = (ah->hdrlen + 2) << 2;
+ if (err)
+ goto out;
+
work_iph = AH_SKB_CB(skb)->tmp;
auth_data = ah_tmp_auth(work_iph, hdr_len);
icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index cbcdd5db31f4..ff54faa75631 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -44,6 +44,8 @@
#include <net/protocol.h>
#include <linux/icmpv6.h>
+#include <linux/highmem.h>
+
struct esp_skb_cb {
struct xfrm_skb_cb xfrm;
void *tmp;
@@ -114,11 +116,40 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
__alignof__(struct scatterlist));
}
+static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+{
+ __be32 *seqhi;
+ struct crypto_aead *aead = x->data;
+ int seqhilen = 0;
+ u8 *iv;
+ struct aead_request *req;
+ struct scatterlist *sg;
+
+ if (x->props.flags & XFRM_STATE_ESN)
+ seqhilen += sizeof(__be32);
+
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
+ req = esp_tmp_req(aead, iv);
+
+ /* Unref skb_frag_pages in the src scatterlist if necessary.
+ * Skip the first sg which comes from skb->data.
+ */
+ if (req->src != req->dst)
+ for (sg = sg_next(req->src); sg; sg = sg_next(sg))
+ put_page(sg_page(sg));
+}
+
static void esp_output_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
+ void *tmp;
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x = dst->xfrm;
- kfree(ESP_SKB_CB(skb)->tmp);
+ tmp = ESP_SKB_CB(skb)->tmp;
+ esp_ssg_unref(x, tmp);
+ kfree(tmp);
xfrm_output_resume(skb, err);
}
@@ -138,6 +169,27 @@ static void esp_output_restore_header(struct sk_buff *skb)
esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}
+static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
+ struct ip_esp_hdr *esph,
+ __be32 *seqhi)
+{
+ struct xfrm_state *x = skb_dst(skb)->xfrm;
+
+ /* For ESN we move the header forward by 4 bytes to
+ * accommodate the high bits. We will move it back after
+ * encryption.
+ */
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
+ *seqhi = esph->spi;
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+ }
+
+ esph->spi = x->id.spi;
+
+ return esph;
+}
+
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
@@ -146,14 +198,31 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
esp_output_done(base, err);
}
+static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
+{
+ /* Fill padding... */
+ if (tfclen) {
+ memset(tail, 0, tfclen);
+ tail += tfclen;
+ }
+ do {
+ int i;
+ for (i = 0; i < plen - 2; i++)
+ tail[i] = i + 1;
+ } while (0);
+ tail[plen - 2] = plen - 2;
+ tail[plen - 1] = proto;
+}
+
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct aead_request *req;
- struct scatterlist *sg;
+ struct scatterlist *sg, *dsg;
struct sk_buff *trailer;
+ struct page *page;
void *tmp;
int blksize;
int clen;
@@ -164,10 +233,13 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
int nfrags;
int assoclen;
int seqhilen;
+ int tailen;
u8 *iv;
u8 *tail;
+ u8 *vaddr;
__be32 *seqhi;
__be64 seqno;
+ __u8 proto = *skb_mac_header(skb);
/* skb is pure payload to encrypt */
aead = x->data;
@@ -186,11 +258,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
clen = ALIGN(skb->len + 2 + tfclen, blksize);
plen = clen - skb->len - tfclen;
-
- err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
- if (err < 0)
- goto error;
- nfrags = err;
+ tailen = tfclen + plen + alen;
assoclen = sizeof(*esph);
seqhilen = 0;
@@ -200,59 +268,152 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
assoclen += seqhilen;
}
- tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
- if (!tmp) {
- err = -ENOMEM;
- goto error;
+ *skb_mac_header(skb) = IPPROTO_ESP;
+ esph = ip_esp_hdr(skb);
+
+ if (!skb_cloned(skb)) {
+ if (tailen <= skb_availroom(skb)) {
+ nfrags = 1;
+ trailer = skb;
+ tail = skb_tail_pointer(trailer);
+
+ goto skip_cow;
+ } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
+ && !skb_has_frag_list(skb)) {
+ int allocsize;
+ struct sock *sk = skb->sk;
+ struct page_frag *pfrag = &x->xfrag;
+
+ allocsize = ALIGN(tailen, L1_CACHE_BYTES);
+
+ spin_lock_bh(&x->lock);
+
+ if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+ spin_unlock_bh(&x->lock);
+ goto cow;
+ }
+
+ page = pfrag->page;
+ get_page(page);
+
+ vaddr = kmap_atomic(page);
+
+ tail = vaddr + pfrag->offset;
+
+ esp_output_fill_trailer(tail, tfclen, plen, proto);
+
+ kunmap_atomic(vaddr);
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
+ tailen);
+ skb_shinfo(skb)->nr_frags = ++nfrags;
+
+ pfrag->offset = pfrag->offset + allocsize;
+ nfrags++;
+
+ skb->len += tailen;
+ skb->data_len += tailen;
+ skb->truesize += tailen;
+ if (sk)
+ atomic_add(tailen, &sk->sk_wmem_alloc);
+
+ skb_push(skb, -skb_network_offset(skb));
+
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+ esph->spi = x->id.spi;
+
+ tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen);
+ if (!tmp) {
+ spin_unlock_bh(&x->lock);
+ err = -ENOMEM;
+ goto error;
+ }
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
+ req = esp_tmp_req(aead, iv);
+ sg = esp_req_sg(aead, req);
+ dsg = &sg[nfrags];
+
+ esph = esp_output_set_esn(skb, esph, seqhi);
+
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + clen + alen);
+
+ allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
+
+ if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+ spin_unlock_bh(&x->lock);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ skb_shinfo(skb)->nr_frags = 1;
+
+ page = pfrag->page;
+ get_page(page);
+ /* replace page frags in skb with new page */
+ __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
+ pfrag->offset = pfrag->offset + allocsize;
+
+ sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
+ skb_to_sgvec(skb, dsg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + clen + alen);
+
+ spin_unlock_bh(&x->lock);
+
+ goto skip_cow2;
+ }
}
- seqhi = esp_tmp_seqhi(tmp);
- iv = esp_tmp_iv(aead, tmp, seqhilen);
- req = esp_tmp_req(aead, iv);
- sg = esp_req_sg(aead, req);
+cow:
+ err = skb_cow_data(skb, tailen, &trailer);
+ if (err < 0)
+ goto error;
+ nfrags = err;
- /* Fill padding... */
tail = skb_tail_pointer(trailer);
- if (tfclen) {
- memset(tail, 0, tfclen);
- tail += tfclen;
- }
- do {
- int i;
- for (i = 0; i < plen - 2; i++)
- tail[i] = i + 1;
- } while (0);
- tail[plen - 2] = plen - 2;
- tail[plen - 1] = *skb_mac_header(skb);
- pskb_put(skb, trailer, clen - skb->len + alen);
+ esph = ip_esp_hdr(skb);
+skip_cow:
+ esp_output_fill_trailer(tail, tfclen, plen, proto);
+
+ pskb_put(skb, trailer, clen - skb->len + alen);
skb_push(skb, -skb_network_offset(skb));
- esph = ip_esp_hdr(skb);
- *skb_mac_header(skb) = IPPROTO_ESP;
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+ esph->spi = x->id.spi;
- aead_request_set_callback(req, 0, esp_output_done, skb);
-
- /* For ESN we move the header forward by 4 bytes to
- * accomodate the high bits. We will move it back after
- * encryption.
- */
- if ((x->props.flags & XFRM_STATE_ESN)) {
- esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
- *seqhi = esph->spi;
- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
- aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+ tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto error;
}
- esph->spi = x->id.spi;
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
+ req = esp_tmp_req(aead, iv);
+ sg = esp_req_sg(aead, req);
+ dsg = sg;
+
+ esph = esp_output_set_esn(skb, esph, seqhi);
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg,
(unsigned char *)esph - skb->data,
assoclen + ivlen + clen + alen);
- aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
+skip_cow2:
+ if ((x->props.flags & XFRM_STATE_ESN))
+ aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+ else
+ aead_request_set_callback(req, 0, esp_output_done, skb);
+
+ aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
aead_request_set_ad(req, assoclen);
seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
@@ -278,6 +439,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
esp_output_restore_header(skb);
}
+ if (sg != dsg)
+ esp_ssg_unref(x, tmp);
kfree(tmp);
error:
@@ -343,6 +506,23 @@ static void esp_input_restore_header(struct sk_buff *skb)
__skb_pull(skb, 4);
}
+static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+ struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;
+
+ /* For ESN we move the header forward by 4 bytes to
+ * accommodate the high bits. We will move it back after
+ * decryption.
+ */
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ esph = (void *)skb_push(skb, 4);
+ *seqhi = esph->spi;
+ esph->spi = esph->seq_no;
+ esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+ }
+}
+
static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
@@ -378,14 +558,6 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
}
- nfrags = skb_cow_data(skb, 0, &trailer);
- if (nfrags < 0) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = -ENOMEM;
-
assoclen = sizeof(*esph);
seqhilen = 0;
@@ -394,6 +566,27 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
assoclen += seqhilen;
}
+ if (!skb_cloned(skb)) {
+ if (!skb_is_nonlinear(skb)) {
+ nfrags = 1;
+
+ goto skip_cow;
+ } else if (!skb_has_frag_list(skb)) {
+ nfrags = skb_shinfo(skb)->nr_frags;
+ nfrags++;
+
+ goto skip_cow;
+ }
+ }
+
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (nfrags < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+skip_cow:
+ ret = -ENOMEM;
tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
if (!tmp)
goto out;
@@ -404,26 +597,17 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
- skb->ip_summed = CHECKSUM_NONE;
+ esp_input_set_header(skb, seqhi);
- esph = (struct ip_esp_hdr *)skb->data;
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg, 0, skb->len);
- aead_request_set_callback(req, 0, esp_input_done, skb);
+ skb->ip_summed = CHECKSUM_NONE;
- /* For ESN we move the header forward by 4 bytes to
- * accomodate the high bits. We will move it back after
- * decryption.
- */
- if ((x->props.flags & XFRM_STATE_ESN)) {
- esph = (void *)skb_push(skb, 4);
- *seqhi = esph->spi;
- esph->spi = esph->seq_no;
- esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+ if ((x->props.flags & XFRM_STATE_ESN))
aead_request_set_callback(req, 0, esp_input_done_esn, skb);
- }
-
- sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ else
+ aead_request_set_callback(req, 0, esp_input_done, skb);
aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
aead_request_set_ad(req, assoclen);
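
For reference, the esp_output_fill_trailer() helper factored out above builds the RFC 4303 ESP trailer: optional TFC padding (zeroes), self-describing pad bytes 1, 2, 3, ..., a pad-length byte, and the next-header byte. The following standalone sketch (plain userspace C, not kernel code; the sample lengths and protocol number are illustrative) reproduces that layout:

#include <stdio.h>
#include <string.h>

static void fill_trailer(unsigned char *tail, int tfclen, int plen,
                         unsigned char proto)
{
        int i;

        memset(tail, 0, tfclen);                /* TFC padding */
        tail += tfclen;
        for (i = 0; i < plen - 2; i++)          /* pad bytes 1, 2, 3, ... */
                tail[i] = i + 1;
        tail[plen - 2] = plen - 2;              /* pad length */
        tail[plen - 1] = proto;                 /* next header */
}

int main(void)
{
        unsigned char tail[32];
        int tfclen = 0, plen = 6;
        int i;

        fill_trailer(tail, tfclen, plen, 41);   /* 41 = IPPROTO_IPV6, sample value */
        for (i = 0; i < tfclen + plen; i++)
                printf("%02x ", tail[i]);
        printf("\n");                           /* prints: 01 02 03 04 04 29 */
        return 0;
}
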
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
new file mode 100644
index 000000000000..d914eb93204a
--- /dev/null
+++ b/net/ipv6/esp6_offload.c
@@ -0,0 +1,108 @@
+/*
+ * IPV6 GSO/GRO offload support
+ * Linux INET implementation
+ *
+ * Copyright (C) 2016 secunet Security Networks AG
+ * Author: Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * ESP GRO support
+ */
+
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <net/protocol.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <net/ip.h>
+#include <net/xfrm.h>
+#include <net/esp.h>
+#include <linux/scatterlist.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+#include <linux/icmpv6.h>
+
+static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ int offset = skb_gro_offset(skb);
+ struct xfrm_offload *xo;
+ struct xfrm_state *x;
+ __be32 seq;
+ __be32 spi;
+ int err;
+
+ skb_pull(skb, offset);
+
+ if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
+ goto out;
+
+ err = secpath_set(skb);
+ if (err)
+ goto out;
+
+ if (skb->sp->len == XFRM_MAX_DEPTH)
+ goto out;
+
+ x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+ (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
+ spi, IPPROTO_ESP, AF_INET6);
+ if (!x)
+ goto out;
+
+ skb->sp->xvec[skb->sp->len++] = x;
+ skb->sp->olen++;
+
+ xo = xfrm_offload(skb);
+ if (!xo) {
+ xfrm_state_put(x);
+ goto out;
+ }
+ xo->flags |= XFRM_GRO;
+
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
+ XFRM_SPI_SKB_CB(skb)->seq = seq;
+
+ /* We don't need to handle errors from xfrm_input, it does all
+ * the error handling and frees the resources on error. */
+ xfrm_input(skb, IPPROTO_ESP, spi, -2);
+
+ return ERR_PTR(-EINPROGRESS);
+out:
+ skb_push(skb, offset);
+ NAPI_GRO_CB(skb)->same_flow = 0;
+ NAPI_GRO_CB(skb)->flush = 1;
+
+ return NULL;
+}
+
+static const struct net_offload esp6_offload = {
+ .callbacks = {
+ .gro_receive = esp6_gro_receive,
+ },
+};
+
+static int __init esp6_offload_init(void)
+{
+ return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
+}
+
+static void __exit esp6_offload_exit(void)
+{
+ inet6_del_offload(&esp6_offload, IPPROTO_ESP);
+}
+
+module_init(esp6_offload_init);
+module_exit(esp6_offload_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 3036f665e6c8..230b5aac9f03 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -110,19 +110,17 @@ static const struct inet6_protocol icmpv6_protocol = {
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
+/* Called with BH disabled */
static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
struct sock *sk;
- local_bh_disable();
-
sk = icmpv6_sk(net);
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path (f.e. SIT or
* ip6ip6 tunnel) signals dst_link_failure() for an
* outgoing ICMP6 packet.
*/
- local_bh_enable();
return NULL;
}
return sk;
@@ -130,7 +128,7 @@ static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
- spin_unlock_bh(&sk->sk_lock.slock);
+ spin_unlock(&sk->sk_lock.slock);
}
/*
@@ -168,6 +166,30 @@ static bool is_ineligible(const struct sk_buff *skb)
return false;
}
+static bool icmpv6_mask_allow(int type)
+{
+ /* Informational messages are not limited. */
+ if (type & ICMPV6_INFOMSG_MASK)
+ return true;
+
+ /* Do not limit pmtu discovery, it would break it. */
+ if (type == ICMPV6_PKT_TOOBIG)
+ return true;
+
+ return false;
+}
+
+static bool icmpv6_global_allow(int type)
+{
+ if (icmpv6_mask_allow(type))
+ return true;
+
+ if (icmp_global_allow())
+ return true;
+
+ return false;
+}
+
/*
* Check the ICMP output rate limit
*/
@@ -178,12 +200,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
struct dst_entry *dst;
bool res = false;
- /* Informational messages are not limited. */
- if (type & ICMPV6_INFOMSG_MASK)
- return true;
-
- /* Do not limit pmtu discovery, it would break it. */
- if (type == ICMPV6_PKT_TOOBIG)
+ if (icmpv6_mask_allow(type))
return true;
/*
@@ -200,20 +217,16 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
} else {
struct rt6_info *rt = (struct rt6_info *)dst;
int tmo = net->ipv6.sysctl.icmpv6_time;
+ struct inet_peer *peer;
/* Give more bandwidth to wider prefixes. */
if (rt->rt6i_dst.plen < 128)
tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
- if (icmp_global_allow()) {
- struct inet_peer *peer;
-
- peer = inet_getpeer_v6(net->ipv6.peers,
- &fl6->daddr, 1);
- res = inet_peer_xrlim_allow(peer, tmo);
- if (peer)
- inet_putpeer(peer);
- }
+ peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr, 1);
+ res = inet_peer_xrlim_allow(peer, tmo);
+ if (peer)
+ inet_putpeer(peer);
}
dst_release(dst);
return res;
@@ -474,6 +487,13 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
return;
}
+ /* Needed by both icmp_global_allow and icmpv6_xmit_lock */
+ local_bh_disable();
+
+ /* Check global sysctl_icmp_msgs_per_sec ratelimit */
+ if (!icmpv6_global_allow(type))
+ goto out_bh_enable;
+
mip6_addr_swap(skb);
memset(&fl6, 0, sizeof(fl6));
@@ -492,7 +512,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
sk = icmpv6_xmit_lock(net);
if (!sk)
- return;
+ goto out_bh_enable;
+
sk->sk_mark = mark;
np = inet6_sk(sk);
@@ -552,6 +573,8 @@ out_dst_release:
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
+out_bh_enable:
+ local_bh_enable();
}
/* Slightly more convenient version of icmp6_send.
@@ -665,9 +688,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
fl6.flowi6_uid = sock_net_uid(net, NULL);
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+ local_bh_disable();
sk = icmpv6_xmit_lock(net);
if (!sk)
- return;
+ goto out_bh_enable;
sk->sk_mark = mark;
np = inet6_sk(sk);
@@ -709,6 +733,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
+out_bh_enable:
+ local_bh_enable();
}
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 13b5e85fe0d5..ce1aae4a7fc8 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -115,7 +115,7 @@ static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
[ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
};
-static int ila_build_state(struct net_device *dev, struct nlattr *nla,
+static int ila_build_state(struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts)
{
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 75c308239243..9a31d13bf180 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -28,46 +28,6 @@
#include <net/inet6_connection_sock.h>
#include <net/sock_reuseport.h>
-int inet6_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax,
- bool reuseport_ok)
-{
- const struct sock *sk2;
- bool reuse = !!sk->sk_reuse;
- bool reuseport = !!sk->sk_reuseport && reuseport_ok;
- kuid_t uid = sock_i_uid((struct sock *)sk);
-
- /* We must walk the whole port owner list in this case. -DaveM */
- /*
- * See comment in inet_csk_bind_conflict about sock lookup
- * vs net namespaces issues.
- */
- sk_for_each_bound(sk2, &tb->owners) {
- if (sk != sk2 &&
- (!sk->sk_bound_dev_if ||
- !sk2->sk_bound_dev_if ||
- sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
- if ((!reuse || !sk2->sk_reuse ||
- sk2->sk_state == TCP_LISTEN) &&
- (!reuseport || !sk2->sk_reuseport ||
- rcu_access_pointer(sk->sk_reuseport_cb) ||
- (sk2->sk_state != TCP_TIME_WAIT &&
- !uid_eq(uid,
- sock_i_uid((struct sock *)sk2))))) {
- if (ipv6_rcv_saddr_equal(sk, sk2, true))
- break;
- }
- if (!relax && reuse && sk2->sk_reuse &&
- sk2->sk_state != TCP_LISTEN &&
- ipv6_rcv_saddr_equal(sk, sk2, true))
- break;
- }
- }
-
- return sk2 != NULL;
-}
-EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
-
struct dst_entry *inet6_csk_route_req(const struct sock *sk,
struct flowi6 *fl6,
const struct request_sock *req,
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 02761c9fe43e..d0900918a19e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -268,54 +268,10 @@ int inet6_hash(struct sock *sk)
if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
- err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
+ err = __inet_hash(sk, NULL);
local_bh_enable();
}
return err;
}
EXPORT_SYMBOL_GPL(inet6_hash);
-
-/* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
- * only, and any IPv4 addresses if not IPv6 only
- * match_wildcard == false: addresses must be exactly the same, i.e.
- * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
- * and 0.0.0.0 equals to 0.0.0.0 only
- */
-int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
- bool match_wildcard)
-{
- const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
- int sk2_ipv6only = inet_v6_ipv6only(sk2);
- int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
- int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
-
- /* if both are mapped, treat as IPv4 */
- if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
- if (!sk2_ipv6only) {
- if (sk->sk_rcv_saddr == sk2->sk_rcv_saddr)
- return 1;
- if (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr)
- return match_wildcard;
- }
- return 0;
- }
-
- if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
- return 1;
-
- if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
- !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
- return 1;
-
- if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
- !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
- return 1;
-
- if (sk2_rcv_saddr6 &&
- ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
- return 1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ipv6_rcv_saddr_equal);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ef5485204522..e4266746e4a2 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -318,6 +318,16 @@ static int fib6_dump_node(struct fib6_walker *w)
w->leaf = rt;
return 1;
}
+
+ /* Multipath routes are dumped in one route with the
+ * RTA_MULTIPATH attribute. Jump 'rt' to point to the
+ * last sibling of this route (no need to dump the
+ * sibling routes again)
+ */
+ if (rt->rt6i_nsiblings)
+ rt = list_last_entry(&rt->rt6i_siblings,
+ struct rt6_info,
+ rt6i_siblings);
}
w->leaf = NULL;
return 0;
@@ -746,6 +756,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
u16 nlflags = NLM_F_EXCL;
int err;
+ if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND))
+ nlflags |= NLM_F_APPEND;
+
ins = &fn->leaf;
for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
@@ -868,7 +881,8 @@ add:
*ins = rt;
rt->rt6i_node = fn;
atomic_inc(&rt->rt6i_ref);
- inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
+ if (!info->skip_notify)
+ inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
if (!(fn->fn_flags & RTN_RTINFO)) {
@@ -894,7 +908,8 @@ add:
rt->rt6i_node = fn;
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
- inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
+ if (!info->skip_notify)
+ inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
@@ -1439,7 +1454,8 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
fib6_purge_rt(rt, fn, net);
- inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
+ if (!info->skip_notify)
+ inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
rt6_release(rt);
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 630b73be5999..6fcb7cb49bb2 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -486,11 +486,6 @@ drop:
return 0;
}
-struct ipv6_tel_txoption {
- struct ipv6_txoptions ops;
- __u8 dst_opt[8];
-};
-
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
return iptunnel_handle_offloads(skb,
@@ -1003,6 +998,9 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
netif_keep_dst(dev);
+ /* This perm addr will be used as interface identifier by IPv6 */
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->perm_addr);
}
static int ip6gre_tunnel_init_common(struct net_device *dev)
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index fc7b4017ba24..0838e6d01d2e 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -253,7 +253,7 @@ out_unlock:
rcu_read_unlock();
out:
- NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_flush_final(skb, pp, flush);
return pp;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 7cebee58e55b..528b3c1f3fde 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -119,7 +119,8 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
if (!IS_ERR(neigh)) {
- ret = dst_neigh_output(dst, neigh, skb);
+ sock_confirm_neigh(skb, neigh);
+ ret = neigh_output(neigh, skb);
rcu_read_unlock_bh();
return ret;
}
@@ -1149,6 +1150,9 @@ static inline int ip6_ufo_append_data(struct sock *sk,
skb->protocol = htons(ETH_P_IPV6);
skb->csum = 0;
+ if (flags & MSG_CONFIRM)
+ skb_set_dst_pending_confirm(skb, 1);
+
__skb_queue_tail(queue, skb);
} else if (skb_is_gso(skb)) {
goto append;
@@ -1521,6 +1525,9 @@ alloc_new_skb:
exthdrlen = 0;
dst_exthdrlen = 0;
+ if ((flags & MSG_CONFIRM) && !skb_prev)
+ skb_set_dst_pending_confirm(skb, 1);
+
/*
* Put the packet on the pending queue
*/
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d82042c8d8fd..c795fee372c4 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -49,6 +49,7 @@
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <linux/etherdevice.h>
#define IP6_VTI_HASH_SIZE_SHIFT 5
#define IP6_VTI_HASH_SIZE (1 << IP6_VTI_HASH_SIZE_SHIFT)
@@ -842,6 +843,9 @@ static void vti6_dev_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
netif_keep_dst(dev);
+ /* This perm addr will be used as interface identifier by IPv6 */
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->perm_addr);
}
/**
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 604d8953c775..babaf3ec2742 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2243,8 +2243,10 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
int ct;
/* If cache is unresolved, don't try to parse IIF and OIF */
- if (c->mf6c_parent >= MAXMIFS)
+ if (c->mf6c_parent >= MAXMIFS) {
+ rtm->rtm_flags |= RTNH_F_UNRESOLVED;
return -ENOENT;
+ }
if (MIF_EXISTS(mrt, c->mf6c_parent) &&
nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
@@ -2286,7 +2288,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
}
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
- int nowait, u32 portid)
+ u32 portid)
{
int err;
struct mr6_table *mrt;
@@ -2313,11 +2315,6 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
struct net_device *dev;
int vif;
- if (nowait) {
- read_unlock(&mrt_lock);
- return -EAGAIN;
- }
-
dev = skb->dev;
if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
read_unlock(&mrt_lock);
@@ -2355,7 +2352,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
return err;
}
- if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
+ if (rtm->rtm_flags & RTM_F_NOTIFY)
cache->mfc_flags |= MFC_NOTIFY;
err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ee97c44e2aa0..a531ba032b85 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -595,16 +595,24 @@ done:
if (val) {
struct net_device *dev;
+ int midx;
- if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
- goto e_inval;
+ rcu_read_lock();
- dev = dev_get_by_index(net, val);
+ dev = dev_get_by_index_rcu(net, val);
if (!dev) {
+ rcu_read_unlock();
retv = -ENODEV;
break;
}
- dev_put(dev);
+ midx = l3mdev_master_ifindex_rcu(dev);
+
+ rcu_read_unlock();
+
+ if (sk->sk_bound_dev_if &&
+ sk->sk_bound_dev_if != val &&
+ (!midx || midx != sk->sk_bound_dev_if))
+ goto e_inval;
}
np->mcast_oif = val;
retv = 0;
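
The ipv6_sockglue.c hunk above relaxes the IPV6_MULTICAST_IF check so that a socket bound to an L3 master device (e.g. a VRF) may select an interface enslaved to that master, not only the master itself. A hedged userspace sketch of setting the option; the helper name and the interface lookup are illustrative, not part of the patch:

#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Select the outgoing interface for multicast on an IPv6 socket. */
static int set_mcast_oif(int fd, const char *ifname)
{
        int ifindex = if_nametoindex(ifname);

        if (!ifindex)
                return -1;
        return setsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_IF,
                          &ifindex, sizeof(ifindex));
}

With the change, passing the ifindex of a VRF slave succeeds when the socket is bound to that VRF; previously the setsockopt returned -EINVAL unless the ifindex matched sk_bound_dev_if exactly.
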
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 25a022d41a70..1e15c54fd5e2 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -855,10 +855,6 @@ copy_entries_to_user(unsigned int total_size,
return PTR_ERR(counters);
loc_cpu_entry = private->entries;
- if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
- ret = -EFAULT;
- goto free_counters;
- }
/* FIXME: use iterator macros --RR */
/* ... then go back and fix counters and names */
@@ -868,6 +864,10 @@ copy_entries_to_user(unsigned int total_size,
const struct xt_entry_target *t;
e = (struct ip6t_entry *)(loc_cpu_entry + off);
+ if (copy_to_user(userptr + off, e, sizeof(*e))) {
+ ret = -EFAULT;
+ goto free_counters;
+ }
if (copy_to_user(userptr + off
+ offsetof(struct ip6t_entry, counters),
&counters[num],
@@ -881,23 +881,14 @@ copy_entries_to_user(unsigned int total_size,
i += m->u.match_size) {
m = (void *)e + i;
- if (copy_to_user(userptr + off + i
- + offsetof(struct xt_entry_match,
- u.user.name),
- m->u.kernel.match->name,
- strlen(m->u.kernel.match->name)+1)
- != 0) {
+ if (xt_match_to_user(m, userptr + off + i)) {
ret = -EFAULT;
goto free_counters;
}
}
t = ip6t_get_target_c(e);
- if (copy_to_user(userptr + off + e->target_offset
- + offsetof(struct xt_entry_target,
- u.user.name),
- t->u.kernel.target->name,
- strlen(t->u.kernel.target->name)+1) != 0) {
+ if (xt_target_to_user(t, userptr + off + e->target_offset)) {
ret = -EFAULT;
goto free_counters;
}
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index 590f767db5d4..a379d2f79b19 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -112,6 +112,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
.table = "mangle",
.target = ip6t_snpt_tg,
.targetsize = sizeof(struct ip6t_npt_tginfo),
+ .usersize = offsetof(struct ip6t_npt_tginfo, adjustment),
.checkentry = ip6t_npt_checkentry,
.family = NFPROTO_IPV6,
.hooks = (1 << NF_INET_LOCAL_IN) |
@@ -123,6 +124,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
.table = "mangle",
.target = ip6t_dnpt_tg,
.targetsize = sizeof(struct ip6t_npt_tginfo),
+ .usersize = offsetof(struct ip6t_npt_tginfo, adjustment),
.checkentry = ip6t_npt_checkentry,
.family = NFPROTO_IPV6,
.hooks = (1 << NF_INET_PRE_ROUTING) |
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 98c8dd38575a..4ef1ddd4bbbd 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -71,8 +71,7 @@ synproxy_send_tcp(struct net *net,
skb_dst_set(nskb, dst);
if (nfct) {
- nskb->nfct = nfct;
- nskb->nfctinfo = ctinfo;
+ nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
nf_conntrack_get(nfct);
}
@@ -121,8 +120,8 @@ synproxy_send_client_synack(struct net *net,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
- niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
+ IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
}
static void
@@ -244,8 +243,8 @@ synproxy_send_client_ack(struct net *net,
synproxy_build_options(nth, opts);
- synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
- niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
+ IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
}
static bool
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index f5a61bc3ec2b..d2c2ccbfbe72 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -145,15 +145,15 @@ static int
icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int icmp6off,
- enum ip_conntrack_info *ctinfo,
unsigned int hooknum)
{
struct nf_conntrack_tuple intuple, origtuple;
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_l4proto *inproto;
+ enum ip_conntrack_info ctinfo;
struct nf_conntrack_zone tmp;
- NF_CT_ASSERT(skb->nfct == NULL);
+ NF_CT_ASSERT(!skb_nfct(skb));
/* Are they talking about one of our connections? */
if (!nf_ct_get_tuplepr(skb,
@@ -176,7 +176,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
return -NF_ACCEPT;
}
- *ctinfo = IP_CT_RELATED;
+ ctinfo = IP_CT_RELATED;
h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
&intuple);
@@ -185,19 +185,18 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
return -NF_ACCEPT;
} else {
if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
- *ctinfo += IP_CT_IS_REPLY;
+ ctinfo += IP_CT_IS_REPLY;
}
/* Update skb to refer to this connection */
- skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
- skb->nfctinfo = *ctinfo;
+ nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
return NF_ACCEPT;
}
static int
icmpv6_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb, unsigned int dataoff,
- enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
+ u8 pf, unsigned int hooknum)
{
const struct icmp6hdr *icmp6h;
struct icmp6hdr _ih;
@@ -222,9 +221,8 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
type = icmp6h->icmp6_type - 130;
if (type >= 0 && type < sizeof(noct_valid_new) &&
noct_valid_new[type]) {
- skb->nfct = &nf_ct_untracked_get()->ct_general;
- skb->nfctinfo = IP_CT_NEW;
- nf_conntrack_get(skb->nfct);
+ nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
+ nf_conntrack_get(skb_nfct(skb));
return NF_ACCEPT;
}
@@ -232,7 +230,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
if (icmp6h->icmp6_type >= 128)
return NF_ACCEPT;
- return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
+ return icmpv6_error_message(net, tmpl, skb, dataoff, hooknum);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 8e0bdd058787..ada60d1a991b 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -37,7 +37,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
{
u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- if (skb->nfct) {
+ if (skb_nfct(skb)) {
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
@@ -61,7 +61,7 @@ static unsigned int ipv6_defrag(void *priv,
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* Previously seen (loopback)? */
- if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
+ if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
return NF_ACCEPT;
#endif
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
index 4a84b5ad9ecb..888ecd106e5f 100644
--- a/net/ipv6/netfilter/nf_dup_ipv6.c
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -57,10 +57,9 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
return;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- nf_conntrack_put(skb->nfct);
- skb->nfct = &nf_ct_untracked_get()->ct_general;
- skb->nfctinfo = IP_CT_NEW;
- nf_conntrack_get(skb->nfct);
+ nf_reset(skb);
+ nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
+ nf_conntrack_get(skb_nfct(skb));
#endif
if (hooknum == NF_INET_PRE_ROUTING ||
hooknum == NF_INET_LOCAL_IN) {
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index 57d86066a13b..055c51b80f5d 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -351,7 +351,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
struct nf_log_buf *m;
/* FIXME: Disabled from containers until syslog ns is supported */
- if (!net_eq(net, &init_net))
+ if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
return;
m = nf_log_buf_open();
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index e1f8b34d7a2e..9b522fa90e6d 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -126,12 +126,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
return PTR_ERR(dst);
rt = (struct rt6_info *) dst;
- np = inet6_sk(sk);
- if (!np) {
- err = -EBADF;
- goto dst_err_out;
- }
-
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
@@ -166,7 +160,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
release_sock(sk);
-dst_err_out:
dst_release(dst);
if (err)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ea89073c8247..f174e76e6505 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -654,6 +654,9 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->ip_summed = CHECKSUM_NONE;
+ if (flags & MSG_CONFIRM)
+ skb_set_dst_pending_confirm(skb, 1);
+
skb->transport_header = skb->network_header;
err = memcpy_from_msg(iph, msg, length);
if (err)
@@ -934,7 +937,8 @@ out:
txopt_put(opt_to_free);
return err < 0 ? err : len;
do_confirm:
- dst_confirm(dst);
+ if (msg->msg_flags & MSG_PROBE)
+ dst_confirm_neigh(dst, &fl6.daddr);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7ea85370c11c..f54f4265b37f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -98,6 +98,12 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static void rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
+static size_t rt6_nlmsg_size(struct rt6_info *rt);
+static int rt6_fill_node(struct net *net,
+ struct sk_buff *skb, struct rt6_info *rt,
+ struct in6_addr *dst, struct in6_addr *src,
+ int iif, int type, u32 portid, u32 seq,
+ unsigned int flags);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
@@ -217,6 +223,21 @@ static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
return neigh_create(&nd_tbl, daddr, dst->dev);
}
+static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+{
+ struct net_device *dev = dst->dev;
+ struct rt6_info *rt = (struct rt6_info *)dst;
+
+ daddr = choose_neigh_daddr(rt, NULL, daddr);
+ if (!daddr)
+ return;
+ if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
+ return;
+ if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
+ return;
+ __ipv6_confirm_neigh(dev, daddr);
+}
+
static struct dst_ops ip6_dst_ops_template = {
.family = AF_INET6,
.gc = ip6_dst_gc,
@@ -233,6 +254,7 @@ static struct dst_ops ip6_dst_ops_template = {
.redirect = rt6_do_redirect,
.local_out = __ip6_local_out,
.neigh_lookup = ip6_neigh_lookup,
+ .confirm_neigh = ip6_confirm_neigh,
};
static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
@@ -1359,6 +1381,7 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
const struct ipv6hdr *iph, u32 mtu)
{
+ const struct in6_addr *daddr, *saddr;
struct rt6_info *rt6 = (struct rt6_info *)dst;
if (rt6->rt6i_flags & RTF_LOCAL)
@@ -1367,26 +1390,26 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
if (dst_metric_locked(dst, RTAX_MTU))
return;
- dst_confirm(dst);
+ if (iph) {
+ daddr = &iph->daddr;
+ saddr = &iph->saddr;
+ } else if (sk) {
+ daddr = &sk->sk_v6_daddr;
+ saddr = &inet6_sk(sk)->saddr;
+ } else {
+ daddr = NULL;
+ saddr = NULL;
+ }
+ dst_confirm_neigh(dst, daddr);
mtu = max_t(u32, mtu, IPV6_MIN_MTU);
if (mtu >= dst_mtu(dst))
return;
if (!rt6_cache_allowed_for_pmtu(rt6)) {
rt6_do_update_pmtu(rt6, mtu);
- } else {
- const struct in6_addr *daddr, *saddr;
+ } else if (daddr) {
struct rt6_info *nrt6;
- if (iph) {
- daddr = &iph->daddr;
- saddr = &iph->saddr;
- } else if (sk) {
- daddr = &sk->sk_v6_daddr;
- saddr = &inet6_sk(sk)->saddr;
- } else {
- return;
- }
nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
if (nrt6) {
rt6_do_update_pmtu(nrt6, mtu);
@@ -1897,7 +1920,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
if (cfg->fc_encap) {
struct lwtunnel_state *lwtstate;
- err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+ err = lwtunnel_build_state(cfg->fc_encap_type,
cfg->fc_encap, AF_INET6, cfg,
&lwtstate);
if (err)
@@ -2143,6 +2166,54 @@ int ip6_del_rt(struct rt6_info *rt)
return __ip6_del_rt(rt, &info);
}
+static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
+{
+ struct nl_info *info = &cfg->fc_nlinfo;
+ struct sk_buff *skb = NULL;
+ struct fib6_table *table;
+ int err;
+
+ table = rt->rt6i_table;
+ write_lock_bh(&table->tb6_lock);
+
+ if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
+ struct rt6_info *sibling, *next_sibling;
+
+ /* prefer to send a single notification with all hops */
+ skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
+ if (skb) {
+ u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
+
+ if (rt6_fill_node(info->nl_net, skb, rt,
+ NULL, NULL, 0, RTM_DELROUTE,
+ info->portid, seq, 0) < 0) {
+ kfree_skb(skb);
+ skb = NULL;
+ } else
+ info->skip_notify = 1;
+ }
+
+ list_for_each_entry_safe(sibling, next_sibling,
+ &rt->rt6i_siblings,
+ rt6i_siblings) {
+ err = fib6_del(sibling, info);
+ if (err)
+ goto out;
+ }
+ }
+
+ err = fib6_del(rt, info);
+out:
+ write_unlock_bh(&table->tb6_lock);
+ ip6_rt_put(rt);
+
+ if (skb) {
+ rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV6_ROUTE,
+ info->nlh, gfp_any());
+ }
+ return err;
+}
+
static int ip6_route_del(struct fib6_config *cfg)
{
struct fib6_table *table;
@@ -2179,7 +2250,11 @@ static int ip6_route_del(struct fib6_config *cfg)
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
- return __ip6_del_rt(rt, &cfg->fc_nlinfo);
+ /* if gateway was specified only delete the one hop */
+ if (cfg->fc_flags & RTF_GATEWAY)
+ return __ip6_del_rt(rt, &cfg->fc_nlinfo);
+
+ return __ip6_del_rt_siblings(rt, cfg);
}
}
read_unlock_bh(&table->tb6_lock);
@@ -2258,7 +2333,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
* Look, redirects are sent only in response to data packets,
* so that this nexthop apparently is reachable. --ANK
*/
- dst_confirm(&rt->dst);
+ dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
if (!neigh)
@@ -2634,6 +2709,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
rt->dst.output = ip6_output;
rt->rt6i_idev = idev;
+ rt->rt6i_protocol = RTPROT_KERNEL;
rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
if (anycast)
rt->rt6i_flags |= RTF_ANYCAST;
@@ -2711,13 +2787,16 @@ struct arg_dev_net {
struct net *net;
};
+/* called with write lock held for table with rt */
static int fib6_ifdown(struct rt6_info *rt, void *arg)
{
const struct arg_dev_net *adn = arg;
const struct net_device *dev = adn->dev;
if ((rt->dst.dev == dev || !dev) &&
- rt != adn->net->ipv6.ip6_null_entry)
+ rt != adn->net->ipv6.ip6_null_entry &&
+ (rt->rt6i_nsiblings == 0 ||
+ !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
return -1;
return 0;
@@ -2948,7 +3027,7 @@ static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
struct rt6_nh *nh;
list_for_each_entry(nh, rt6_nh_list, next) {
- pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
+ pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
nh->r_cfg.fc_ifindex);
}
@@ -2987,13 +3066,37 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list,
return 0;
}
+static void ip6_route_mpath_notify(struct rt6_info *rt,
+ struct rt6_info *rt_last,
+ struct nl_info *info,
+ __u16 nlflags)
+{
+ /* if this is an APPEND route, then rt points to the first route
+ * inserted and rt_last points to last route inserted. Userspace
+ * wants a consistent dump of the route which starts at the first
+ * nexthop. Since sibling routes are always added at the end of
+ * the list, find the first sibling of the last route appended
+ */
+ if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
+ rt = list_first_entry(&rt_last->rt6i_siblings,
+ struct rt6_info,
+ rt6i_siblings);
+ }
+
+ if (rt)
+ inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
+}
+
static int ip6_route_multipath_add(struct fib6_config *cfg)
{
+ struct rt6_info *rt_notif = NULL, *rt_last = NULL;
+ struct nl_info *info = &cfg->fc_nlinfo;
struct fib6_config r_cfg;
struct rtnexthop *rtnh;
struct rt6_info *rt;
struct rt6_nh *err_nh;
struct rt6_nh *nh, *nh_safe;
+ __u16 nlflags;
int remaining;
int attrlen;
int err = 1;
@@ -3002,6 +3105,10 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
LIST_HEAD(rt6_nh_list);
+ nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
+ if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
+ nlflags |= NLM_F_APPEND;
+
remaining = cfg->fc_mp_len;
rtnh = (struct rtnexthop *)cfg->fc_mp;
@@ -3044,9 +3151,20 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
rtnh = rtnh_next(rtnh, &remaining);
}
+ /* for add and replace send one notification with all nexthops.
+ * Skip the notification in fib6_add_rt2node and send one with
+ * the full route when done
+ */
+ info->skip_notify = 1;
+
err_nh = NULL;
list_for_each_entry(nh, &rt6_nh_list, next) {
- err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
+ rt_last = nh->rt6_info;
+ err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc);
+ /* save reference to first route for notification */
+ if (!rt_notif && !err)
+ rt_notif = nh->rt6_info;
+
/* nh->rt6_info is used or freed at this point, reset to NULL*/
nh->rt6_info = NULL;
if (err) {
@@ -3068,9 +3186,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
nhn++;
}
+ /* success ... tell user about new route */
+ ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
goto cleanup;
add_errout:
+ /* send notification for routes that were added so that
+ * the delete notifications sent by ip6_route_del are
+ * coherent
+ */
+ if (rt_notif)
+ ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
+
/* Delete routes that were already added */
list_for_each_entry(nh, &rt6_nh_list, next) {
if (err_nh == nh)
@@ -3138,8 +3265,10 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
if (cfg.fc_mp)
return ip6_route_multipath_del(&cfg);
- else
+ else {
+ cfg.fc_delete_all_nh = 1;
return ip6_route_del(&cfg);
+ }
}
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -3157,8 +3286,20 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
return ip6_route_add(&cfg);
}
-static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
+static size_t rt6_nlmsg_size(struct rt6_info *rt)
{
+ int nexthop_len = 0;
+
+ if (rt->rt6i_nsiblings) {
+ nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
+ + NLA_ALIGN(sizeof(struct rtnexthop))
+ + nla_total_size(16) /* RTA_GATEWAY */
+ + nla_total_size(4) /* RTA_OIF */
+ + lwtunnel_get_encap_size(rt->dst.lwtstate);
+
+ nexthop_len *= rt->rt6i_nsiblings;
+ }
+
return NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(16) /* RTA_SRC */
+ nla_total_size(16) /* RTA_DST */
@@ -3172,14 +3313,69 @@ static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
+ nla_total_size(sizeof(struct rta_cacheinfo))
+ nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
+ nla_total_size(1) /* RTA_PREF */
- + lwtunnel_get_encap_size(rt->dst.lwtstate);
+ + lwtunnel_get_encap_size(rt->dst.lwtstate)
+ + nexthop_len;
+}
+
+static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
+ unsigned int *flags)
+{
+ if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
+ *flags |= RTNH_F_LINKDOWN;
+ if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
+ *flags |= RTNH_F_DEAD;
+ }
+
+ if (rt->rt6i_flags & RTF_GATEWAY) {
+ if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
+ goto nla_put_failure;
+ }
+
+ if (rt->dst.dev &&
+ nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
+ goto nla_put_failure;
+
+ if (rt->dst.lwtstate &&
+ lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
+{
+ struct rtnexthop *rtnh;
+ unsigned int flags = 0;
+
+ rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
+ if (!rtnh)
+ goto nla_put_failure;
+
+ rtnh->rtnh_hops = 0;
+ rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
+
+ if (rt6_nexthop_info(skb, rt, &flags) < 0)
+ goto nla_put_failure;
+
+ rtnh->rtnh_flags = flags;
+
+ /* length of rtnetlink header + attributes */
+ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
}
static int rt6_fill_node(struct net *net,
struct sk_buff *skb, struct rt6_info *rt,
struct in6_addr *dst, struct in6_addr *src,
int iif, int type, u32 portid, u32 seq,
- int prefix, int nowait, unsigned int flags)
+ unsigned int flags)
{
u32 metrics[RTAX_MAX];
struct rtmsg *rtm;
@@ -3187,13 +3383,6 @@ static int rt6_fill_node(struct net *net,
long expires;
u32 table;
- if (prefix) { /* user wants prefix routes only */
- if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
- /* success since this is not a prefix route */
- return 1;
- }
- }
-
nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
if (!nlh)
return -EMSGSIZE;
@@ -3233,11 +3422,6 @@ static int rt6_fill_node(struct net *net,
else
rtm->rtm_type = RTN_UNICAST;
rtm->rtm_flags = 0;
- if (!netif_carrier_ok(rt->dst.dev)) {
- rtm->rtm_flags |= RTNH_F_LINKDOWN;
- if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
- rtm->rtm_flags |= RTNH_F_DEAD;
- }
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = rt->rt6i_protocol;
if (rt->rt6i_flags & RTF_DYNAMIC)
@@ -3271,19 +3455,12 @@ static int rt6_fill_node(struct net *net,
if (iif) {
#ifdef CONFIG_IPV6_MROUTE
if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
- int err = ip6mr_get_route(net, skb, rtm, nowait,
- portid);
-
- if (err <= 0) {
- if (!nowait) {
- if (err == 0)
- return 0;
- goto nla_put_failure;
- } else {
- if (err == -EMSGSIZE)
- goto nla_put_failure;
- }
- }
+ int err = ip6mr_get_route(net, skb, rtm, portid);
+
+ if (err == 0)
+ return 0;
+ if (err < 0)
+ goto nla_put_failure;
} else
#endif
if (nla_put_u32(skb, RTA_IIF, iif))
@@ -3308,17 +3485,35 @@ static int rt6_fill_node(struct net *net,
if (rtnetlink_put_metrics(skb, metrics) < 0)
goto nla_put_failure;
- if (rt->rt6i_flags & RTF_GATEWAY) {
- if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
- goto nla_put_failure;
- }
-
- if (rt->dst.dev &&
- nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
- goto nla_put_failure;
if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
goto nla_put_failure;
+ /* For multipath routes, walk the siblings list and add
+ * each as a nexthop within RTA_MULTIPATH.
+ */
+ if (rt->rt6i_nsiblings) {
+ struct rt6_info *sibling, *next_sibling;
+ struct nlattr *mp;
+
+ mp = nla_nest_start(skb, RTA_MULTIPATH);
+ if (!mp)
+ goto nla_put_failure;
+
+ if (rt6_add_nexthop(skb, rt) < 0)
+ goto nla_put_failure;
+
+ list_for_each_entry_safe(sibling, next_sibling,
+ &rt->rt6i_siblings, rt6i_siblings) {
+ if (rt6_add_nexthop(skb, sibling) < 0)
+ goto nla_put_failure;
+ }
+
+ nla_nest_end(skb, mp);
+ } else {
+ if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags) < 0)
+ goto nla_put_failure;
+ }
+
expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
@@ -3327,8 +3522,6 @@ static int rt6_fill_node(struct net *net,
if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
goto nla_put_failure;
- if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
- goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
@@ -3341,18 +3534,26 @@ nla_put_failure:
int rt6_dump_route(struct rt6_info *rt, void *p_arg)
{
struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
- int prefix;
+ struct net *net = arg->net;
+
+ if (rt == net->ipv6.ip6_null_entry)
+ return 0;
if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
- prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
- } else
- prefix = 0;
- return rt6_fill_node(arg->net,
+ /* user wants prefix routes only */
+ if (rtm->rtm_flags & RTM_F_PREFIX &&
+ !(rt->rt6i_flags & RTF_PREFIX_RT)) {
+ /* success since this is not a prefix route */
+ return 1;
+ }
+ }
+
+ return rt6_fill_node(net,
arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
- prefix, 0, NLM_F_MULTI);
+ NLM_F_MULTI);
}
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
@@ -3433,17 +3634,11 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
goto errout;
}
- /* Reserve room for dummy headers, this skb can pass
- through good chunk of routing engine.
- */
- skb_reset_mac_header(skb);
- skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
-
skb_dst_set(skb, &rt->dst);
err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, 0, 0);
+ nlh->nlmsg_seq, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
@@ -3470,7 +3665,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
goto errout;
err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
- event, info->portid, seq, 0, 0, nlm_flags);
+ event, info->portid, seq, nlm_flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
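
For reference, the RTA_MULTIPATH attribute built by rt6_add_nexthop() above nests one struct rtnexthop per nexthop, each followed by its own per-nexthop attributes (RTA_GATEWAY, RTA_OIF, encap), with rtnh_len covering the header plus those attributes. A minimal, hypothetical userspace-side walk of that layout (illustrative only, not part of this patch) could look like:

#include <linux/rtnetlink.h>
#include <stdio.h>

/* Illustrative only: walk the rtnexthop records inside an RTA_MULTIPATH
 * attribute received in an RTM_NEWROUTE message. */
static void dump_multipath(const struct rtattr *mp)
{
	struct rtnexthop *rtnh = RTA_DATA(mp);
	int len = RTA_PAYLOAD(mp);

	while (RTNH_OK(rtnh, len)) {
		/* rtnh_len spans the rtnexthop header and the per-nexthop
		 * attributes that follow it. */
		printf("nexthop: ifindex %d flags %#x\n",
		       rtnh->rtnh_ifindex, rtnh->rtnh_flags);
		len -= NLMSG_ALIGN(rtnh->rtnh_len);
		rtnh = RTNH_NEXT(rtnh);
	}
}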
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index 6ef3dfb6e811..f950cb53d5e3 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -45,7 +45,7 @@
#include <net/seg6_hmac.h>
#include <linux/random.h>
-static char * __percpu *hmac_ring;
+static DEFINE_PER_CPU(char [SEG6_HMAC_RING_SIZE], hmac_ring);
static int seg6_hmac_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
@@ -192,7 +192,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
*/
local_bh_disable();
- ring = *this_cpu_ptr(hmac_ring);
+ ring = this_cpu_ptr(hmac_ring);
off = ring;
/* source address */
@@ -353,27 +353,6 @@ out:
}
EXPORT_SYMBOL(seg6_push_hmac);
-static int seg6_hmac_init_ring(void)
-{
- int i;
-
- hmac_ring = alloc_percpu(char *);
-
- if (!hmac_ring)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- char *ring = kzalloc(SEG6_HMAC_RING_SIZE, GFP_KERNEL);
-
- if (!ring)
- return -ENOMEM;
-
- *per_cpu_ptr(hmac_ring, i) = ring;
- }
-
- return 0;
-}
-
static int seg6_hmac_init_algo(void)
{
struct seg6_hmac_algo *algo;
@@ -410,7 +389,8 @@ static int seg6_hmac_init_algo(void)
return -ENOMEM;
for_each_possible_cpu(cpu) {
- shash = kzalloc(shsize, GFP_KERNEL);
+ shash = kzalloc_node(shsize, GFP_KERNEL,
+ cpu_to_node(cpu));
if (!shash)
return -ENOMEM;
*per_cpu_ptr(algo->shashs, cpu) = shash;
@@ -422,16 +402,7 @@ static int seg6_hmac_init_algo(void)
int __init seg6_hmac_init(void)
{
- int ret;
-
- ret = seg6_hmac_init_ring();
- if (ret < 0)
- goto out;
-
- ret = seg6_hmac_init_algo();
-
-out:
- return ret;
+ return seg6_hmac_init_algo();
}
EXPORT_SYMBOL(seg6_hmac_init);
@@ -450,13 +421,6 @@ void seg6_hmac_exit(void)
struct seg6_hmac_algo *algo = NULL;
int i, alg_count, cpu;
- for_each_possible_cpu(i) {
- char *ring = *per_cpu_ptr(hmac_ring, i);
-
- kfree(ring);
- }
- free_percpu(hmac_ring);
-
alg_count = sizeof(hmac_algos) / sizeof(struct seg6_hmac_algo);
for (i = 0; i < alg_count; i++) {
algo = &hmac_algos[i];
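
The hunks above replace a dynamically allocated per-cpu pointer ring with a statically defined per-cpu buffer, so there is no allocation to fail or free and one fewer pointer dereference per use. A minimal sketch of the same access pattern with illustrative names (not from this file):

#include <linux/bottom_half.h>
#include <linux/percpu.h>

/* Illustrative only: a static per-cpu scratch buffer used the same way
 * hmac_ring is used above. */
static DEFINE_PER_CPU(char [256], scratch_buf);

static void use_scratch(void)
{
	char *buf;

	local_bh_disable();		/* stay on this CPU while using it */
	buf = this_cpu_ptr(scratch_buf);
	buf[0] = '\0';
	local_bh_enable();
}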
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index c46f8cbf5ab5..85582257d3af 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -55,8 +55,8 @@ static const struct nla_policy seg6_iptunnel_policy[SEG6_IPTUNNEL_MAX + 1] = {
[SEG6_IPTUNNEL_SRH] = { .type = NLA_BINARY },
};
-int nla_put_srh(struct sk_buff *skb, int attrtype,
- struct seg6_iptunnel_encap *tuninfo)
+static int nla_put_srh(struct sk_buff *skb, int attrtype,
+ struct seg6_iptunnel_encap *tuninfo)
{
struct seg6_iptunnel_encap *data;
struct nlattr *nla;
@@ -235,7 +235,7 @@ static int seg6_do_srh(struct sk_buff *skb)
return 0;
}
-int seg6_input(struct sk_buff *skb)
+static int seg6_input(struct sk_buff *skb)
{
int err;
@@ -251,7 +251,7 @@ int seg6_input(struct sk_buff *skb)
return dst_input(skb);
}
-int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *orig_dst = skb_dst(skb);
struct dst_entry *dst = NULL;
@@ -303,7 +303,7 @@ drop:
return err;
}
-static int seg6_build_state(struct net_device *dev, struct nlattr *nla,
+static int seg6_build_state(struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts)
{
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index a4d49760bf43..895ff650db43 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -16,7 +16,7 @@
#include <linux/tcp.h>
#include <linux/random.h>
-#include <linux/cryptohash.h>
+#include <linux/siphash.h>
#include <linux/kernel.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -24,7 +24,7 @@
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
-static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
+static siphash_key_t syncookie6_secret[2] __read_mostly;
/* RFC 2460, Section 8.3:
* [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
@@ -41,30 +41,27 @@ static __u16 const msstab[] = {
9000 - 60,
};
-static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], ipv6_cookie_scratch);
-
-static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
+static u32 cookie_hash(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
__be16 sport, __be16 dport, u32 count, int c)
{
- __u32 *tmp;
+ const struct {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ u32 count;
+ __be16 sport;
+ __be16 dport;
+ } __aligned(SIPHASH_ALIGNMENT) combined = {
+ .saddr = *saddr,
+ .daddr = *daddr,
+ .count = count,
+ .sport = sport,
+ .dport = dport
+ };
net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
-
- tmp = this_cpu_ptr(ipv6_cookie_scratch);
-
- /*
- * we have 320 bits of information to hash, copy in the remaining
- * 192 bits required for sha_transform, from the syncookie6_secret
- * and overwrite the digest with the secret
- */
- memcpy(tmp + 10, syncookie6_secret[c], 44);
- memcpy(tmp, saddr, 16);
- memcpy(tmp + 4, daddr, 16);
- tmp[8] = ((__force u32)sport << 16) + (__force u32)dport;
- tmp[9] = count;
- sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);
-
- return tmp[17];
+ return siphash(&combined, offsetofend(typeof(combined), dport),
+ &syncookie6_secret[c]);
}
static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
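
The conversion above replaces the SHA1-based cookie hash with the keyed siphash() primitive: the inputs are packed into a single aligned struct and hashed in one call against a lazily initialised secret. A minimal sketch of that calling convention with made-up names (illustrative only, not part of the patch):

#include <linux/cache.h>
#include <linux/net.h>		/* net_get_random_once() */
#include <linux/siphash.h>

static siphash_key_t demo_secret __read_mostly;

/* Illustrative only: hash a pair of values under a lazily initialised key,
 * following the pattern cookie_hash() uses above. */
static u64 demo_keyed_hash(u32 a, u32 b)
{
	struct {
		u32 a;
		u32 b;
	} __aligned(SIPHASH_ALIGNMENT) in = {
		.a = a,
		.b = b,
	};

	net_get_random_once(&demo_secret, sizeof(demo_secret));
	return siphash(&in, sizeof(in), &demo_secret);
}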
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4c60c6f71cd3..21c719965b6b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -123,6 +123,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct dst_entry *dst;
int addr_type;
int err;
+ struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
@@ -263,7 +264,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
sk->sk_gso_type = SKB_GSO_TCPV6;
ip6_dst_store(sk, dst, NULL, NULL);
- if (tcp_death_row.sysctl_tw_recycle &&
+ if (tcp_death_row->sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp &&
ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
tcp_fetch_timewait_stamp(sk, dst);
@@ -278,7 +279,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
inet->inet_dport = usin->sin6_port;
tcp_set_state(sk, TCP_SYN_SENT);
- err = inet6_hash_connect(&tcp_death_row, sk);
+ err = inet6_hash_connect(tcp_death_row, sk);
if (err)
goto late_failure;
@@ -291,6 +292,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
inet->inet_dport,
&tp->tsoffset);
+ if (tcp_fastopen_defer_connect(sk, &err))
+ return err;
+ if (err)
+ goto late_failure;
+
err = tcp_connect(sk);
if (err)
goto late_failure;
@@ -299,7 +305,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
late_failure:
tcp_set_state(sk, TCP_CLOSE);
- __sk_dst_reset(sk);
failure:
inet->inet_dport = 0;
sk->sk_route_caps = 0;
@@ -1157,10 +1162,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
tcp_ca_openreq_child(newsk, dst);
tcp_sync_mss(newsk, dst_mtu(dst));
- newtp->advmss = dst_metric_advmss(dst);
- if (tcp_sk(sk)->rx_opt.user_mss &&
- tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
- newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+ newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
tcp_initialize_rcv_mss(newsk);
@@ -1627,7 +1629,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
- .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
@@ -1658,7 +1659,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
- .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
@@ -1751,7 +1751,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
srcp = ntohs(inet->inet_sport);
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
- icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
@@ -1895,6 +1895,7 @@ struct proto tcpv6_prot = {
.shutdown = tcp_shutdown,
.setsockopt = tcp_setsockopt,
.getsockopt = tcp_getsockopt,
+ .keepalive = tcp_set_keepalive,
.recvmsg = tcp_recvmsg,
.sendmsg = tcp_sendmsg,
.sendpage = tcp_sendpage,
@@ -1955,7 +1956,7 @@ static void __net_exit tcpv6_net_exit(struct net *net)
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
- inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
+ inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}
static struct pernet_operations tcpv6_net_ops = {
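
The tcp_fastopen_defer_connect() hook added to tcp_v6_connect() above appears to back the TCP_FASTOPEN_CONNECT socket option from the same series: connect() returns immediately and the SYN goes out on the first write, carrying data when a Fast Open cookie is cached. A hypothetical userspace usage sketch, assuming that option is available on the running kernel:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_FASTOPEN_CONNECT
#define TCP_FASTOPEN_CONNECT 30	/* value from include/uapi/linux/tcp.h */
#endif

/* Illustrative only: defer the SYN until the first write so it can carry
 * Fast Open data. Error handling trimmed for brevity. */
static int tfo_connect_and_send(const struct sockaddr_in6 *dst,
				const char *buf, size_t len)
{
	int one = 1;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
	connect(fd, (const struct sockaddr *)dst, sizeof(*dst)); /* returns at once */
	(void)write(fd, buf, len);	/* SYN (+ data, if a cookie is cached) sent here */
	return fd;
}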
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 221825a9407a..4e4c401e3bc6 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -55,6 +55,16 @@
#include <trace/events/skb.h>
#include "udp_impl.h"
+static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+ if (!net->ipv4.sysctl_udp_l3mdev_accept &&
+ skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ return true;
+#endif
+ return false;
+}
+
static u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *laddr,
const u16 lport,
@@ -103,7 +113,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
- return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
+ return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
static void udp_v6_rehash(struct sock *sk)
@@ -118,7 +128,7 @@ static void udp_v6_rehash(struct sock *sk)
static int compute_score(struct sock *sk, struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned short hnum,
- int dif)
+ int dif, bool exact_dif)
{
int score;
struct inet_sock *inet;
@@ -149,7 +159,7 @@ static int compute_score(struct sock *sk, struct net *net,
score++;
}
- if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if || exact_dif) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
@@ -165,7 +175,7 @@ static int compute_score(struct sock *sk, struct net *net,
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
- struct udp_hslot *hslot2,
+ bool exact_dif, struct udp_hslot *hslot2,
struct sk_buff *skb)
{
struct sock *sk, *result;
@@ -176,7 +186,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
score = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif);
+ daddr, hnum, dif, exact_dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
@@ -212,6 +222,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
+ bool exact_dif = udp6_lib_exact_dif_match(net, skb);
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
@@ -223,7 +234,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
- daddr, hnum, dif,
+ daddr, hnum, dif, exact_dif,
hslot2, skb);
if (!result) {
unsigned int old_slot2 = slot2;
@@ -239,7 +250,8 @@ struct sock *__udp6_lib_lookup(struct net *net,
result = udp6_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
- hslot2, skb);
+ exact_dif, hslot2,
+ skb);
}
return result;
}
@@ -247,7 +259,8 @@ begin:
result = NULL;
badness = -1;
sk_for_each_rcu(sk, &hslot->head) {
- score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
+ score = compute_score(sk, net, saddr, sport, daddr, hnum, dif,
+ exact_dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
@@ -1299,7 +1312,8 @@ out:
return err;
do_confirm:
- dst_confirm(dst);
+ if (msg->msg_flags & MSG_PROBE)
+ dst_confirm_neigh(dst, &fl6.daddr);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index b5789562aded..08a807b29298 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -33,6 +33,8 @@ EXPORT_SYMBOL(xfrm6_rcv_spi);
int xfrm6_transport_finish(struct sk_buff *skb, int async)
{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+
skb_network_header(skb)[IP6CB(skb)->nhoff] =
XFRM_MODE_SKB_CB(skb)->protocol;
@@ -44,6 +46,11 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
ipv6_hdr(skb)->payload_len = htons(skb->len);
__skb_push(skb, skb->data - skb_network_header(skb));
+ if (xo && (xo->flags & XFRM_GRO)) {
+ skb_mac_header_rebuild(skb);
+ return -1;
+ }
+
NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
dev_net(skb->dev), NULL, skb, skb->dev, NULL,
ip6_rcv_finish);
@@ -69,18 +76,9 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
struct xfrm_state *x = NULL;
int i = 0;
- /* Allocate new secpath or COW existing one. */
- if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
- struct sec_path *sp;
-
- sp = secpath_dup(skb->sp);
- if (!sp) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
- goto drop;
- }
- if (skb->sp)
- secpath_put(skb->sp);
- skb->sp = sp;
+ if (secpath_set(skb)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+ goto drop;
}
if (1 + skb->sp->len == XFRM_MAX_DEPTH) {
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 4e344105b3fd..4439ee44c8b0 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -47,6 +47,7 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ihl = skb->data - skb_transport_header(skb);
+ struct xfrm_offload *xo = xfrm_offload(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
@@ -55,7 +56,8 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
}
ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
sizeof(struct ipv6hdr));
- skb_reset_transport_header(skb);
+ if (!xo || !(xo->flags & XFRM_GRO))
+ skb_reset_transport_header(skb);
return 0;
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index e0f71c01d728..79651bc71bf0 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -25,8 +25,6 @@
#include <net/mip6.h>
#endif
-static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
-
static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
@@ -220,7 +218,7 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops)
{
struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
- xfrm6_policy_afinfo.garbage_collect(net);
+ xfrm_garbage_collect_deferred(net);
return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
}
@@ -291,8 +289,7 @@ static struct dst_ops xfrm6_dst_ops_template = {
.gc_thresh = INT_MAX,
};
-static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
- .family = AF_INET6,
+static const struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
.dst_ops = &xfrm6_dst_ops_template,
.dst_lookup = xfrm6_dst_lookup,
.get_saddr = xfrm6_get_saddr,
@@ -305,7 +302,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
static int __init xfrm6_policy_init(void)
{
- return xfrm_policy_register_afinfo(&xfrm6_policy_afinfo);
+ return xfrm_policy_register_afinfo(&xfrm6_policy_afinfo, AF_INET6);
}
static void xfrm6_policy_fini(void)
diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c
index 54d13f8dbbae..b2dc8ce49378 100644
--- a/net/ipv6/xfrm6_protocol.c
+++ b/net/ipv6/xfrm6_protocol.c
@@ -162,9 +162,8 @@ static const struct inet6_protocol ipcomp6_protocol = {
.flags = INET6_PROTO_NOPOLICY,
};
-static struct xfrm_input_afinfo xfrm6_input_afinfo = {
+static const struct xfrm_input_afinfo xfrm6_input_afinfo = {
.family = AF_INET6,
- .owner = THIS_MODULE,
.callback = xfrm6_rcv_cb,
};