author     Alexander Duyck <aduyck@mirantis.com>  2016-03-11 14:05:47 -0800
committer  David S. Miller <davem@davemloft.net>  2016-03-13 23:55:14 -0400
commit     08334824951dd6d1295860da07b1236d18b0b8df (patch)
tree       6cb060f2472662019329028c10af017473a5549e /net/ipv4/udp_offload.c
parent     1e94082963747b551b129528714827f76a090e93 (diff)
GSO/UDP: Use skb->len instead of udph->len to determine length of original skb
It is possible for tunnels to end up generating IP or IPv6 datagrams that are larger than 64K and expecting to be segmented. As such we need to deal with length values greater than 64K. In order to accommodate this we need to update the code to work with a 32b length value instead of a 16b one.

Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
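To make the checksum arithmetic concrete, below is a minimal user-space C sketch of the idea. csum_fold32() and csum_add32() are hypothetical stand-ins for the kernel's csum_fold()/csum_add()/csum_sub(), the numbers are made up, and byte order is ignored (the patch itself feeds htonl(skb->len) into csum_sub()); the point is only that folding the full 32-bit length into the one's complement sum, both 16-bit halves, stays correct once the length no longer fits in a 16-bit uh->len.

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's complement accumulator down to 16 bits. */
static uint16_t csum_fold32(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Add a 32-bit value into a 16-bit partial sum, half by half. */
static uint16_t csum_add32(uint16_t partial, uint32_t val)
{
        uint32_t sum = partial;

        sum += val & 0xffff;    /* low 16 bits  */
        sum += val >> 16;       /* high 16 bits */
        return csum_fold32(sum);
}

int main(void)
{
        uint32_t skb_len = 70000;               /* original length, > 64K - 1        */
        uint16_t uh_len  = (uint16_t)skb_len;   /* a 16-bit field wraps this to 4464 */
        uint16_t check   = 0x1234;              /* pretend partial header checksum   */

        /* Old approach: cancel the (possibly wrapped) 16-bit length. */
        uint16_t old_way = csum_add32(check, (uint16_t)~uh_len);

        /* New approach: cancel the full 32-bit length; subtracting x in
         * one's complement arithmetic is the same as adding ~x.
         */
        uint16_t new_way = csum_add32(check, ~skb_len);

        printf("16-bit length: 0x%04x, 32-bit length: 0x%04x\n",
               old_way, new_way);
        return 0;
}

When skb->len fits in 16 bits the high half is zero and both approaches agree; once it exceeds 64K - 1 the wrapped 16-bit value cancels the wrong length, which is exactly what the patch avoids.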
Diffstat (limited to 'net/ipv4/udp_offload.c')
-rw-r--r--  net/ipv4/udp_offload.c | 15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index f5abb1ae1358..8a3405a80260 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -40,13 +40,19 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
         __be16 protocol = skb->protocol;
         u16 mac_len = skb->mac_len;
         int udp_offset, outer_hlen;
-        u32 partial;
+        __wsum partial;
 
         if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
                 goto out;
 
-        /* adjust partial header checksum to negate old length */
-        partial = (__force u32)uh->check + (__force u16)~uh->len;
+        /* Adjust partial header checksum to negate old length.
+         * We cannot rely on the value contained in uh->len as it is
+         * possible that the actual value exceeds the boundaries of the
+         * 16 bit length field due to the header being added outside of an
+         * IP or IPv6 frame that was already limited to 64K - 1.
+         */
+        partial = csum_sub(csum_unfold(uh->check),
+                           (__force __wsum)htonl(skb->len));
 
         /* setup inner skb. */
         skb->encapsulation = 0;
@@ -119,8 +125,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
                 if (!need_csum)
                         continue;
 
-                uh->check = ~csum_fold((__force __wsum)
-                                       ((__force u32)len + partial));
+                uh->check = ~csum_fold(csum_add(partial, (__force __wsum)len));
 
                 if (skb->encapsulation || !offload_csum) {
                         uh->check = gso_make_checksum(skb, ~uh->check);
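The second hunk is the other half of the same trick: once partial has had the old 32-bit length removed, each segment's checksum is rebuilt by folding the new length back in and complementing the result. Below is a self-contained sketch of that step; rebuild_check() is a made-up name, the numbers are assumed for illustration, and the byte-order conversion plus the later gso_make_checksum()/offloaded-checksum handling are left out.

#include <stdint.h>
#include <stdio.h>

/* Mirrors uh->check = ~csum_fold(csum_add(partial, len)) from the
 * second hunk: add the per-segment length into the adjusted partial
 * sum, fold to 16 bits, and complement.
 */
static uint16_t rebuild_check(uint32_t partial, uint32_t seg_len)
{
        uint32_t sum = partial;

        sum += seg_len & 0xffff;
        sum += seg_len >> 16;
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;          /* value written to uh->check */
}

int main(void)
{
        /* Assumed inputs: a length-adjusted partial sum and one
         * segment's length as produced by the GSO split.
         */
        printf("uh->check = 0x%04x\n", rebuild_check(0x00c3, 1472));
        return 0;
}

This is also why the in-diff comment distrusts uh->len: 70000 (0x11170) only fits a 16-bit field as the wrapped value 4464 (0x1170), whereas the 32-bit path folds in both halves, 0x0001 and 0x1170.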