authorJens Axboe <axboe@fb.com>2017-04-07 12:45:20 -0600
committerJens Axboe <axboe@fb.com>2017-04-07 12:45:20 -0600
commit65f619d2535197d97067eeeef75a40f25b552e69 (patch)
tree13f96c51a2b591ed22f1771541998bb77178f685 /net
parentfbbaf700e7b163a0f1704b2d542ee28be11fce21 (diff)
parent6d8c6c0f97ad8a3517c42b179c1dc8e77397d0e2 (diff)
Merge branch 'for-linus' into for-4.12/block
We've added a considerable amount of fixes for stalls and issues with the blk-mq scheduling in the 4.11 series since forking off the for-4.12/block branch. We need to do improvements on top of that for 4.12, so pull in the previous fixes to make our lives easier going forward.

Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'net')
-rw-r--r--net/batman-adv/bat_iv_ogm.c11
-rw-r--r--net/batman-adv/bat_v.c14
-rw-r--r--net/batman-adv/fragmentation.c20
-rw-r--r--net/batman-adv/gateway_common.c5
-rw-r--r--net/batman-adv/soft-interface.c1
-rw-r--r--net/batman-adv/types.h2
-rw-r--r--net/bridge/br_fdb.c2
-rw-r--r--net/bridge/br_netfilter_hooks.c12
-rw-r--r--net/bridge/br_private.h9
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/core/netclassid_cgroup.c32
-rw-r--r--net/core/skbuff.c27
-rw-r--r--net/core/sock.c16
-rw-r--r--net/ipv4/fib_frontend.c3
-rw-r--r--net/ipv4/ip_fragment.c25
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c5
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c8
-rw-r--r--net/ipv4/netfilter/nft_redir_ipv4.c8
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c8
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c8
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/mpls/af_mpls.c13
-rw-r--r--net/netfilter/nf_conntrack_core.c6
-rw-r--r--net/netfilter/nf_nat_proto_sctp.c13
-rw-r--r--net/netfilter/nf_tables_api.c4
-rw-r--r--net/netfilter/nft_ct.c21
-rw-r--r--net/netfilter/nft_meta.c40
-rw-r--r--net/netfilter/nft_nat.c8
-rw-r--r--net/netfilter/nft_set_bitmap.c165
-rw-r--r--net/netlink/af_netlink.c41
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/openvswitch/flow_netlink.c4
-rw-r--r--net/rxrpc/conn_event.c4
-rw-r--r--net/sched/sch_dsmark.c10
-rw-r--r--net/sctp/associola.c6
-rw-r--r--net/sctp/output.c7
-rw-r--r--net/sctp/outqueue.c11
-rw-r--r--net/socket.c13
-rw-r--r--net/tipc/subscr.c7
-rw-r--r--net/unix/garbage.c17
-rw-r--r--net/vmw_vsock/af_vsock.c14
-rw-r--r--net/vmw_vsock/virtio_transport.c42
-rw-r--r--net/vmw_vsock/virtio_transport_common.c7
-rw-r--r--net/wireless/nl80211.c127
-rw-r--r--net/xfrm/xfrm_user.c9
50 files changed, 516 insertions, 313 deletions
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 7c3d994e90d87..71343d0fec94b 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2477,6 +2477,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
batadv_iv_ogm_schedule(hard_iface);
}
+/**
+ * batadv_iv_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
+{
+ /* set default TQ difference threshold to 20 */
+ atomic_set(&bat_priv->gw.sel_class, 20);
+}
+
static struct batadv_gw_node *
batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
@@ -2823,6 +2833,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.del_if = batadv_iv_ogm_orig_del_if,
},
.gw = {
+ .init_sel_class = batadv_iv_init_sel_class,
.get_best_gw_node = batadv_iv_gw_get_best_gw_node,
.is_eligible = batadv_iv_gw_is_eligible,
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 0acd081dd2869..a36c8e7291d61 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -668,6 +668,16 @@ err_ifinfo1:
return ret;
}
+/**
+ * batadv_v_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
+{
+ /* set default throughput difference threshold to 5Mbps */
+ atomic_set(&bat_priv->gw.sel_class, 50);
+}
+
static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
char *buff, size_t count)
{
@@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
.dump = batadv_v_orig_dump,
},
.gw = {
+ .init_sel_class = batadv_v_init_sel_class,
.store_sel_class = batadv_v_store_sel_class,
.show_sel_class = batadv_v_show_sel_class,
.get_best_gw_node = batadv_v_gw_get_best_gw_node,
@@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
if (ret < 0)
return ret;
- /* set default throughput difference threshold to 5Mbps */
- atomic_set(&bat_priv->gw.sel_class, 50);
-
return 0;
}
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 11a23fd6e1a07..8f964beaac284 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -404,7 +404,7 @@ out:
* batadv_frag_create - create a fragment from skb
* @skb: skb to create fragment from
* @frag_head: header to use in new fragment
- * @mtu: size of new fragment
+ * @fragment_size: size of new fragment
*
* Split the passed skb into two fragments: a new one with size matching the
* passed fragment size and the old one with the rest. The new skb contains data from the
@@ -414,11 +414,11 @@ out:
*/
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
struct batadv_frag_packet *frag_head,
- unsigned int mtu)
+ unsigned int fragment_size)
{
struct sk_buff *skb_fragment;
unsigned int header_size = sizeof(*frag_head);
- unsigned int fragment_size = mtu - header_size;
+ unsigned int mtu = fragment_size + header_size;
skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
if (!skb_fragment)
@@ -456,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
struct sk_buff *skb_fragment;
unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
unsigned int header_size = sizeof(frag_header);
- unsigned int max_fragment_size, max_packet_size;
+ unsigned int max_fragment_size, num_fragments;
int ret;
/* To avoid merge and refragmentation at next-hops we never send
@@ -464,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb,
*/
mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
max_fragment_size = mtu - header_size;
- max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+
+ if (skb->len == 0 || max_fragment_size == 0)
+ return -EINVAL;
+
+ num_fragments = (skb->len - 1) / max_fragment_size + 1;
+ max_fragment_size = (skb->len - 1) / num_fragments + 1;
/* Don't even try to fragment, if we need more than 16 fragments */
- if (skb->len > max_packet_size) {
+ if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
ret = -EAGAIN;
goto free_skb;
}
@@ -507,7 +512,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
goto put_primary_if;
}
- skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+ skb_fragment = batadv_frag_create(skb, &frag_header,
+ max_fragment_size);
if (!skb_fragment) {
ret = -ENOMEM;
goto put_primary_if;
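
The hunk above switches batadv_frag_send_packet() from fill-to-MTU sizing to an even split: a ceiling division first derives the fragment count, then a second ceiling division redistributes the payload across that many fragments. A minimal standalone sketch of just this arithmetic, with example numbers assumed (plain C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int skb_len = 1400;           /* assumed payload size */
        unsigned int max_fragment_size = 1000; /* assumed mtu - header_size */
        unsigned int num_fragments;

        /* Same ceiling divisions as in batadv_frag_send_packet() above. */
        num_fragments = (skb_len - 1) / max_fragment_size + 1;  /* 2 */
        max_fragment_size = (skb_len - 1) / num_fragments + 1;  /* 700 */

        printf("%u fragments of at most %u bytes\n",
               num_fragments, max_fragment_size);
        return 0;
    }

With these numbers the old code would have sent 1000 + 400 bytes; the new code sends two 700-byte fragments, which is why batadv_frag_create() now takes the fragment size directly instead of deriving it from the MTU.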
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 5db2e43e3775e..33940c5c74a87 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
*/
void batadv_gw_init(struct batadv_priv *bat_priv)
{
+ if (bat_priv->algo_ops->gw.init_sel_class)
+ bat_priv->algo_ops->gw.init_sel_class(bat_priv);
+ else
+ atomic_set(&bat_priv->gw.sel_class, 1);
+
batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
NULL, BATADV_TVLV_GW, 1,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5d099b2e6cfcc..d042c99af028e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -819,7 +819,6 @@ static int batadv_softif_init_late(struct net_device *dev)
atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
#endif
atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
- atomic_set(&bat_priv->gw.sel_class, 20);
atomic_set(&bat_priv->gw.bandwidth_down, 100);
atomic_set(&bat_priv->gw.bandwidth_up, 20);
atomic_set(&bat_priv->orig_interval, 1000);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 66b25e410a413..246f21b4973bc 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops {
/**
* struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
+ * @init_sel_class: initialize GW selection class (optional)
* @store_sel_class: parse and stores a new GW selection class (optional)
* @show_sel_class: prints the current GW selection class (optional)
* @get_best_gw_node: select the best GW from the list of available nodes
@@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops {
* @dump: dump gateways to a netlink socket (optional)
*/
struct batadv_algo_gw_ops {
+ void (*init_sel_class)(struct batadv_priv *bat_priv);
ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
size_t count);
ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 4f598dc2d9168..6e08b7199dd74 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -106,7 +106,7 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
- WARN_ON_ONCE(!br_hash_lock_held(br));
+ lockdep_assert_held_once(&br->hash_lock);
rcu_read_lock();
fdb = fdb_find_rcu(head, addr, vid);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index fa87fbd62bb78..1f1e62095464f 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -706,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge;
- unsigned int mtu_reserved;
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ unsigned int mtu, mtu_reserved;
mtu_reserved = nf_bridge_mtu_reduction(skb);
+ mtu = skb->dev->mtu;
+
+ if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
+ mtu = nf_bridge->frag_max_size;
- if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
+ if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
nf_bridge_info_free(skb);
return br_dev_queue_push_xmit(net, sk, skb);
}
- nf_bridge = nf_bridge_info_get(skb);
-
/* This is wrong! We should preserve the original fragment
* boundaries by preserving frag_list rather than refragmenting.
*/
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2288fca7756c5..61368186edea5 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -531,15 +531,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid);
-static inline bool br_hash_lock_held(struct net_bridge *br)
-{
-#ifdef CONFIG_LOCKDEP
- return lockdep_is_held(&br->hash_lock);
-#else
- return true;
-#endif
-}
-
/* br_forward.c */
enum br_pkt_type {
BR_PKT_UNICAST,
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 38dcf1eb427de..f76bb33326138 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -7,6 +7,7 @@
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
+#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
@@ -469,11 +470,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
{
struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
struct socket *sock;
+ unsigned int noio_flag;
int ret;
BUG_ON(con->sock);
+
+ /* sock_create_kern() allocates with GFP_KERNEL */
+ noio_flag = memalloc_noio_save();
ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
+ memalloc_noio_restore(noio_flag);
if (ret)
return ret;
sock->sk->sk_allocation = GFP_NOFS;
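
This works because memalloc_noio_save() opens a task-wide scope: every allocation between save and restore behaves as if it were GFP_NOIO, even inside callees such as sock_create_kern() that hard-code GFP_KERNEL, so the connect path cannot recurse into the I/O it is servicing. A sketch of the idiom (the callee here is hypothetical):

    #include <linux/sched/mm.h>

    static int alloc_in_noio_scope(void)
    {
        unsigned int noio_flag;
        int ret;

        /* From here on, GFP_KERNEL allocations degrade to GFP_NOIO. */
        noio_flag = memalloc_noio_save();
        ret = some_callee_that_allocates(); /* hypothetical GFP_KERNEL user */
        memalloc_noio_restore(noio_flag);   /* restore the previous scope */

        return ret;
    }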
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 6ae56037bb133..029a61ac6cdd8 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -71,27 +71,17 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
return 0;
}
-static void update_classid(struct cgroup_subsys_state *css, void *v)
+static void cgrp_attach(struct cgroup_taskset *tset)
{
- struct css_task_iter it;
+ struct cgroup_subsys_state *css;
struct task_struct *p;
- css_task_iter_start(css, &it);
- while ((p = css_task_iter_next(&it))) {
+ cgroup_taskset_for_each(p, css, tset) {
task_lock(p);
- iterate_fd(p->files, 0, update_classid_sock, v);
+ iterate_fd(p->files, 0, update_classid_sock,
+ (void *)(unsigned long)css_cls_state(css)->classid);
task_unlock(p);
}
- css_task_iter_end(&it);
-}
-
-static void cgrp_attach(struct cgroup_taskset *tset)
-{
- struct cgroup_subsys_state *css;
-
- cgroup_taskset_first(tset, &css);
- update_classid(css,
- (void *)(unsigned long)css_cls_state(css)->classid);
}
static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -103,12 +93,22 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
u64 value)
{
struct cgroup_cls_state *cs = css_cls_state(css);
+ struct css_task_iter it;
+ struct task_struct *p;
cgroup_sk_alloc_disable();
cs->classid = (u32)value;
- update_classid(css, (void *)(unsigned long)cs->classid);
+ css_task_iter_start(css, &it);
+ while ((p = css_task_iter_next(&it))) {
+ task_lock(p);
+ iterate_fd(p->files, 0, update_classid_sock,
+ (void *)(unsigned long)cs->classid);
+ task_unlock(p);
+ }
+ css_task_iter_end(&it);
+
return 0;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cd4ba8c6b6091..9f781092fda9c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3694,6 +3694,15 @@ static void sock_rmem_free(struct sk_buff *skb)
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
+static void skb_set_err_queue(struct sk_buff *skb)
+{
+ /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
+ * So, it is safe to (mis)use it to mark skbs on the error queue.
+ */
+ skb->pkt_type = PACKET_OUTGOING;
+ BUILD_BUG_ON(PACKET_OUTGOING == 0);
+}
+
/*
* Note: We dont mem charge error packets (no sk_forward_alloc changes)
*/
@@ -3707,6 +3716,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_rmem_free;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ skb_set_err_queue(skb);
/* before exiting rcu section, make sure dst is refcounted */
skb_dst_force(skb);
@@ -3783,16 +3793,20 @@ EXPORT_SYMBOL(skb_clone_sk);
static void __skb_complete_tx_timestamp(struct sk_buff *skb,
struct sock *sk,
- int tstype)
+ int tstype,
+ bool opt_stats)
{
struct sock_exterr_skb *serr;
int err;
+ BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
+
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
serr->ee.ee_info = tstype;
+ serr->opt_stats = opt_stats;
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
if (sk->sk_protocol == IPPROTO_TCP &&
@@ -3833,7 +3847,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
*/
if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
*skb_hwtstamps(skb) = *hwtstamps;
- __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
sock_put(sk);
}
}
@@ -3844,7 +3858,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
struct sock *sk, int tstype)
{
struct sk_buff *skb;
- bool tsonly;
+ bool tsonly, opt_stats = false;
if (!sk)
return;
@@ -3857,9 +3871,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
#ifdef CONFIG_INET
if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
sk->sk_protocol == IPPROTO_TCP &&
- sk->sk_type == SOCK_STREAM)
+ sk->sk_type == SOCK_STREAM) {
skb = tcp_get_timestamping_opt_stats(sk);
- else
+ opt_stats = true;
+ } else
#endif
skb = alloc_skb(0, GFP_ATOMIC);
} else {
@@ -3878,7 +3893,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
else
skb->tstamp = ktime_get_real();
- __skb_complete_tx_timestamp(skb, sk, tstype);
+ __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
}
EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
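
The BUILD_BUG_ON() added to __skb_complete_tx_timestamp() is a compile-time check that struct sock_exterr_skb, which SKB_EXT_ERR() overlays on the skb control buffer, still fits inside it. In plain C11 the equivalent assertion would read roughly (a sketch, assuming the kernel struct layouts):

    #include <linux/skbuff.h>

    /* Fail the build if the error-queue metadata outgrows skb->cb[]. */
    _Static_assert(sizeof(struct sock_exterr_skb) <=
                   sizeof(((struct sk_buff *)0)->cb),
                   "sock_exterr_skb must fit in skb->cb");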
diff --git a/net/core/sock.c b/net/core/sock.c
index a96d5f7a5734a..2c4f574168fbd 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1442,6 +1442,11 @@ static void __sk_destruct(struct rcu_head *head)
pr_debug("%s: optmem leakage (%d bytes) detected\n",
__func__, atomic_read(&sk->sk_omem_alloc));
+ if (sk->sk_frag.page) {
+ put_page(sk->sk_frag.page);
+ sk->sk_frag.page = NULL;
+ }
+
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
put_pid(sk->sk_peer_pid);
@@ -1539,6 +1544,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
is_charged = sk_filter_charge(newsk, filter);
if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+ /* We need to make sure that we don't uncharge the new
+ * socket if we couldn't charge it in the first place
+ * as otherwise we uncharge the parent's filter.
+ */
+ if (!is_charged)
+ RCU_INIT_POINTER(newsk->sk_filter, NULL);
sk_free_unlock_clone(newsk);
newsk = NULL;
goto out;
@@ -2787,11 +2798,6 @@ void sk_common_release(struct sock *sk)
sk_refcnt_debug_release(sk);
- if (sk->sk_frag.page) {
- put_page(sk->sk_frag.page);
- sk->sk_frag.page = NULL;
- }
-
sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 42bfd08109dd7..8f2133ffc2ff1 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1083,7 +1083,8 @@ static void nl_fib_input(struct sk_buff *skb)
net = sock_net(skb->sk);
nlh = nlmsg_hdr(skb);
- if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+ if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+ skb->len < nlh->nlmsg_len ||
nlmsg_len(nlh) < sizeof(*frn))
return;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index bbe7f72db9c15..b3cdeec85f1f2 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg)
qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
net = container_of(qp->q.net, struct net, ipv4.frags);
+ rcu_read_lock();
spin_lock(&qp->q.lock);
if (qp->q.flags & INET_FRAG_COMPLETE)
@@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg)
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
if (!inet_frag_evicting(&qp->q)) {
- struct sk_buff *head = qp->q.fragments;
+ struct sk_buff *clone, *head = qp->q.fragments;
const struct iphdr *iph;
int err;
@@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg)
if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
goto out;
- rcu_read_lock();
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
- goto out_rcu_unlock;
+ goto out;
+
/* skb has no dst, perform route lookup again */
iph = ip_hdr(head);
err = ip_route_input_noref(head, iph->daddr, iph->saddr,
iph->tos, head->dev);
if (err)
- goto out_rcu_unlock;
+ goto out;
/* Only an end host needs to send an ICMP
* "Fragment Reassembly Timeout" message, per RFC792.
*/
if (frag_expire_skip_icmp(qp->user) &&
(skb_rtable(head)->rt_type != RTN_LOCAL))
- goto out_rcu_unlock;
+ goto out;
+
+ clone = skb_clone(head, GFP_ATOMIC);
/* Send an ICMP "Fragment Reassembly Timeout" message. */
- icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
-out_rcu_unlock:
- rcu_read_unlock();
+ if (clone) {
+ spin_unlock(&qp->q.lock);
+ icmp_send(clone, ICMP_TIME_EXCEEDED,
+ ICMP_EXC_FRAGTIME, 0);
+ consume_skb(clone);
+ goto out_rcu_unlock;
+ }
}
out:
spin_unlock(&qp->q.lock);
+out_rcu_unlock:
+ rcu_read_unlock();
ipq_put(qp);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index bc1486f2c0643..2e14ed11a35cf 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -165,6 +165,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
+
+ if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
+ return NF_ACCEPT;
+
return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
}
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index f8aad03d674b0..6f5e8d01b8769 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
/* maniptype == SRC for postrouting. */
enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
- /* We never see fragments: conntrack defrags on pre-routing
- * and local-out, and nf_nat_out protects post-routing.
- */
- NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
-
ct = nf_ct_get(skb, &ctinfo);
/* Can't track? It's not due to stress, or conntrack would
* have dropped it. Hence it's the user's responsibility to
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index a0ea8aad1bf15..f186772771193 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
memset(&range, 0, sizeof(range));
range.flags = priv->flags;
if (priv->sreg_proto_min) {
- range.min_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min];
- range.max_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max];
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
}
regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
&range, nft_out(pkt));
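
This and the matching hunks below (nft_redir_ipv4, nft_masq_ipv6, nft_redir_ipv6 and nft_nat) replace raw casts of the 32-bit register array with sized accessors, so that sub-word values are stored and loaded symmetrically on big-endian machines. For reference, the accessors look roughly like this (a sketch of the include/net/netfilter/nf_tables.h helpers; the tree holds the authoritative definitions):

    /* nft registers are u32 words; sub-word values live in the low-address
     * bytes, and the store helpers zero the rest of the register first.
     */
    static inline void nft_reg_store16(u32 *dreg, u16 val)
    {
        *dreg = 0;
        *(u16 *)dreg = val;
    }

    static inline u16 nft_reg_load16(u32 *sreg)
    {
        return *(u16 *)sreg;
    }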
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index 1650ed23c15dd..5120be1d31185 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
memset(&mr, 0, sizeof(mr));
if (priv->sreg_proto_min) {
- mr.range[0].min.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min];
- mr.range[0].max.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max];
+ mr.range[0].min.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ mr.range[0].max.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cf4555581282c..1e319a525d51b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2770,7 +2770,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
const struct inet_connection_sock *icsk = inet_csk(sk);
- u32 now = tcp_time_stamp, intv;
+ u32 now, intv;
u64 rate64;
bool slow;
u32 rate;
@@ -2839,6 +2839,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_retrans = tp->retrans_out;
info->tcpi_fackets = tp->fackets_out;
+ now = tcp_time_stamp;
info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 39c393cc0fd3c..c43119726a62e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5541,6 +5541,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_set_state(sk, TCP_ESTABLISHED);
+ icsk->icsk_ack.lrcvtime = tcp_time_stamp;
if (skb) {
icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5759,7 +5760,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* to stand against the temptation 8) --ANK
*/
inet_csk_schedule_ack(sk);
- icsk->icsk_ack.lrcvtime = tcp_time_stamp;
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 7e16243cdb58c..65c0f3d13eca4 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -460,6 +460,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+ newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
newtp->packets_out = 0;
newtp->retrans_out = 0;
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index 6c5b5b1830a74..4146536e9c151 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
memset(&range, 0, sizeof(range));
range.flags = priv->flags;
if (priv->sreg_proto_min) {
- range.min_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min];
- range.max_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max];
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
}
regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
nft_out(pkt));
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index f5ac080fc0849..a27e424f690d6 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
memset(&range, 0, sizeof(range));
if (priv->sreg_proto_min) {
- range.min_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min],
- range.max_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max],
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 35c58b669ebdf..9db1418993f2b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3423,6 +3423,8 @@ static int rt6_fill_node(struct net *net,
}
else if (rt->rt6i_flags & RTF_LOCAL)
rtm->rtm_type = RTN_LOCAL;
+ else if (rt->rt6i_flags & RTF_ANYCAST)
+ rtm->rtm_type = RTN_ANYCAST;
else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
rtm->rtm_type = RTN_LOCAL;
else
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4e4c401e3bc69..e28082f0a307e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1035,6 +1035,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc6.hlimit = -1;
ipc6.tclass = -1;
ipc6.dontfrag = -1;
+ sockc.tsflags = sk->sk_tsflags;
/* destination address check */
if (sin6) {
@@ -1159,7 +1160,6 @@ do_udp_sendmsg:
fl6.flowi6_mark = sk->sk_mark;
fl6.flowi6_uid = sk->sk_uid;
- sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) {
opt = &opt_space;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 33211f9a26560..6414079aa7297 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1269,6 +1269,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
{
struct mpls_route __rcu **platform_label;
struct net *net = dev_net(dev);
+ unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
+ unsigned int alive;
unsigned index;
platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -1278,9 +1280,11 @@ static void mpls_ifdown(struct net_device *dev, int event)
if (!rt)
continue;
+ alive = 0;
change_nexthops(rt) {
if (rtnl_dereference(nh->nh_dev) != dev)
- continue;
+ goto next;
+
switch (event) {
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
@@ -1288,13 +1292,16 @@ static void mpls_ifdown(struct net_device *dev, int event)
/* fall through */
case NETDEV_CHANGE:
nh->nh_flags |= RTNH_F_LINKDOWN;
- if (event != NETDEV_UNREGISTER)
- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
break;
}
if (event == NETDEV_UNREGISTER)
RCU_INIT_POINTER(nh->nh_dev, NULL);
+next:
+ if (!(nh->nh_flags & nh_flags))
+ alive++;
} endfor_nexthops(rt);
+
+ WRITE_ONCE(rt->rt_nhn_alive, alive);
}
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 071b97fcbefb0..ffb78e5f7b709 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -181,7 +181,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
seqcount_t nf_conntrack_generation __read_mostly;
-DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used
+ * for the nfctinfo. We cheat by (ab)using the PER CPU cache line
+ * alignment to enforce this.
+ */
+DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
static unsigned int nf_conntrack_hash_rnd __read_mostly;
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
index 31d358691af09..804e8a0ab36ef 100644
--- a/net/netfilter/nf_nat_proto_sctp.c
+++ b/net/netfilter/nf_nat_proto_sctp.c
@@ -33,8 +33,16 @@ sctp_manip_pkt(struct sk_buff *skb,
enum nf_nat_manip_type maniptype)
{
sctp_sctphdr_t *hdr;
+ int hdrsize = 8;
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+ /* This could be an inner header returned in an icmp packet; in such
+ * cases we cannot update the checksum field since it is outside
+ * of the 8 bytes of transport layer headers we are guaranteed.
+ */
+ if (skb->len >= hdroff + sizeof(*hdr))
+ hdrsize = sizeof(*hdr);
+
+ if (!skb_make_writable(skb, hdroff + hdrsize))
return false;
hdr = (struct sctphdr *)(skb->data + hdroff);
@@ -47,6 +55,9 @@ sctp_manip_pkt(struct sk_buff *skb,
hdr->dest = tuple->dst.u.sctp.port;
}
+ if (hdrsize < sizeof(*hdr))
+ return true;
+
if (skb->ip_summed != CHECKSUM_PARTIAL) {
hdr->checksum = sctp_compute_cksum(skb, hdroff);
skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5e0ccfd5bb37d..434c739dfecaa 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3145,7 +3145,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
iter.count = 0;
iter.err = 0;
iter.fn = nf_tables_bind_check_setelem;
- iter.flush = false;
set->ops->walk(ctx, set, &iter);
if (iter.err < 0)
@@ -3399,7 +3398,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
args.iter.count = 0;
args.iter.err = 0;
args.iter.fn = nf_tables_dump_setelem;
- args.iter.flush = false;
set->ops->walk(&ctx, set, &args.iter);
nla_nest_end(skb, nest);
@@ -3963,7 +3961,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
struct nft_set_iter iter = {
.genmask = genmask,
.fn = nft_flush_set,
- .flush = true,
};
set->ops->walk(&ctx, set, &iter);
@@ -5114,7 +5111,6 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
iter.count = 0;
iter.err = 0;
iter.fn = nf_tables_loop_check_setelem;
- iter.flush = false;
set->ops->walk(ctx, set, &iter);
if (iter.err < 0)
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index bf548a7a71ec9..0264258c46feb 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -83,7 +83,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
switch (priv->key) {
case NFT_CT_DIRECTION:
- *dest = CTINFO2DIR(ctinfo);
+ nft_reg_store8(dest, CTINFO2DIR(ctinfo));
return;
case NFT_CT_STATUS:
*dest = ct->status;
@@ -151,20 +151,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
return;
}
case NFT_CT_L3PROTOCOL:
- *dest = nf_ct_l3num(ct);
+ nft_reg_store8(dest, nf_ct_l3num(ct));
return;
case NFT_CT_PROTOCOL:
- *dest = nf_ct_protonum(ct);
+ nft_reg_store8(dest, nf_ct_protonum(ct));
return;
#ifdef CONFIG_NF_CONNTRACK_ZONES
case NFT_CT_ZONE: {
const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+ u16 zoneid;
if (priv->dir < IP_CT_DIR_MAX)
- *dest = nf_ct_zone_id(zone, priv->dir);
+ zoneid = nf_ct_zone_id(zone, priv->dir);
else
- *dest = zone->id;
+ zoneid = zone->id;
+ nft_reg_store16(dest, zoneid);
return;
}
#endif
@@ -183,10 +185,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
return;
case NFT_CT_PROTO_SRC:
- *dest = (__force __u16)tuple->src.u.all;
+ nft_reg_store16(dest, (__force u16)tuple->src.u.all);
return;
case NFT_CT_PROTO_DST:
- *dest = (__force __u16)tuple->dst.u.all;
+ nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
return;
default:
break;
@@ -205,7 +207,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
const struct nft_ct *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
enum ip_conntrack_info ctinfo;
- u16 value = regs->data[priv->sreg];
+ u16 value = nft_reg_load16(&regs->data[priv->sreg]);
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
@@ -542,7 +544,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
case IP_CT_DIR_REPLY:
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto err1;
}
}
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index e1f5ca9b423b5..7b60e01f38ff9 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr,
*dest = skb->len;
break;
case NFT_META_PROTOCOL:
- *dest = 0;
- *(__be16 *)dest = skb->protocol;
+ nft_reg_store16(dest, (__force u16)skb->protocol);
break;
case NFT_META_NFPROTO:
- *dest = nft_pf(pkt);
+ nft_reg_store8(dest, nft_pf(pkt));
break;
case NFT_META_L4PROTO:
if (!pkt->tprot_set)
goto err;
- *dest = pkt->tprot;
+ nft_reg_store8(dest, pkt->tprot);
break;
case NFT_META_PRIORITY:
*dest = skb->priority;
@@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
case NFT_META_IIFTYPE:
if (in == NULL)
goto err;
- *dest = 0;
- *(u16 *)dest = in->type;
+ nft_reg_store16(dest, in->type);
break;
case NFT_META_OIFTYPE:
if (out == NULL)
goto err;
- *dest = 0;
- *(u16 *)dest = out->type;
+ nft_reg_store16(dest, out->type);
break;
case NFT_META_SKUID:
sk = skb_to_full_sk(skb);
@@ -142,19 +139,19 @@ void nft_meta_get_eval(const struct nft_expr *expr,
#endif
case NFT_META_PKTTYPE:
if (skb->pkt_type != PACKET_LOOPBACK) {
- *dest = skb->pkt_type;
+ nft_reg_store8(dest, skb->pkt_type);
break;
}
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
- *dest = PACKET_MULTICAST;
+ nft_reg_store8(dest, PACKET_MULTICAST);
else
- *dest = PACKET_BROADCAST;
+ nft_reg_store8(dest, PACKET_BROADCAST);
break;
case NFPROTO_IPV6:
- *dest = PACKET_MULTICAST;
+ nft_reg_store8(dest, PACKET_MULTICAST);
break;
case NFPROTO_NETDEV:
switch (skb->protocol) {
@@ -168,14 +165,14 @@ void nft_meta_get_eval(const struct nft_expr *expr,
goto err;
if (ipv4_is_multicast(iph->daddr))
- *dest = PACKET_MULTICAST;
+ nft_reg_store8(dest, PACKET_MULTICAST);
else
- *dest = PACKET_BROADCAST;
+ nft_reg_store8(dest, PACKET_BROADCAST);
break;
}
case htons(ETH_P_IPV6):
- *dest = PACKET_MULTICAST;
+ nft_reg_store8(dest, PACKET_MULTICAST);
break;
default:
WARN_ON_ONCE(1);
@@ -230,7 +227,9 @@ void nft_meta_set_eval(const struct nft_expr *expr,
{
const struct nft_meta *meta = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
- u32 value = regs->data[meta->sreg];
+ u32 *sreg = &regs->data[meta->sreg];
+ u32 value = *sreg;
+ u8 pkt_type;
switch (meta->key) {
case NFT_META_MARK:
@@ -240,9 +239,12 @@ void nft_meta_set_eval(const struct nft_expr *expr,
skb->priority = value;
break;
case NFT_META_PKTTYPE:
- if (skb->pkt_type != value &&
- skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type))
- skb->pkt_type = value;
+ pkt_type = nft_reg_load8(sreg);
+
+ if (skb->pkt_type != pkt_type &&
+ skb_pkt_type_ok(pkt_type) &&
+ skb_pkt_type_ok(skb->pkt_type))
+ skb->pkt_type = pkt_type;
break;
case NFT_META_NFTRACE:
skb->nf_trace = !!value;
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 19a7bf3236f96..439e0bd152a00 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
}
if (priv->sreg_proto_min) {
- range.min_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min];
- range.max_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max];
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 152d226552c17..8ebbc2940f4c5 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -15,6 +15,11 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
+struct nft_bitmap_elem {
+ struct list_head head;
+ struct nft_set_ext ext;
+};
+
/* This bitmap uses two bits to represent one element. These two bits determine
* the element state in the current and the future generation.
*
@@ -41,13 +46,22 @@
* restore its previous state.
*/
struct nft_bitmap {
- u16 bitmap_size;
- u8 bitmap[];
+ struct list_head list;
+ u16 bitmap_size;
+ u8 bitmap[];
};
-static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off)
+static inline void nft_bitmap_location(const struct nft_set *set,
+ const void *key,
+ u32 *idx, u32 *off)
{
- u32 k = (key << 1);
+ u32 k;
+
+ if (set->klen == 2)
+ k = *(u16 *)key;
+ else
+ k = *(u8 *)key;
+ k <<= 1;
*idx = k / BITS_PER_BYTE;
*off = k % BITS_PER_BYTE;
@@ -69,26 +83,48 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
u8 genmask = nft_genmask_cur(net);
u32 idx, off;
- nft_bitmap_location(*key, &idx, &off);
+ nft_bitmap_location(set, key, &idx, &off);
return nft_bitmap_active(priv->bitmap, idx, off, genmask);
}
+static struct nft_bitmap_elem *
+nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
+ u8 genmask)
+{
+ const struct nft_bitmap *priv = nft_set_priv(set);
+ struct nft_bitmap_elem *be;
+
+ list_for_each_entry_rcu(be, &priv->list, head) {
+ if (memcmp(nft_set_ext_key(&be->ext),
+ nft_set_ext_key(&this->ext), set->klen) ||
+ !nft_set_elem_active(&be->ext, genmask))
+ continue;
+
+ return be;
+ }
+ return NULL;
+}
+
static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem,
- struct nft_set_ext **_ext)
+ struct nft_set_ext **ext)
{
struct nft_bitmap *priv = nft_set_priv(set);
- struct nft_set_ext *ext = elem->priv;
+ struct nft_bitmap_elem *new = elem->priv, *be;
u8 genmask = nft_genmask_next(net);
u32 idx, off;
- nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
- if (nft_bitmap_active(priv->bitmap, idx, off, genmask))
+ be = nft_bitmap_elem_find(set, new, genmask);
+ if (be) {
+ *ext = &be->ext;
return -EEXIST;
+ }
+ nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off);
/* Enter 01 state. */
priv->bitmap[idx] |= (genmask << off);
+ list_add_tail_rcu(&new->head, &priv->list);
return 0;
}
@@ -98,13 +134,14 @@ static void nft_bitmap_remove(const struct net *net,
const struct nft_set_elem *elem)
{
struct nft_bitmap *priv = nft_set_priv(set);
- struct nft_set_ext *ext = elem->priv;
+ struct nft_bitmap_elem *be = elem->priv;
u8 genmask = nft_genmask_next(net);
u32 idx, off;
- nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+ nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 00 state. */
priv->bitmap[idx] &= ~(genmask << off);
+ list_del_rcu(&be->head);
}
static void nft_bitmap_activate(const struct net *net,
@@ -112,74 +149,52 @@ static void nft_bitmap_activate(const struct net *net,
const struct nft_set_elem *elem)
{
struct nft_bitmap *priv = nft_set_priv(set);
- struct nft_set_ext *ext = elem->priv;
+ struct nft_bitmap_elem *be = elem->priv;
u8 genmask = nft_genmask_next(net);
u32 idx, off;
- nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+ nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 11 state. */
priv->bitmap[idx] |= (genmask << off);
+ nft_set_elem_change_active(net, set, &be->ext);
}
static bool nft_bitmap_flush(const struct net *net,
- const struct nft_set *set, void *ext)
+ const struct nft_set *set, void *_be)
{
struct nft_bitmap *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net);
+ struct nft_bitmap_elem *be = _be;
u32 idx, off;
- nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+ nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 10 state, similar to deactivation. */
priv->bitmap[idx] &= ~(genmask << off);
+ nft_set_elem_change_active(net, set, &be->ext);
return true;
}
-static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set,
- const struct nft_set_elem *elem)
-{
- struct nft_set_ext_tmpl tmpl;
- struct nft_set_ext *ext;
-
- nft_set_ext_prepare(&tmpl);
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
-
- ext = kzalloc(tmpl.len, GFP_KERNEL);
- if (!ext)
- return NULL;
-
- nft_set_ext_init(ext, &tmpl);
- memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen);
-
- return ext;
-}
-
static void *nft_bitmap_deactivate(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_bitmap *priv = nft_set_priv(set);
+ struct nft_bitmap_elem *this = elem->priv, *be;
u8 genmask = nft_genmask_next(net);
- struct nft_set_ext *ext;
- u32 idx, off, key = 0;
-
- memcpy(&key, elem->key.val.data, set->klen);
- nft_bitmap_location(key, &idx, &off);
+ u32 idx, off;
- if (!nft_bitmap_active(priv->bitmap, idx, off, genmask))
- return NULL;
+ nft_bitmap_location(set, elem->key.val.data, &idx, &off);
- /* We have no real set extension since this is a bitmap, allocate this
- * dummy object that is released from the commit/abort path.
- */
- ext = nft_bitmap_ext_alloc(set, elem);
- if (!ext)
+ be = nft_bitmap_elem_find(set, this, genmask);
+ if (!be)
return NULL;
/* Enter 10 state. */
priv->bitmap[idx] &= ~(genmask << off);
+ nft_set_elem_change_active(net, set, &be->ext);
- return ext;
+ return be;
}
static void nft_bitmap_walk(const struct nft_ctx *ctx,
@@ -187,47 +202,23 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
struct nft_set_iter *iter)
{
const struct nft_bitmap *priv = nft_set_priv(set);
- struct nft_set_ext_tmpl tmpl;
+ struct nft_bitmap_elem *be;
struct nft_set_elem elem;
- struct nft_set_ext *ext;
- int idx, off;
- u16 key;
-
- nft_set_ext_prepare(&tmpl);
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
-
- for (idx = 0; idx < priv->bitmap_size; idx++) {
- for (off = 0; off < BITS_PER_BYTE; off += 2) {
- if (iter->count < iter->skip)
- goto cont;
-
- if (!nft_bitmap_active(priv->bitmap, idx, off,
- iter->genmask))
- goto cont;
-
- ext = kzalloc(tmpl.len, GFP_KERNEL);
- if (!ext) {
- iter->err = -ENOMEM;
- return;
- }
- nft_set_ext_init(ext, &tmpl);
- key = ((idx * BITS_PER_BYTE) + off) >> 1;
- memcpy(nft_set_ext_key(ext), &key, set->klen);
-
- elem.priv = ext;
- iter->err = iter->fn(ctx, set, iter, &elem);
-
- /* On set flush, this dummy extension object is released
- * from the commit/abort path.
- */
- if (!iter->flush)
- kfree(ext);
-
- if (iter->err < 0)
- return;
+
+ list_for_each_entry_rcu(be, &priv->list, head) {
+ if (iter->count < iter->skip)
+ goto cont;
+ if (!nft_set_elem_active(&be->ext, iter->genmask))
+ goto cont;
+
+ elem.priv = be;
+
+ iter->err = iter->fn(ctx, set, iter, &elem);
+
+ if (iter->err < 0)
+ return;
cont:
- iter->count++;
- }
+ iter->count++;
}
}
@@ -258,6 +249,7 @@ static int nft_bitmap_init(const struct nft_set *set,
{
struct nft_bitmap *priv = nft_set_priv(set);
+ INIT_LIST_HEAD(&priv->list);
priv->bitmap_size = nft_bitmap_size(set->klen);
return 0;
@@ -283,6 +275,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
static struct nft_set_ops nft_bitmap_ops __read_mostly = {
.privsize = nft_bitmap_privsize,
+ .elemsize = offsetof(struct nft_bitmap_elem, ext),
.estimate = nft_bitmap_estimate,
.init = nft_bitmap_init,
.destroy = nft_bitmap_destroy,
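
Since each element owns two adjacent genmask bits, nft_bitmap_location() maps a key k to byte index 2k/8 and bit offset 2k%8; the lookup and insert paths above then test or set (genmask << off) in that byte. A worked example of the mapping (standalone C, example key assumed):

    #include <stdio.h>

    #define BITS_PER_BYTE 8

    int main(void)
    {
        unsigned int key = 13;     /* assumed one-byte set key */
        unsigned int k = key << 1; /* two bits per element */
        unsigned int idx = k / BITS_PER_BYTE; /* 3 */
        unsigned int off = k % BITS_PER_BYTE; /* 2 */

        /* Element 13 lives in bits 2-3 of bitmap[3]. */
        printf("idx=%u off=%u\n", idx, off);
        return 0;
    }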
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7b73c7c161a96..596eaff66649e 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
+static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
+
+static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
+ "nlk_cb_mutex-ROUTE",
+ "nlk_cb_mutex-1",
+ "nlk_cb_mutex-USERSOCK",
+ "nlk_cb_mutex-FIREWALL",
+ "nlk_cb_mutex-SOCK_DIAG",
+ "nlk_cb_mutex-NFLOG",
+ "nlk_cb_mutex-XFRM",
+ "nlk_cb_mutex-SELINUX",
+ "nlk_cb_mutex-ISCSI",
+ "nlk_cb_mutex-AUDIT",
+ "nlk_cb_mutex-FIB_LOOKUP",
+ "nlk_cb_mutex-CONNECTOR",
+ "nlk_cb_mutex-NETFILTER",
+ "nlk_cb_mutex-IP6_FW",
+ "nlk_cb_mutex-DNRTMSG",
+ "nlk_cb_mutex-KOBJECT_UEVENT",
+ "nlk_cb_mutex-GENERIC",
+ "nlk_cb_mutex-17",
+ "nlk_cb_mutex-SCSITRANSPORT",
+ "nlk_cb_mutex-ECRYPTFS",
+ "nlk_cb_mutex-RDMA",
+ "nlk_cb_mutex-CRYPTO",
+ "nlk_cb_mutex-SMC",
+ "nlk_cb_mutex-23",
+ "nlk_cb_mutex-24",
+ "nlk_cb_mutex-25",
+ "nlk_cb_mutex-26",
+ "nlk_cb_mutex-27",
+ "nlk_cb_mutex-28",
+ "nlk_cb_mutex-29",
+ "nlk_cb_mutex-30",
+ "nlk_cb_mutex-31",
+ "nlk_cb_mutex-MAX_LINKS"
+};
+
static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);
@@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
} else {
nlk->cb_mutex = &nlk->cb_def_mutex;
mutex_init(nlk->cb_mutex);
+ lockdep_set_class_and_name(nlk->cb_mutex,
+ nlk_cb_mutex_keys + protocol,
+ nlk_cb_mutex_key_strings[protocol]);
}
init_waitqueue_head(&nlk->wait);
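
All netlink cb_mutex instances were previously initialized from the same mutex_init() call site and therefore shared one lockdep class, so a legitimate nesting of two different protocols' mutexes could be reported as recursive locking. The patch gives each protocol its own key and name; the general idiom, sketched on a hypothetical object type:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    #define DEMO_MAX 4 /* hypothetical number of object kinds */

    struct demo_obj {
        struct mutex lock;
    };

    static struct lock_class_key demo_keys[DEMO_MAX]; /* one class per kind */
    static const char *const demo_key_names[DEMO_MAX] = {
        "demo-0", "demo-1", "demo-2", "demo-3",
    };

    static void demo_obj_init(struct demo_obj *obj, int kind)
    {
        mutex_init(&obj->lock);
        /* Re-class after mutex_init(); the name appears in lockdep reports. */
        lockdep_set_class_and_name(&obj->lock, &demo_keys[kind],
                                   demo_key_names[kind]);
    }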
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index fb6e10fdb2174..92e0981f74040 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -783,8 +783,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- skb, CTRL_CMD_NEWFAMILY) < 0)
+ skb, CTRL_CMD_NEWFAMILY) < 0) {
+ n--;
break;
+ }
}
cb->args[0] = n;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 6f5fa50f716d0..1105a838bab83 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -604,7 +604,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
ipv4 = true;
break;
case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
- SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
+ SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
nla_get_in6_addr(a), is_mask);
ipv6 = true;
break;
@@ -665,6 +665,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
tun_flags |= TUNNEL_VXLAN_OPT;
opts_type = type;
break;
+ case OVS_TUNNEL_KEY_ATTR_PAD:
+ break;
default:
OVS_NLERR(log, "Unknown IP tunnel attribute %d",
type);
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 3f9d8d7ec6323..b099b64366f35 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
rxrpc_conn_retransmit_call(conn, skb);
return 0;
+ case RXRPC_PACKET_TYPE_BUSY:
+ /* Just ignore BUSY packets for now. */
+ return 0;
+
case RXRPC_PACKET_TYPE_ABORT:
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&wtmp, sizeof(wtmp)) < 0)
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 802ac7c2e5e87..5334e309f17f0 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -201,9 +201,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
if (p->set_tc_index) {
+ int wlen = skb_network_offset(skb);
+
switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
- if (skb_cow_head(skb, sizeof(struct iphdr)))
+ wlen += sizeof(struct iphdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
goto drop;
skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@@ -211,7 +215,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
break;
case htons(ETH_P_IPV6):
- if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
+ wlen += sizeof(struct ipv6hdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
goto drop;
skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
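
skb_cow_head() only guarantees headroom for prepending; rewriting the DS field in place instead needs the two checks the hunks above add: pskb_may_pull() to make the header bytes linear and readable up to wlen, then skb_try_make_writable() to unshare them before modification. The combined pattern, as a sketch (the helper name is illustrative):

    #include <linux/skbuff.h>

    /* Make the first wlen bytes of skb linear and safe to modify. */
    static int demo_make_header_writable(struct sk_buff *skb, unsigned int wlen)
    {
        if (!pskb_may_pull(skb, wlen))        /* not enough linear data */
            return -EINVAL;
        if (skb_try_make_writable(skb, wlen)) /* nonzero: unshare failed */
            return -ENOMEM;
        return 0;
    }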
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 2a6835b4562b6..0439a1a683678 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -71,9 +71,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
- int i;
sctp_paramhdr_t *p;
- int err;
+ int i;
/* Retrieve the SCTP per socket area. */
sp = sctp_sk((struct sock *)sk);
@@ -264,8 +263,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* AUTH related initializations */
INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
- err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
- if (err)
+ if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
goto fail_init;
asoc->active_key_id = ep->active_key_id;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 71ce6b945dcb5..1224421036b3e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -546,7 +546,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
struct sctp_association *asoc = tp->asoc;
struct sctp_chunk *chunk, *tmp;
int pkt_count, gso = 0;
- int confirm;
struct dst_entry *dst;
struct sk_buff *head;
struct sctphdr *sh;
@@ -625,13 +624,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
asoc->peer.last_sent_to = tp;
}
head->ignore_df = packet->ipfragok;
- confirm = tp->dst_pending_confirm;
- if (confirm)
+ if (tp->dst_pending_confirm)
skb_set_dst_pending_confirm(head, 1);
/* neighbour should be confirmed on successful transmission or
* positive error
*/
- if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm)
+ if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
+ tp->dst_pending_confirm)
tp->dst_pending_confirm = 0;
out:
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index db352e5d61f89..025ccff670720 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
}
static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
- struct sctp_sndrcvinfo *sinfo,
- struct list_head *queue, int msg_len)
+ struct sctp_sndrcvinfo *sinfo, int msg_len)
{
+ struct sctp_outq *q = &asoc->outqueue;
struct sctp_chunk *chk, *temp;
- list_for_each_entry_safe(chk, temp, queue, list) {
+ list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
continue;
list_del_init(&chk->list);
+ q->out_qlen -= chk->skb->len;
asoc->sent_cnt_removable--;
asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
@@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
return;
}
- sctp_prsctp_prune_unsent(asoc, sinfo,
- &asoc->outqueue.out_chunk_list,
- msg_len);
+ sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
}
/* Mark all the eligible packets on a transport for retransmission. */
diff --git a/net/socket.c b/net/socket.c
index e034fe4164bee..985ef06792d6e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -652,6 +652,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
}
EXPORT_SYMBOL(kernel_sendmsg);
+static bool skb_is_err_queue(const struct sk_buff *skb)
+{
+ /* pkt_type of skbs enqueued on the error queue is set to
+ * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
+ * in recvmsg, since skbs received on a local socket will never
+ * have a pkt_type of PACKET_OUTGOING.
+ */
+ return skb->pkt_type == PACKET_OUTGOING;
+}
+
/*
* called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
*/
@@ -695,7 +705,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
put_cmsg(msg, SOL_SOCKET,
SCM_TIMESTAMPING, sizeof(tss), &tss);
- if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS))
+ if (skb_is_err_queue(skb) && skb->len &&
+ SKB_EXT_ERR(skb)->opt_stats)
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
skb->len, skb->data);
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 9d94e65d08941..271cd66e4b3b6 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
static void tipc_subscrp_timeout(unsigned long data)
{
struct tipc_subscription *sub = (struct tipc_subscription *)data;
+ struct tipc_subscriber *subscriber = sub->subscriber;
+
+ spin_lock_bh(&subscriber->lock);
+ tipc_nametbl_unsubscribe(sub);
+ spin_unlock_bh(&subscriber->lock);
/* Notify subscriber of timeout */
tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
@@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref)
struct tipc_subscriber *subscriber = sub->subscriber;
spin_lock_bh(&subscriber->lock);
- tipc_nametbl_unsubscribe(sub);
list_del(&sub->subscrp_list);
atomic_dec(&tn->subscription_count);
spin_unlock_bh(&subscriber->lock);
@@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
continue;
+ tipc_nametbl_unsubscribe(sub);
tipc_subscrp_get(sub);
spin_unlock_bh(&subscriber->lock);
tipc_subscrp_delete(sub);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6a0d48525fcf9..c36757e728442 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
if (s) {
struct unix_sock *u = unix_sk(s);
+ BUG_ON(!atomic_long_read(&u->inflight));
BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight))
@@ -341,6 +342,14 @@ void unix_gc(void)
}
list_del(&cursor);
+ /* Now gc_candidates contains only garbage. Restore original
+ * inflight counters for these as well, and remove the skbuffs
+ * which are creating the cycle(s).
+ */
+ skb_queue_head_init(&hitlist);
+ list_for_each_entry(u, &gc_candidates, link)
+ scan_children(&u->sk, inc_inflight, &hitlist);
+
/* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
@@ -350,14 +359,6 @@ void unix_gc(void)
list_move_tail(&u->link, &gc_inflight_list);
}
- /* Now gc_candidates contains only garbage. Restore original
- * inflight counters for these as well, and remove the skbuffs
- * which are creating the cycle(s).
- */
- skb_queue_head_init(&hitlist);
- list_for_each_entry(u, &gc_candidates, link)
- scan_children(&u->sk, inc_inflight, &hitlist);
-
spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9f770f33c1009..6f7f6757ceefb 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1102,10 +1102,19 @@ static const struct proto_ops vsock_dgram_ops = {
.sendpage = sock_no_sendpage,
};
+static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+ if (!transport->cancel_pkt)
+ return -EOPNOTSUPP;
+
+ return transport->cancel_pkt(vsk);
+}
+
static void vsock_connect_timeout(struct work_struct *work)
{
struct sock *sk;
struct vsock_sock *vsk;
+ int cancel = 0;
vsk = container_of(work, struct vsock_sock, dwork.work);
sk = sk_vsock(vsk);
@@ -1116,8 +1125,11 @@ static void vsock_connect_timeout(struct work_struct *work)
sk->sk_state = SS_UNCONNECTED;
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
+ cancel = 1;
}
release_sock(sk);
+ if (cancel)
+ vsock_transport_cancel_pkt(vsk);
sock_put(sk);
}
@@ -1224,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
err = sock_intr_errno(timeout);
sk->sk_state = SS_UNCONNECTED;
sock->state = SS_UNCONNECTED;
+ vsock_transport_cancel_pkt(vsk);
goto out_wait;
} else if (timeout == 0) {
err = -ETIMEDOUT;
sk->sk_state = SS_UNCONNECTED;
sock->state = SS_UNCONNECTED;
+ vsock_transport_cancel_pkt(vsk);
goto out_wait;
}
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 9d24c0e958b18..68675a151f22b 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
return len;
}
+static int
+virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+ struct virtio_vsock *vsock;
+ struct virtio_vsock_pkt *pkt, *n;
+ int cnt = 0;
+ LIST_HEAD(freeme);
+
+ vsock = virtio_vsock_get();
+ if (!vsock) {
+ return -ENODEV;
+ }
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+ if (pkt->vsk != vsk)
+ continue;
+ list_move(&pkt->list, &freeme);
+ }
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ list_for_each_entry_safe(pkt, n, &freeme, list) {
+ if (pkt->reply)
+ cnt++;
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+
+ if (cnt) {
+ struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+ int new_cnt;
+
+ new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+ if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
+ new_cnt < virtqueue_get_vring_size(rx_vq))
+ queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+ }
+
+ return 0;
+}
+
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
@@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = {
.release = virtio_transport_release,
.connect = virtio_transport_connect,
.shutdown = virtio_transport_shutdown,
+ .cancel_pkt = virtio_transport_cancel_pkt,
.dgram_bind = virtio_transport_dgram_bind,
.dgram_dequeue = virtio_transport_dgram_dequeue,
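
virtio_transport_cancel_pkt() uses the usual two-phase teardown: matching packets are detached onto a private freeme list under send_pkt_list_lock, then freed after the lock is dropped. The closing block re-kicks rx_work only when queued_replies crosses from at-or-above the vring size to below it: atomic_sub_return() yields the new value, so the old value is new_cnt + cnt, which is exactly what the pair of comparisons tests. A standalone sketch of that crossing check:

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true only when subtracting cnt moves the counter from
 * >= limit down to < limit, i.e. on the one transition that should
 * wake the consumer. */
static bool crossed_below(atomic_int *counter, int cnt, int limit)
{
	/* atomic_fetch_sub() returns the old value; subtracting cnt
	 * reproduces the kernel's atomic_sub_return() result. */
	int new_cnt = atomic_fetch_sub(counter, cnt) - cnt;

	return new_cnt + cnt >= limit && new_cnt < limit;
}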
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 8d592a45b5978..af087b44ceea2 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -58,6 +58,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
pkt->len = len;
pkt->hdr.len = cpu_to_le32(len);
pkt->reply = info->reply;
+ pkt->vsk = info->vsk;
if (info->msg && len > 0) {
pkt->buf = kmalloc(len, GFP_KERNEL);
@@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
.type = type,
+ .vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
@@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_REQUEST,
.type = VIRTIO_VSOCK_TYPE_STREAM,
+ .vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
@@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
(mode & SEND_SHUTDOWN ?
VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
+ .vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
@@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
.type = VIRTIO_VSOCK_TYPE_STREAM,
.msg = msg,
.pkt_len = len,
+ .vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
@@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
.op = VIRTIO_VSOCK_OP_RST,
.type = VIRTIO_VSOCK_TYPE_STREAM,
.reply = !!pkt,
+ .vsk = vsk,
};
/* Send RST only if the original pkt is not a RST pkt */
@@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
.remote_cid = le64_to_cpu(pkt->hdr.src_cid),
.remote_port = le32_to_cpu(pkt->hdr.src_port),
.reply = true,
+ .vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
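
The virtio_transport_common.c hunks stamp every outgoing packet with its owning socket at allocation (pkt->vsk = info->vsk), threading the owner through each pkt_info initializer. That stamp is what lets virtio_transport_cancel_pkt() above match a dying connection's queued packets by pointer identity alone. A minimal sketch of the idea, names hypothetical:

struct pkt {
	struct pkt *next;
	const void *owner;	/* like pkt->vsk above */
};

/* Match by identity only: the owner may already be shutting down,
 * so the pointer is compared, never dereferenced. */
static int pkt_owned_by(const struct pkt *p, const void *owner)
{
	return p->owner == owner;
}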
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d7f8be4e321a3..2312dc2ffdb98 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -545,22 +545,18 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
{
int err;
- rtnl_lock();
-
if (!cb->args[0]) {
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
genl_family_attrbuf(&nl80211_fam),
nl80211_fam.maxattr, nl80211_policy);
if (err)
- goto out_unlock;
+ return err;
*wdev = __cfg80211_wdev_from_attrs(
sock_net(skb->sk),
genl_family_attrbuf(&nl80211_fam));
- if (IS_ERR(*wdev)) {
- err = PTR_ERR(*wdev);
- goto out_unlock;
- }
+ if (IS_ERR(*wdev))
+ return PTR_ERR(*wdev);
*rdev = wiphy_to_rdev((*wdev)->wiphy);
/* 0 is the first index - add 1 to parse only once */
cb->args[0] = (*rdev)->wiphy_idx + 1;
@@ -570,10 +566,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp;
- if (!wiphy) {
- err = -ENODEV;
- goto out_unlock;
- }
+ if (!wiphy)
+ return -ENODEV;
*rdev = wiphy_to_rdev(wiphy);
*wdev = NULL;
@@ -584,21 +578,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
}
}
- if (!*wdev) {
- err = -ENODEV;
- goto out_unlock;
- }
+ if (!*wdev)
+ return -ENODEV;
}
return 0;
- out_unlock:
- rtnl_unlock();
- return err;
-}
-
-static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
-{
- rtnl_unlock();
}
/* IE validation */
@@ -2608,17 +2592,17 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
int filter_wiphy = -1;
struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
+ int ret;
rtnl_lock();
if (!cb->args[2]) {
struct nl80211_dump_wiphy_state state = {
.filter_wiphy = -1,
};
- int ret;
ret = nl80211_dump_wiphy_parse(skb, cb, &state);
if (ret)
- return ret;
+ goto out_unlock;
filter_wiphy = state.filter_wiphy;
@@ -2663,12 +2647,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
wp_idx++;
}
out:
- rtnl_unlock();
-
cb->args[0] = wp_idx;
cb->args[1] = if_idx;
- return skb->len;
+ ret = skb->len;
+ out_unlock:
+ rtnl_unlock();
+
+ return ret;
}
static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
@@ -4452,9 +4438,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
int sta_idx = cb->args[2];
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out_err;
if (!wdev->netdev) {
err = -EINVAL;
@@ -4489,7 +4476,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
cb->args[2] = sta_idx;
err = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return err;
}
@@ -5275,9 +5262,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
int path_idx = cb->args[2];
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out_err;
if (!rdev->ops->dump_mpath) {
err = -EOPNOTSUPP;
@@ -5310,7 +5298,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
cb->args[2] = path_idx;
err = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return err;
}
@@ -5470,9 +5458,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
int path_idx = cb->args[2];
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out_err;
if (!rdev->ops->dump_mpp) {
err = -EOPNOTSUPP;
@@ -5505,7 +5494,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
cb->args[2] = path_idx;
err = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return err;
}
@@ -7674,9 +7663,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
int start = cb->args[2], idx = 0;
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
- if (err)
+ if (err) {
+ rtnl_unlock();
return err;
+ }
wdev_lock(wdev);
spin_lock_bh(&rdev->bss_lock);
@@ -7699,7 +7691,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
wdev_unlock(wdev);
cb->args[2] = idx;
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return skb->len;
}
@@ -7784,9 +7776,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
int res;
bool radio_stats;
+ rtnl_lock();
res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (res)
- return res;
+ goto out_err;
/* prepare_wdev_dump parsed the attributes */
radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@@ -7827,7 +7820,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[2] = survey_idx;
res = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return res;
}
@@ -11508,17 +11501,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
void *data = NULL;
unsigned int data_len = 0;
- rtnl_lock();
-
if (cb->args[0]) {
/* subtract the 1 again here */
struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp;
- if (!wiphy) {
- err = -ENODEV;
- goto out_unlock;
- }
+ if (!wiphy)
+ return -ENODEV;
*rdev = wiphy_to_rdev(wiphy);
*wdev = NULL;
@@ -11538,23 +11527,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
attrbuf, nl80211_fam.maxattr, nl80211_policy);
if (err)
- goto out_unlock;
+ return err;
if (!attrbuf[NL80211_ATTR_VENDOR_ID] ||
- !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
- err = -EINVAL;
- goto out_unlock;
- }
+ !attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
+ return -EINVAL;
*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf);
if (IS_ERR(*wdev))
*wdev = NULL;
*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
- if (IS_ERR(*rdev)) {
- err = PTR_ERR(*rdev);
- goto out_unlock;
- }
+ if (IS_ERR(*rdev))
+ return PTR_ERR(*rdev);
vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]);
subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@@ -11567,19 +11552,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
continue;
- if (!vcmd->dumpit) {
- err = -EOPNOTSUPP;
- goto out_unlock;
- }
+ if (!vcmd->dumpit)
+ return -EOPNOTSUPP;
vcmd_idx = i;
break;
}
- if (vcmd_idx < 0) {
- err = -EOPNOTSUPP;
- goto out_unlock;
- }
+ if (vcmd_idx < 0)
+ return -EOPNOTSUPP;
if (attrbuf[NL80211_ATTR_VENDOR_DATA]) {
data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]);
@@ -11596,9 +11577,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
/* keep rtnl locked in successful case */
return 0;
- out_unlock:
- rtnl_unlock();
- return err;
}
static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@@ -11613,9 +11591,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
int err;
struct nlattr *vendor_data;
+ rtnl_lock();
err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out;
vcmd_idx = cb->args[2];
data = (void *)cb->args[3];
@@ -11624,15 +11603,21 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_NETDEV)) {
- if (!wdev)
- return -EINVAL;
+ if (!wdev) {
+ err = -EINVAL;
+ goto out;
+ }
if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
- !wdev->netdev)
- return -EINVAL;
+ !wdev->netdev) {
+ err = -EINVAL;
+ goto out;
+ }
if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
- if (!wdev_running(wdev))
- return -ENETDOWN;
+ if (!wdev_running(wdev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
}
}
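
The nl80211 hunks invert the locking contract: rtnl_lock() moves out of nl80211_prepare_wdev_dump() and nl80211_prepare_vendor_dump() into every dumpit caller, nl80211_finish_wdev_dump() goes away, and each caller unwinds through a single out/out_err label so the lock is dropped exactly once on every path (the vendor prepare still intentionally keeps rtnl held on success). A standalone sketch of that caller-owned-lock shape:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with big_lock held; reports errors but never unlocks, so
 * ownership of the lock stays with the caller. */
static int prepare_dump(void **state)
{
	*state = NULL;
	return -1;			/* e.g. -ENODEV */
}

static int dump_one(void)
{
	void *state;
	int err;

	pthread_mutex_lock(&big_lock);
	err = prepare_dump(&state);
	if (err)
		goto out_unlock;

	/* ... emit dump entries ... */
	err = 0;
out_unlock:
	pthread_mutex_unlock(&big_lock);
	return err;
}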
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 9705c279494b2..40a8aa39220d6 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -412,7 +412,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
up = nla_data(rp);
ulen = xfrm_replay_state_esn_len(up);
- if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
+ /* Check the overall length and the internal bitmap length to avoid
+ * potential overflow. */
+ if (nla_len(rp) < ulen ||
+ xfrm_replay_state_esn_len(replay_esn) != ulen ||
+ replay_esn->bmp_len != up->bmp_len)
+ return -EINVAL;
+
+ if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
return -EINVAL;
return 0;
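
The xfrm_replay_verify_len() change adds two requirements beyond the overall length comparison: the user-supplied bmp_len must equal the kernel state's, and replay_window may not exceed the bitmap's capacity of bmp_len * 32 bits, since a larger window would later index past the bitmap. A userspace sketch of the same bounds, with a hypothetical struct modeled on xfrm_replay_state_esn:

#include <stddef.h>
#include <stdint.h>

struct replay_state_esn {
	uint32_t bmp_len;	/* bitmap length in 32-bit words */
	uint32_t replay_window;	/* anti-replay window in bits */
	uint32_t bmp[];		/* flexible-array bitmap */
};

static size_t esn_len(const struct replay_state_esn *e)
{
	return sizeof(*e) + e->bmp_len * sizeof(uint32_t);
}

static int esn_verify(const struct replay_state_esn *kern,
		      const struct replay_state_esn *user,
		      size_t user_len)
{
	if (user_len < esn_len(user) ||
	    esn_len(kern) != esn_len(user) ||
	    kern->bmp_len != user->bmp_len)
		return -1;	/* lengths disagree */
	/* The window must fit inside the bitmap. */
	if (user->replay_window > user->bmp_len * sizeof(uint32_t) * 8)
		return -1;
	return 0;
}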