author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 16:40:27 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 16:40:27 -0700
commit     8d65b08debc7e62b2c6032d7fe7389d895b92cbc
tree       0c3141b60c3a03cc32742b5750c5e763b9dae489 /lib
parent     5a0387a8a8efb90ae7fea1e2e5c62de3efa74691
parent     5d15af6778b8e4ed1fd41b040283af278e7a9a72
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Here are some highlights from the 2065 networking commits that happened this development cycle:

   1) XDP support for IXGBE (John Fastabend) and thunderx (Sunil Kowuri)

   2) Add a generic XDP driver, so that anyone can test XDP even if they lack a networking device whose driver has explicit XDP support (me).

   3) Sparc64 now has an eBPF JIT too (me)

   4) Add a BPF program testing framework via BPF_PROG_TEST_RUN (Alexei Starovoitov)

   5) Make netfilter network namespace teardown less expensive (Florian Westphal)

   6) Add symmetric hashing support to nft_hash (Laura Garcia Liebana)

   7) Implement NAPI and GRO in netvsc driver (Stephen Hemminger)

   8) Support TC flower offload statistics in mlxsw (Arkadi Sharshevsky)

   9) Multiqueue support in stmmac driver (Joao Pinto)

  10) Remove TCP timewait recycling, it never really could possibly work well in the real world and timestamp randomization really zaps any hint of usability this feature had (Soheil Hassas Yeganeh)

  11) Support level3 vs level4 ECMP route hashing in ipv4 (Nikolay Aleksandrov)

  12) Add socket busy poll support to epoll (Sridhar Samudrala)

  13) Netlink extended ACK support (Johannes Berg, Pablo Neira Ayuso, and several others)

  14) IPSEC hw offload infrastructure (Steffen Klassert)"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2065 commits)
  tipc: refactor function tipc_sk_recv_stream()
  tipc: refactor function tipc_sk_recvmsg()
  net: thunderx: Optimize page recycling for XDP
  net: thunderx: Support for XDP header adjustment
  net: thunderx: Add support for XDP_TX
  net: thunderx: Add support for XDP_DROP
  net: thunderx: Add basic XDP support
  net: thunderx: Cleanup receive buffer allocation
  net: thunderx: Optimize CQE_TX handling
  net: thunderx: Optimize RBDR descriptor handling
  net: thunderx: Support for page recycling
  ipx: call ipxitf_put() in ioctl error path
  net: sched: add helpers to handle extended actions
  qed*: Fix issues in the ptp filter config implementation.
  qede: Fix concurrency issue in PTP Tx path processing.
  stmmac: Add support for SIMATIC IOT2000 platform
  net: hns: fix ethtool_get_strings overflow in hns driver
  tcp: fix wraparound issue in tcp_lp
  bpf, arm64: fix jit branch offset related to ldimm64
  bpf, arm64: implement jiting of BPF_XADD
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/nlattr.c     |  28
-rw-r--r--  lib/rhashtable.c |  33
-rw-r--r--  lib/test_bpf.c   | 150
3 files changed, 177 insertions(+), 34 deletions(-)
diff --git a/lib/nlattr.c b/lib/nlattr.c
index b42b8577fc234..a7e0b16078dff 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -112,6 +112,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
+ * @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Attributes with a type exceeding maxtype will be
@@ -120,20 +121,23 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
* Returns 0 on success or a negative error code.
*/
int nla_validate(const struct nlattr *head, int len, int maxtype,
- const struct nla_policy *policy)
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
const struct nlattr *nla;
- int rem, err;
+ int rem;
nla_for_each_attr(nla, head, len, rem) {
- err = validate_nla(nla, maxtype, policy);
- if (err < 0)
- goto errout;
+ int err = validate_nla(nla, maxtype, policy);
+
+ if (err < 0) {
+ if (extack)
+ extack->bad_attr = nla;
+ return err;
+ }
}
- err = 0;
-errout:
- return err;
+ return 0;
}
EXPORT_SYMBOL(nla_validate);
@@ -180,7 +184,8 @@ EXPORT_SYMBOL(nla_policy_len);
* Returns 0 on success or a negative error code.
*/
int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
- int len, const struct nla_policy *policy)
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
const struct nlattr *nla;
int rem, err;
@@ -193,8 +198,11 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
if (type > 0 && type <= maxtype) {
if (policy) {
err = validate_nla(nla, maxtype, policy);
- if (err < 0)
+ if (err < 0) {
+ if (extack)
+ extack->bad_attr = nla;
goto errout;
+ }
}
tb[type] = (struct nlattr *)nla;
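
As a minimal sketch (not part of this patch), a caller might thread the new extack argument through nla_parse() as below. Only the six-argument signature and the extack->bad_attr field come from the hunks above; the MY_ATTR_* names and my_policy table are assumptions made up for illustration.

#include <net/netlink.h>

/* Illustrative attribute space, not from this series. */
enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_PORT,
	__MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_PORT] = { .type = NLA_U16 },
};

static int my_parse_attrs(struct nlattr **tb, const struct nlattr *head,
			  int len, struct netlink_ext_ack *extack)
{
	/* On a policy violation nla_parse() now records the offending
	 * attribute in extack->bad_attr before returning the error, so
	 * the netlink core can point userspace at the exact attribute. */
	return nla_parse(tb, MY_ATTR_MAX, head, len, my_policy, extack);
}
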
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index f8635fd574425..a930e436db5d8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -535,7 +535,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
struct rhash_head *head;
int elasticity;
- elasticity = ht->elasticity;
+ elasticity = RHT_ELASTICITY;
pprev = rht_bucket_var(tbl, hash);
rht_for_each_continue(head, *pprev, tbl, hash) {
struct rhlist_head *list;
@@ -958,35 +958,20 @@ int rhashtable_init(struct rhashtable *ht,
if (params->min_size)
ht->p.min_size = roundup_pow_of_two(params->min_size);
- if (params->max_size)
- ht->p.max_size = rounddown_pow_of_two(params->max_size);
+ /* Cap total entries at 2^31 to avoid nelems overflow. */
+ ht->max_elems = 1u << 31;
- if (params->insecure_max_entries)
- ht->p.insecure_max_entries =
- rounddown_pow_of_two(params->insecure_max_entries);
- else
- ht->p.insecure_max_entries = ht->p.max_size * 2;
+ if (params->max_size) {
+ ht->p.max_size = rounddown_pow_of_two(params->max_size);
+ if (ht->p.max_size < ht->max_elems / 2)
+ ht->max_elems = ht->p.max_size * 2;
+ }
- ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
+ ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
if (params->nelem_hint)
size = rounded_hashtable_size(&ht->p);
- /* The maximum (not average) chain length grows with the
- * size of the hash table, at a rate of (log N)/(log log N).
- * The value of 16 is selected so that even if the hash
- * table grew to 2^32 you would not expect the maximum
- * chain length to exceed it unless we are under attack
- * (or extremely unlucky).
- *
- * As this limit is only to detect attacks, we don't need
- * to set it to a lower value as you'd need the chain
- * length to vastly exceed 16 to have any real effect
- * on the system.
- */
- if (!params->insecure_elasticity)
- ht->elasticity = 16;
-
if (params->locks_mul)
ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
else
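
To make the new sizing rule concrete, here is a small standalone sketch (plain C, not kernel code) of the arithmetic rhashtable_init() performs above: the element count is hard-capped at 2^31 so the nelems counter cannot overflow, and a caller-supplied max_size can only tighten that cap.

#include <stdio.h>

/* Mirror of the capping logic above: default cap of 2^31 entries,
 * lowered to 2 * max_size when the caller passes a small enough
 * (already power-of-two) max_size. */
static unsigned int cap_max_elems(unsigned int max_size)
{
	unsigned int max_elems = 1u << 31;

	if (max_size && max_size < max_elems / 2)
		max_elems = max_size * 2;

	return max_elems;
}

int main(void)
{
	printf("%u\n", cap_max_elems(0));        /* 2147483648: default cap */
	printf("%u\n", cap_max_elems(1u << 20)); /* 2097152: 2 * max_size   */
	return 0;
}
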
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0362da0b66c35..a0f66280ea502 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -434,6 +434,41 @@ loop:
return 0;
}
+static int __bpf_fill_stxdw(struct bpf_test *self, int size)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 1);
+ insn[1] = BPF_ST_MEM(size, R10, -40, 42);
+
+ for (i = 2; i < len - 2; i++)
+ insn[i] = BPF_STX_XADD(size, R10, R0, -40);
+
+ insn[len - 2] = BPF_LDX_MEM(size, R0, R10, -40);
+ insn[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_stxw(struct bpf_test *self)
+{
+ return __bpf_fill_stxdw(self, BPF_W);
+}
+
+static int bpf_fill_stxdw(struct bpf_test *self)
+{
+ return __bpf_fill_stxdw(self, BPF_DW);
+}
+
static struct bpf_test tests[] = {
{
"TAX",
@@ -4303,6 +4338,41 @@ static struct bpf_test tests[] = {
{ { 0, 0x22 } },
},
{
+ "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_MOV, R1, R10),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_ALU64_REG(BPF_MOV, R0, R10),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12 } },
+ },
+ {
+ "STX_XADD_W: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxw,
+ },
+ {
"STX_XADD_DW: Test: 0x12 + 0x10 = 0x22",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
@@ -4315,6 +4385,41 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x22 } },
},
+ {
+ "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_MOV, R1, R10),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_ALU64_REG(BPF_MOV, R0, R10),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12 } },
+ },
+ {
+ "STX_XADD_DW: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxdw,
+ },
/* BPF_JMP | BPF_EXIT */
{
"JMP_EXIT",
@@ -4656,6 +4761,51 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ /* Mainly testing JIT + imm64 here. */
+ "JMP_JGE_X: ldimm64 test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 2),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xeeeeeeeeU } },
+ },
+ {
+ "JMP_JGE_X: ldimm64 test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 0),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffffU } },
+ },
+ {
+ "JMP_JGE_X: ldimm64 test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 4),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JNE | BPF_X */
{
"JMP_JNE_X: if (3 != 2) return 1",