Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/bpf/main.h')
 drivers/net/ethernet/netronome/nfp/bpf/main.h | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 64 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 4981c8944ca35..654fe7823e5ed 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ * Copyright (C) 2016-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -39,6 +39,7 @@
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -81,10 +82,16 @@ enum static_regs {
enum pkt_vec {
PKT_VEC_PKT_LEN = 0,
PKT_VEC_PKT_PTR = 2,
+ PKT_VEC_QSEL_SET = 4,
+ PKT_VEC_QSEL_VAL = 6,
};
+#define PKT_VEL_QSEL_SET_BIT 4
+
#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
+#define pv_qsel_set(np) reg_lm(1, PKT_VEC_QSEL_SET)
+#define pv_qsel_val(np) reg_lm(1, PKT_VEC_QSEL_VAL)
#define stack_reg(np) reg_a(STATIC_REG_STACK)
#define stack_imm(np) imm_b(np)
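The two new pv_qsel_*() accessors address the queue-selection slots this hunk adds to the packet vector in local memory. As a rough illustration of how they could be consumed in jit.c, here is a minimal sketch of lowering an XDP store to xdp_md->rx_queue_index; the function name is hypothetical, but wrp_mov(), wrp_immed(), reg_b() and BIT() are pre-existing helpers.

/* Hypothetical sketch, not the actual jit.c implementation. */
static int nfp_queue_select_sketch(struct nfp_prog *nfp_prog,
				   struct nfp_insn_meta *meta)
{
	/* Copy the 32-bit queue ID from the source eBPF register into
	 * the QSEL value slot of the packet vector.
	 */
	wrp_mov(nfp_prog, pv_qsel_val(nfp_prog), reg_b(meta->insn.src_reg * 2));

	/* Raise the QSEL_SET flag so the FW knows a queue was chosen. */
	wrp_immed(nfp_prog, pv_qsel_set(nfp_prog), BIT(PKT_VEL_QSEL_SET_BIT));

	return 0;
}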
@@ -114,6 +121,8 @@ enum pkt_vec {
* @maps_in_use: number of currently offloaded maps
* @map_elems_in_use: number of elements allocated to offloaded maps
*
+ * @maps_neutral: hash table of offload-neutral maps (on pointer)
+ *
* @adjust_head: adjust head capability
* @adjust_head.flags: extra flags for adjust head
* @adjust_head.off_min: minimal packet offset within buffer required
@@ -133,8 +142,10 @@ enum pkt_vec {
* @helpers.map_lookup: map lookup helper address
* @helpers.map_update: map update helper address
* @helpers.map_delete: map delete helper address
+ * @helpers.perf_event_output: output perf event to a ring buffer
*
* @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
+ * @queue_select: BPF can set the RX queue ID in packet vector
*/
struct nfp_app_bpf {
struct nfp_app *app;
@@ -150,6 +161,8 @@ struct nfp_app_bpf {
unsigned int maps_in_use;
unsigned int map_elems_in_use;
+ struct rhashtable maps_neutral;
+
struct nfp_bpf_cap_adjust_head {
u32 flags;
int off_min;
@@ -171,9 +184,11 @@ struct nfp_app_bpf {
u32 map_lookup;
u32 map_update;
u32 map_delete;
+ u32 perf_event_output;
} helpers;
bool pseudo_random;
+ bool queue_select;
};
enum nfp_bpf_map_use {
@@ -199,6 +214,14 @@ struct nfp_bpf_map {
enum nfp_bpf_map_use use_map[];
};
+struct nfp_bpf_neutral_map {
+ struct rhash_head l;
+ struct bpf_map *ptr;
+ u32 count;
+};
+
+extern const struct rhashtable_params nfp_bpf_maps_neutral_params;
+
struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
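nfp_bpf_maps_neutral_params has to describe how nfp_bpf_neutral_map records are keyed and linked into the table. One plausible definition, keyed on the raw struct bpf_map pointer, is sketched below together with the matching table init call; treat the exact option values as assumptions.

/* Sketch: rhashtable keyed on the bpf_map pointer itself; the hint and
 * shrinking settings are illustrative assumptions.
 */
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
	.nelem_hint		= 4,
	.key_len		= FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr),
	.key_offset		= offsetof(struct nfp_bpf_neutral_map, ptr),
	.head_offset		= offsetof(struct nfp_bpf_neutral_map, l),
	.automatic_shrinking	= true,
};

/* Somewhere in the app init path the table must be created, e.g.: */
err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);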
@@ -236,9 +259,12 @@ struct nfp_bpf_reg_state {
* @xadd_over_16bit: 16bit immediate is not guaranteed
* @xadd_maybe_16bit: 16bit immediate is possible
* @jmp_dst: destination info for jump instructions
+ * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
+ * @umin: copy of core verifier umin_value.
+ * @umax: copy of core verifier umax_value.
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
@@ -264,13 +290,23 @@ struct nfp_insn_meta {
bool xadd_maybe_16bit;
};
/* jump */
- struct nfp_insn_meta *jmp_dst;
+ struct {
+ struct nfp_insn_meta *jmp_dst;
+ bool jump_neg_op;
+ };
/* function calls */
struct {
u32 func_id;
struct bpf_reg_state arg1;
struct nfp_bpf_reg_state arg2;
};
+ /* We are interested in range info for some operands,
+ * for example, the shift amount.
+ */
+ struct {
+ u64 umin;
+ u64 umax;
+ };
};
unsigned int off;
unsigned short n;
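The jump_neg_op flag works because subtracting a negative immediate is the same as adding its magnitude: a rewrite pass can store the negated constant and set the flag, and code generation then flips the ALU operation while producing the same flags. A sketch of the consuming side follows; the surrounding compare helper is elided, and only ALU_OP_ADD/ALU_OP_SUB are existing nfp_asm.h opcodes.

/* Sketch: inside the compare-with-immediate code generator. */
enum alu_op alu_op;

alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
/* ... emit "dst <alu_op> imm" and branch on the resulting flags ... */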
@@ -348,6 +384,25 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
+static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta)
+{
+ u8 code = meta->insn.code;
+ bool is_alu, is_shift;
+ u8 opclass, opcode;
+
+ opclass = BPF_CLASS(code);
+ is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU;
+ if (!is_alu)
+ return false;
+
+ opcode = BPF_OP(code);
+ is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH;
+ if (!is_shift)
+ return false;
+
+ return BPF_SRC(code) == BPF_X;
+}
+
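is_mbpf_indir_shift() classifies register-controlled (BPF_X) shifts, which is exactly where the umin/umax copies above matter: the JIT wants to know the possible range of the shift amount. A sketch of the verifier-side consumer, assumed to live in verifier.c, with cur_regs() being the core verifier accessor; using min()/max() rather than plain assignment keeps the range conservative when the instruction is visited more than once, assuming umin/umax start out as U64_MAX/0.

/* Sketch: record the value range of the shift amount register. */
if (is_mbpf_indir_shift(meta)) {
	const struct bpf_reg_state *sreg;

	sreg = cur_regs(env) + meta->insn.src_reg;
	meta->umin = min(meta->umin, sreg->umin_value);
	meta->umax = max(meta->umax, sreg->umax_value);
}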
/**
* struct nfp_prog - nfp BPF program
* @bpf: backpointer to the bpf app priv structure
@@ -363,6 +418,8 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
* @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier
* @adjust_head_location: if program has single adjust head call - the insn no.
+ * @map_records_cnt: the number of map pointers recorded for this prog
+ * @map_records: the map record pointers from bpf->maps_neutral
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
@@ -386,6 +443,9 @@ struct nfp_prog {
unsigned int stack_depth;
unsigned int adjust_head_location;
+ unsigned int map_records_cnt;
+ struct nfp_bpf_neutral_map **map_records;
+
struct list_head insns;
};
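map_records is the per-program side of the bpf->maps_neutral table. A sketch of how a record could be taken at translation time, reusing an entry when another offloaded program already tracks the same map; the helper name is hypothetical and allocation of a fresh record is elided.

static int
nfp_map_ptr_record_sketch(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
			  struct bpf_map *map)
{
	struct nfp_bpf_neutral_map *record;

	/* Reuse path: another program already tracks this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map,
					nfp_bpf_maps_neutral_params);
	if (record) {
		nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
		record->count++;
		return 0;
	}

	/* First user: allocate a record with count = 1, insert it into
	 * bpf->maps_neutral and into nfp_prog->map_records (elided here).
	 */
	return -EOPNOTSUPP;
}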
@@ -436,5 +496,7 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key);
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb);
+
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif
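nfp_bpf_event_output() gives the control-message RX path a way to forward FW-generated perf events into the host perf ring buffer. A hedged sketch of how the dispatch in nfp_bpf_ctrl_msg_rx() could look; the cmsg type helper and the CMSG_TYPE_BPF_EVENT constant are assumed names, and skb ownership handling is simplified.

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;

	/* FW-originated perf events bypass the request/reply machinery. */
	if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
		nfp_bpf_event_output(bpf, skb); /* assumed to consume skb */
		return;
	}

	/* ... normal request/reply handling elided ... */
}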